/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
 *	and fixed mappings
 */
#define VMALLOC_START		(MODULES_END)
#define VMALLOC_END		(- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>

extern struct page *vmemmap;

extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))

/*
 * Macros to convert between a physical address and its placement in a
 * page table entry, taking care of 52-bit addresses.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
#define __pte_to_phys(pte)	\
	((pte_val(pte) & PTE_ADDR_LOW) | ((pte_val(pte) & PTE_ADDR_HIGH) << 36))
#define __phys_to_pte_val(phys)	(((phys) | ((phys) >> 36)) & PTE_ADDR_MASK)
#else
#define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_MASK)
#define __phys_to_pte_val(phys)	(phys)
#endif
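
/*
 * Editor's worked example (illustrative, not part of the kernel API):
 * with 52-bit PAs, descriptor bits [15:12] carry physical address bits
 * [51:48], which is what the 36-bit shifts above implement. Assuming
 * PTE_ADDR_LOW covers bits [47:16] and PTE_ADDR_HIGH bits [15:12] (see
 * <asm/pgtable-hwdef.h>), a round-trip looks like:
 *
 *	phys                = 0x0009000000010000  (PA bits [51:48] = 0x9)
 *	__phys_to_pte_val() = 0x0000000000019000  (0x9 folded into bits [15:12])
 *	__pte_to_phys()     = 0x0009000000010000  (bits [15:12] unfolded again)
 */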

#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	\
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

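/*
 * Editor's note: the (__boundary - 1 < (end) - 1) comparison, rather
 * than a plain __boundary < (end), keeps these macros correct when
 * 'end' is 0 and the range wraps the top of the address space. A worked
 * example, assuming CONT_PTE_SIZE is 64KB (16 contiguous 4KB pages):
 *
 *	pte_cont_addr_end(0x17000, 0x52000) == 0x20000  (next 64KB block)
 *	pte_cont_addr_end(0x4c000, 0x52000) == 0x50000  (still below end)
 *	pte_cont_addr_end(0x4c000, 0x4e000) == 0x4e000  (clamped to end)
 */
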
#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
/*
 * Execute-only user mappings do not have the PTE_USER bit set. All valid
 * kernel mappings have the PTE_UXN bit set.
 */
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
#define pte_valid_young(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))

/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))

/*
 * p??_access_permitted() is true for valid user mappings (subject to the
 * write permission check) other than user execute-only mappings, which do
 * not have the PTE_USER bit set. PROT_NONE mappings do not have the
 * PTE_VALID bit set.
 */
#define pte_access_permitted(pte, write) \
	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

extern void __sync_icache_dcache(pte_t pteval);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0      |     1           0          0
 *   0      1      |     1           1          0
 *   1      0      |     1           0          1
 *   1      1      |     0           1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */

static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte)
{
	pte_t old_pte;

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	old_pte = READ_ONCE(*ptep);

	if (!pte_valid(old_pte) || !pte_valid(pte))
		return;
	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
		return;

	/*
	 * Check for potential race with hardware updates of the pte
	 * (ptep_set_access_flags safely changes valid ptes without going
	 * through an invalid entry).
	 */
	VM_WARN_ONCE(!pte_young(pte),
		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	__check_racy_pte_update(mm, ptep, pte);

	set_pte(ptep, pte);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	pteval_t lhs, rhs;

	lhs = pte_val(pte_a);
	rhs = pte_val(pte_b);

	if (pte_present(pte_a))
		lhs &= ~PTE_RDONLY;

	if (pte_present(pte_b))
		rhs &= ~PTE_RDONLY;

	return (lhs == rhs);
}

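/*
 * Editor's note on pte_same() above: PTE_RDONLY is masked out for
 * present ptes because hardware DBM can clear it underneath us on a
 * write, and that alone should not make two otherwise-identical ptes
 * compare unequal.
 */
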
/*
 * Huge pte definitions.
 */
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
}

static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_present(pmd)	pte_present(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))

#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_devmap(pmd)		pte_devmap(pmd_pte(pmd))
#endif
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
}

#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_young(pud)		pte_young(pud_pte(pud))
#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud)		pte_write(pud_pte(pud))

#define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))

#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
/*
 * DMA allocations for non-coherent devices use what the Arm architecture calls
 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
 * and merging of writes. This is different from "Device-nGnR[nE]" memory which
 * is intended for MMIO and thus forbids speculation, preserves access size,
 * requires strict alignment and can also force write responses to come from the
 * endpoint.
 */
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & PMD_TABLE_BIT))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

extern pgd_t init_pg_dir[PTRS_PER_PGD];
extern pgd_t init_pg_end[];
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];

extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

static inline bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
	        ((unsigned long)swapper_pg_dir & PAGE_MASK);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif /* __PAGETABLE_PMD_FOLDED */

	WRITE_ONCE(*pmdp, pmd);

	if (pmd_valid(pmd)) {
		dsb(ishst);
		isb();
	}
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}

static inline void pte_unmap(pte_t *pte) { }

/* Find an entry in the third-level page table. */
#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
#define pte_offset_kernel(dir,addr)	((pte_t *)__va(pte_offset_phys((dir), (addr))))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(__pmd_to_phys(pmd)))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & PUD_TABLE_BIT))
#define pud_present(pud)	pte_present(pud_pte(pud))
#define pud_valid(pud)		pte_valid(pud_pte(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
#ifdef __PAGETABLE_PUD_FOLDED
	if (in_swapper_pgdir(pudp)) {
		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
		return;
	}
#endif /* __PAGETABLE_PUD_FOLDED */

	WRITE_ONCE(*pudp, pud);

	if (pud_valid(pud)) {
		dsb(ishst);
		isb();
	}
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}

/* Find an entry in the second-level page table. */
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))
#define pmd_offset(dir, addr)		((pmd_t *)__va(pmd_offset_phys((dir), (addr))))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)		pfn_to_page(__phys_to_pfn(__pud_to_phys(pud)))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(pud)		__pud_error(__FILE__, __LINE__, pud_val(pud))

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		(!(pgd_val(pgd) & 2))
#define pgd_present(pgd)	(pgd_val(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	if (in_swapper_pgdir(pgdp)) {
		set_swapper_pgd(pgdp, pgd);
		return;
	}

	WRITE_ONCE(*pgdp, pgd);
	dsb(ishst);
	isb();
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
	return __pgd_to_phys(pgd);
}

/* Find an entry in the first-level page table. */
#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

#define pud_offset_phys(dir, addr)	(pgd_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
#define pud_offset(dir, addr)		((pud_t *)__va(pud_offset_phys((dir), (addr))))

#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(pgd, addr)	pud_set_fixmap(pud_offset_phys(pgd, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(__pgd_to_phys(pgd)))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0; })

/* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset_raw(pgd, addr)	((pgd) + pgd_index(addr))

#define pgd_offset(mm, addr)	(pgd_offset_raw((mm)->pgd, (addr)))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

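/*
 * Editor's sketch (illustrative only, kept out of the build): how the
 * *_offset helpers above compose into a software walk of the kernel
 * page tables down to a third-level entry, assuming a configuration
 * with more than three translation levels.
 */
#if 0
static pte_t *walk_example(unsigned long addr)
{
	pgd_t *pgdp = pgd_offset_k(addr);	/* entry in swapper_pg_dir */
	pud_t *pudp;
	pmd_t *pmdp;

	if (pgd_none(READ_ONCE(*pgdp)))
		return NULL;
	pudp = pud_offset(pgdp, addr);		/* first level */
	if (pud_none(READ_ONCE(*pudp)))
		return NULL;
	pmdp = pmd_offset(pudp, addr);		/* second level */
	if (pmd_none(READ_ONCE(*pmdp)))
		return NULL;
	return pte_offset_kernel(pmdp, addr);	/* third level */
}
#endif
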
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

static inline int pud_devmap(pud_t pud)
{
	return 0;
}

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	return __ptep_test_and_clear_young(ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	int young = ptep_test_and_clear_young(vma, address, ptep);

	if (young) {
		/*
		 * We can elide the trailing DSB here since the worst that can
		 * happen is that a CPU continues to use the young entry in its
		 * TLB and we mistakenly reclaim the associated page. The
		 * window for such an event is bounded by the next
		 * context-switch, which provides a DSB to complete the TLB
		 * invalidation.
		 */
		flush_tlb_page_nosync(vma, address);
	}

	return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(xchg_relaxed(&pte_val(*ptep), 0));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		/*
		 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
		 * clear), set the PTE_DIRTY bit.
		 */
		if (pte_hw_dirty(pte))
			pte = pte_mkdirty(pte);
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

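/*
 * Editor's worked example of the encoding above, for type 3 and offset
 * 0x1234:
 *
 *	__swp_entry(3, 0x1234).val = (3 << 2) | (0x1234 << 8) = 0x12340c
 *	__swp_type()   -> (0x12340c >> 2) & 0x3f              = 3
 *	__swp_offset() -> (0x12340c >> 8) & __SWP_OFFSET_MASK = 0x1234
 *
 * Bits 0-1 stay zero, so a swap entry is never a valid descriptor to
 * the hardware walker.
 */
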
extern int kern_addr_valid(unsigned long addr);

#include <asm-generic/pgtable.h>

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#define kc_vaddr_to_offset(v)	((v) & ~PAGE_END)
#define kc_offset_to_vaddr(o)	((o) | PAGE_END)

#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
#define phys_to_ttbr(addr)	(addr)
#endif

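/*
 * Editor's note: with 52-bit PAs the (addr >> 46) term moves physical
 * address bits [51:48] down into TTBR bits [5:2], mirroring the pte
 * folding earlier in this file; TTBR_BADDR_MASK_52 is assumed to keep
 * exactly the architecturally valid BADDR bits.
 */
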
#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */