arm64: mte: Add PROT_MTE support to mmap() and mprotect()
arch/arm64/include/asm/pgtable.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/mte.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
 *	and fixed mappings
 */
#define VMALLOC_START		(MODULES_END)
#define VMALLOC_END		(- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>

extern struct page *vmemmap;

extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

/* Set stride and tlb_level in flush_*_tlb_range */
#define flush_pmd_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
#define flush_pud_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))

/*
 * Macros to convert between a physical address and its placement in a
 * page table entry, taking care of 52-bit addresses.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
#define __pte_to_phys(pte)	\
	((pte_val(pte) & PTE_ADDR_LOW) | ((pte_val(pte) & PTE_ADDR_HIGH) << 36))
#define __phys_to_pte_val(phys)	(((phys) | ((phys) >> 36)) & PTE_ADDR_MASK)
#else
#define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_MASK)
#define __phys_to_pte_val(phys)	(phys)
#endif
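
/*
 * Illustrative sketch, not part of the original header: under
 * CONFIG_ARM64_PA_BITS_52, PA bits 51:48 live in pte bits 15:12, so
 * converting a page-aligned physical address to a pte value and back is an
 * identity. The helper name below is hypothetical, for exposition only.
 */
#if 0	/* usage sketch, never compiled */
static inline bool example_pa_roundtrip(phys_addr_t pa)
{
	pte_t pte = __pte(__phys_to_pte_val(pa & PAGE_MASK));

	return __pte_to_phys(pte) == (pa & PAGE_MASK);
}
#endif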

#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	\
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))
#define pte_tagged(pte)		((pte_val(pte) & PTE_ATTRINDX_MASK) == \
				 PTE_ATTRINDX(MT_NORMAL_TAGGED))
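
/*
 * Illustrative sketch, not part of the original header: pte_tagged() is the
 * predicate behind MTE tag management; set_pte_at() further down uses exactly
 * this combination to decide when tags must be synchronised. Hypothetical
 * helper name, for exposition only.
 */
#if 0	/* usage sketch, never compiled */
static inline bool example_needs_tag_sync(pte_t pte)
{
	return system_supports_mte() && pte_present(pte) &&
	       pte_tagged(pte) && !pte_special(pte);
}
#endif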

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})
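
/*
 * Note added for exposition, not in the original: the "- 1" on both sides of
 * the comparison above keeps the macros correct when __boundary wraps to 0 at
 * the very top of the address space; 0 - 1 == ULONG_MAX, so a wrapped
 * boundary compares as "beyond end" and (end) is returned instead.
 */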

#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
#define pte_valid_young(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))

/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))

/*
 * p??_access_permitted() is true for valid user mappings (subject to the
 * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
 * set.
 */
#define pte_access_permitted(pte, write) \
	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

extern void __sync_icache_dcache(pte_t pteval);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 *   Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0        | 1           0          0
 *   0      1        | 1           1          0
 *   1      0        | 1           0          1
 *   1      1        | 0           1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
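
/*
 * Illustrative sketch, not part of the original header: per the table above,
 * a clean writable pte is (PTE_RDONLY | PTE_WRITE); hardware DBM marks it
 * dirty by clearing PTE_RDONLY, which is what pte_hw_dirty() detects.
 * Hypothetical helper, for exposition only.
 */
#if 0	/* usage sketch, never compiled */
static inline bool example_hw_made_dirty(pte_t pte)
{
	/* last row of the table: Dirty=1, Writable=1, RDONLY=0, WRITE=1 */
	return pte_write(pte) && !(pte_val(pte) & PTE_RDONLY);
}
#endif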

static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte)
{
	pte_t old_pte;

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	old_pte = READ_ONCE(*ptep);

	if (!pte_valid(old_pte) || !pte_valid(pte))
		return;
	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
		return;

	/*
	 * Check for potential race with hardware updates of the pte
	 * (ptep_set_access_flags safely changes valid ptes without going
	 * through an invalid entry).
	 */
	VM_WARN_ONCE(!pte_young(pte),
		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	if (system_supports_mte() &&
	    pte_present(pte) && pte_tagged(pte) && !pte_special(pte))
		mte_sync_tags(ptep, pte);

	__check_racy_pte_update(mm, ptep, pte);

	set_pte(ptep, pte);
}
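
/*
 * Illustrative sketch, not part of the original header: how a fault path
 * typically reaches set_pte_at() — build the pte from a page and the vma
 * protection bits, then install it so the icache/MTE hooks above run.
 * Hypothetical helper; mk_pte() is defined later in this file.
 */
#if 0	/* usage sketch, never compiled */
static void example_install_pte(struct vm_area_struct *vma, unsigned long addr,
				pte_t *ptep, struct page *page)
{
	pte_t pte = mk_pte(page, vma->vm_page_prot);

	set_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif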

/*
 * Huge pte definitions.
 */
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t p4d_pte(p4d_t p4d)
{
	return __pte(p4d_val(p4d));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
}

static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_present(pmd)	pte_present(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkinvalid(pmd)	(__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))

#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_devmap(pmd)		pte_devmap(pmd_pte(pmd))
#endif
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
}

#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_young(pud)		pte_young(pud_pte(pud))
#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud)		pte_write(pud_pte(pud))

#define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))

#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

#define __p4d_to_phys(p4d)	__pte_to_phys(p4d_pte(p4d))
#define __phys_to_p4d_val(phys)	__phys_to_pte_val(phys)

#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
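
/*
 * Illustrative sketch, not part of this version of the header: new
 * memory-type based attributes follow the same __pgprot_modify() pattern.
 * For example, a hypothetical helper selecting the MTE "Normal Tagged"
 * memory type could be built as:
 */
#if 0	/* usage sketch, never compiled */
#define example_pgprot_tagged(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
#endif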

#define pgprot_nx(prot) \
	__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
/*
 * DMA allocations for non-coherent devices use what the Arm architecture calls
 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
 * and merging of writes. This is different from "Device-nGnR[nE]" memory which
 * is intended for MMIO and thus forbids speculation, preserves access size,
 * requires strict alignment and can also force write responses to come from the
 * endpoint.
 */
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & PMD_TABLE_BIT))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)
#define pmd_leaf(pmd)		pmd_sect(pmd)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

extern pgd_t init_pg_dir[PTRS_PER_PGD];
extern pgd_t init_pg_end[];
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_end[];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];

extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

static inline bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
		((unsigned long)swapper_pg_dir & PAGE_MASK);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif /* __PAGETABLE_PMD_FOLDED */

	WRITE_ONCE(*pmdp, pmd);

	if (pmd_valid(pmd)) {
		dsb(ishst);
		isb();
	}
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_page_paddr(pmd));
}

/* Find an entry in the third-level page table. */
#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)			phys_to_page(__pmd_to_phys(pmd))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & PUD_TABLE_BIT))
#define pud_present(pud)	pte_present(pud_pte(pud))
#define pud_leaf(pud)		pud_sect(pud)
#define pud_valid(pud)		pte_valid(pud_pte(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
#ifdef __PAGETABLE_PUD_FOLDED
	if (in_swapper_pgdir(pudp)) {
		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
		return;
	}
#endif /* __PAGETABLE_PUD_FOLDED */

	WRITE_ONCE(*pudp, pud);

	if (pud_valid(pud)) {
		dsb(ishst);
		isb();
	}
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_page_paddr(pud));
}

/* Find an entry in the second-level page table. */
#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)			phys_to_page(__pud_to_phys(pud))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(pud)		__pud_error(__FILE__, __LINE__, pud_val(pud))

#define p4d_none(p4d)		(!p4d_val(p4d))
#define p4d_bad(p4d)		(!(p4d_val(p4d) & 2))
#define p4d_present(p4d)	(p4d_val(p4d))

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	if (in_swapper_pgdir(p4dp)) {
		set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
		return;
	}

	WRITE_ONCE(*p4dp, p4d);
	dsb(ishst);
	isb();
}

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d(0));
}

static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
{
	return __p4d_to_phys(p4d);
}

static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
	return (unsigned long)__va(p4d_page_paddr(p4d));
}

/* Find an entry in the first-level page table. */
#define pud_offset_phys(dir, addr)	(p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))

#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(p4d, addr)	pud_set_fixmap(pud_offset_phys(p4d, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)

#define p4d_page(p4d)		pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define p4d_page_paddr(p4d)	({ BUILD_BUG(); 0;})
#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0;})

/* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/*
	 * Normal and Normal-Tagged are two different memory types and indices
	 * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
	 */
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP |
			      PTE_ATTRINDX_MASK;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
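
/*
 * Illustrative sketch, not part of the original header: because the mask in
 * pte_modify() includes PTE_ATTRINDX_MASK, a protection change carrying a new
 * vm_page_prot can switch a mapping between Normal and Normal-Tagged (MTE)
 * memory. Hypothetical helper, for exposition only.
 */
#if 0	/* usage sketch, never compiled */
static inline bool example_prot_mte_applied(pte_t pte, pgprot_t newprot)
{
	/* the resulting memory type comes from newprot, not the old pte */
	return pte_tagged(pte_modify(pte, newprot));
}
#endif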

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

static inline int pud_devmap(pud_t pud)
{
	return 0;
}

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	return __ptep_test_and_clear_young(ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	int young = ptep_test_and_clear_young(vma, address, ptep);

	if (young) {
		/*
		 * We can elide the trailing DSB here since the worst that can
		 * happen is that a CPU continues to use the young entry in its
		 * TLB and we mistakenly reclaim the associated page. The
		 * window for such an event is bounded by the next
		 * context-switch, which provides a DSB to complete the TLB
		 * invalidation.
		 */
		flush_tlb_page_nosync(vma, address);
	}

	return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(xchg_relaxed(&pte_val(*ptep), 0));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		/*
		 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
		 * clear), set the PTE_DIRTY bit.
		 */
		if (pte_hw_dirty(pte))
			pte = pte_mkdirty(pte);
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
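
/*
 * Illustrative sketch, not part of the original header: with the field widths
 * above, encoding a (type, offset) pair and decoding it again is an identity
 * for any type below (1 << __SWP_TYPE_BITS) and offset below
 * (1UL << __SWP_OFFSET_BITS). Hypothetical helper, for exposition only.
 */
#if 0	/* usage sketch, never compiled */
static inline bool example_swp_roundtrip(unsigned long type, unsigned long off)
{
	swp_entry_t entry = __swp_entry(type, off);

	return __swp_type(entry) == type && __swp_offset(entry) == off;
}
#endif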

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
#define phys_to_ttbr(addr)	(addr)
#endif

/*
 * On arm64 without hardware Access Flag, copying from user will fail because
 * the pte is old and cannot be marked young. So we always end up with a
 * zeroed page after fork() + CoW for pfn mappings. We don't always have a
 * hardware-managed access flag on arm64.
 */
static inline bool arch_faults_on_old_pte(void)
{
	WARN_ON(preemptible());

	return !cpu_has_hw_af();
}
#define arch_faults_on_old_pte arch_faults_on_old_pte

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */