arm64: add support for folded p4d page tables
[linux-2.6-block.git] arch/arm64/include/asm/pgtable.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
 *	and fixed mappings
 */
#define VMALLOC_START		(MODULES_END)
#define VMALLOC_END		(- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>

extern struct page *vmemmap;

extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))

/*
 * Macros to convert between a physical address and its placement in a
 * page table entry, taking care of 52-bit addresses.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
#define __pte_to_phys(pte)	\
	((pte_val(pte) & PTE_ADDR_LOW) | ((pte_val(pte) & PTE_ADDR_HIGH) << 36))
#define __phys_to_pte_val(phys)	(((phys) | ((phys) >> 36)) & PTE_ADDR_MASK)
#else
#define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_MASK)
#define __phys_to_pte_val(phys)	(phys)
#endif
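
/*
 * Worked example (illustrative, assuming CONFIG_ARM64_PA_BITS_52, which
 * implies 64K pages): PTE_ADDR_HIGH covers pte bits [15:12], which hold
 * physical address bits [51:48]. For phys = 0xf000_0001_0000:
 *
 *	__phys_to_pte_val(phys) = ((phys | (phys >> 36)) & PTE_ADDR_MASK)
 *				= 0x1_f000   (PA bits 51:48 folded into 15:12)
 *	__pte_to_phys(pte)	= 0x1_0000 | (0xf000 << 36)
 *				= 0xf000_0001_0000
 */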

#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	\
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})
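
/*
 * Worked example (illustrative, assuming 4K pages, where CONT_PTE_SIZE is
 * 16 * 4KiB = 64KiB): pte_cont_addr_end(0x41000, 0x90000) rounds up to the
 * next 64KiB boundary, (0x41000 + 0x10000) & ~0xffff = 0x50000, and returns
 * it because it is below 'end'. The '- 1' in the comparison keeps the
 * macros correct when 'end' wraps to 0 at the top of the address space.
 */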

#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
#define pte_valid_young(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))

/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))

/*
 * p??_access_permitted() is true for valid user mappings (subject to the
 * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
 * set.
 */
#define pte_access_permitted(pte, write) \
	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

extern void __sync_icache_dcache(pte_t pteval);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0      |     1          0          0
 *   0      1      |     1          1          0
 *   1      0      |     1          0          1
 *   1      1      |     0          1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
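
/*
 * Illustrative helper (a sketch, not part of the kernel API): how the table
 * above is consulted in practice. With hardware DBM, a clean writable pte
 * has both PTE_WRITE and PTE_RDONLY set; the CPU clears PTE_RDONLY on the
 * first write, which pte_hw_dirty() then observes.
 */
static inline bool __example_pte_is_dirty(pte_t pte)
{
	/* software dirty: set by pte_mkdirty() on the fault path */
	if (pte_sw_dirty(pte))
		return true;

	/* hardware dirty: DBM cleared PTE_RDONLY on a write */
	return pte_hw_dirty(pte);
}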

static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte)
{
	pte_t old_pte;

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	old_pte = READ_ONCE(*ptep);

	if (!pte_valid(old_pte) || !pte_valid(pte))
		return;
	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
		return;

	/*
	 * Check for potential race with hardware updates of the pte
	 * (ptep_set_access_flags safely changes valid ptes without going
	 * through an invalid entry).
	 */
	VM_WARN_ONCE(!pte_young(pte),
		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	__check_racy_pte_update(mm, ptep, pte);

	set_pte(ptep, pte);
}

/*
 * Huge pte definitions.
 */
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t p4d_pte(p4d_t p4d)
{
	return __pte(p4d_val(p4d));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
}

static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_present(pmd)	pte_present(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkinvalid(pmd)	(__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))

#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_devmap(pmd)		pte_devmap(pmd_pte(pmd))
#endif
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
}

#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_young(pud)		pte_young(pud_pte(pud))
#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud)		pte_write(pud_pte(pud))

#define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))

#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

#define __p4d_to_phys(p4d)	__pte_to_phys(p4d_pte(p4d))
#define __phys_to_p4d_val(phys)	__phys_to_pte_val(phys)

#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_nx(prot) \
	__pgprot_modify(prot, 0, PTE_PXN)

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
/*
 * DMA allocations for non-coherent devices use what the Arm architecture calls
 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
 * and merging of writes. This is different from "Device-nGnR[nE]" memory which
 * is intended for MMIO and thus forbids speculation, preserves access size,
 * requires strict alignment and can also force write responses to come from the
 * endpoint.
 */
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
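
/*
 * Usage sketch (illustrative only; remap_pfn_range() lives in <linux/mm.h>
 * and the driver context is hypothetical): a driver exposing a
 * write-combined framebuffer to userspace would rewrite the vma protections
 * with one of the helpers above before installing the mapping:
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 */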

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & PMD_TABLE_BIT))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)
#define pmd_leaf(pmd)		pmd_sect(pmd)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

extern pgd_t init_pg_dir[PTRS_PER_PGD];
extern pgd_t init_pg_end[];
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_end[];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];

extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

static inline bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
	       ((unsigned long)swapper_pg_dir & PAGE_MASK);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif /* __PAGETABLE_PMD_FOLDED */

	WRITE_ONCE(*pmdp, pmd);

	if (pmd_valid(pmd)) {
		dsb(ishst);
		isb();
	}
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}

static inline void pte_unmap(pte_t *pte) { }

/* Find an entry in the third-level page table. */
#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
#define pte_offset_kernel(dir,addr)	((pte_t *)__va(pte_offset_phys((dir), (addr))))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)			phys_to_page(__pmd_to_phys(pmd))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & PUD_TABLE_BIT))
#define pud_present(pud)	pte_present(pud_pte(pud))
#define pud_leaf(pud)		pud_sect(pud)
#define pud_valid(pud)		pte_valid(pud_pte(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
#ifdef __PAGETABLE_PUD_FOLDED
	if (in_swapper_pgdir(pudp)) {
		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
		return;
	}
#endif /* __PAGETABLE_PUD_FOLDED */

	WRITE_ONCE(*pudp, pud);

	if (pud_valid(pud)) {
		dsb(ishst);
		isb();
	}
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}

/* Find an entry in the second-level page table. */
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))
#define pmd_offset(dir, addr)		((pmd_t *)__va(pmd_offset_phys((dir), (addr))))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)			phys_to_page(__pud_to_phys(pud))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm-generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(pud)		__pud_error(__FILE__, __LINE__, pud_val(pud))

#define p4d_none(p4d)		(!p4d_val(p4d))
#define p4d_bad(p4d)		(!(p4d_val(p4d) & 2))
#define p4d_present(p4d)	(p4d_val(p4d))

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	if (in_swapper_pgdir(p4dp)) {
		set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
		return;
	}

	WRITE_ONCE(*p4dp, p4d);
	dsb(ishst);
	isb();
}

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d(0));
}

static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
{
	return __p4d_to_phys(p4d);
}

/* Find an entry in the first-level page table. */
#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

#define pud_offset_phys(dir, addr)	(p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
#define pud_offset(dir, addr)		((pud_t *)__va(pud_offset_phys((dir), (addr))))

#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(p4d, addr)	pud_set_fixmap(pud_offset_phys(p4d, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)

#define p4d_page(p4d)		pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define p4d_page_paddr(p4d)	({ BUILD_BUG(); 0; })
#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0; })

/* Match pud_offset folding in <asm-generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset_raw(pgd, addr)	((pgd) + pgd_index(addr))

#define pgd_offset(mm, addr)	(pgd_offset_raw((mm)->pgd, (addr)))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
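
/*
 * Illustrative walk (a sketch, not an API defined here): finding the pte
 * that maps a kernel virtual address, assuming the folded-p4d accessors
 * (p4d_offset() etc.) supplied by the generic headers:
 *
 *	pgd_t *pgdp = pgd_offset_k(addr);
 *	p4d_t *p4dp = p4d_offset(pgdp, addr);	(no-op when p4d is folded)
 *	pud_t *pudp = pud_offset(p4dp, addr);
 *	pmd_t *pmdp = pmd_offset(pudp, addr);
 *	pte_t *ptep = pte_offset_kernel(pmdp, addr);
 *
 * A real walker must test each level with p?d_none()/p?d_bad() on a
 * READ_ONCE() snapshot before descending, since the tables may be
 * modified concurrently.
 */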

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

static inline int pud_devmap(pud_t pud)
{
	return 0;
}

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	return __ptep_test_and_clear_young(ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	int young = ptep_test_and_clear_young(vma, address, ptep);

	if (young) {
		/*
		 * We can elide the trailing DSB here since the worst that can
		 * happen is that a CPU continues to use the young entry in its
		 * TLB and we mistakenly reclaim the associated page. The
		 * window for such an event is bounded by the next
		 * context-switch, which provides a DSB to complete the TLB
		 * invalidation.
		 */
		flush_tlb_page_nosync(vma, address);
	}

	return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(xchg_relaxed(&pte_val(*ptep), 0));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		/*
		 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
		 * clear), set the PTE_DIRTY bit.
		 */
		if (pte_hw_dirty(pte))
			pte = pte_mkdirty(pte);
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
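
/*
 * Worked example (illustrative): __swp_entry(3, 0x1234) encodes the type in
 * bits 2-7 and the offset in bits 8-57:
 *
 *	(3 << 2) | (0x1234 << 8) = 0x12340c
 *
 * Bits 0-1 remain zero, so the resulting pte is !pte_present() and faults
 * on access; __swp_type() and __swp_offset() recover 3 and 0x1234.
 */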

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

#include <asm-generic/pgtable.h>

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
#define phys_to_ttbr(addr)	(addr)
#endif

/*
 * On arm64 without hardware Access Flag, copying from user will fail because
 * the pte is old and cannot be marked young. So we always end up with zeroed
 * page after fork() + CoW for pfn mappings. We don't always have a
 * hardware-managed access flag on arm64.
 */
static inline bool arch_faults_on_old_pte(void)
{
	WARN_ON(preemptible());

	return !cpu_has_hw_af();
}
#define arch_faults_on_old_pte arch_faults_on_old_pte

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */