/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
 *	and fixed mappings
 */
#define VMALLOC_START		(MODULES_END)
#define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
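/*
 * Illustrative note (not from the original header): biasing vmemmap by
 * memstart_addr >> PAGE_SHIFT lets the generic pfn-to-page arithmetic
 * work even when RAM does not start at physical address 0:
 *
 *	struct page *page = vmemmap + pfn;
 *
 * lands exactly at VMEMMAP_START for the first pfn of RAM, i.e. when
 * pfn == memstart_addr >> PAGE_SHIFT.
 */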

#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>

extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))

/*
 * Macros to convert between a physical address and its placement in a
 * page table entry, taking care of 52-bit addresses.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
#define __pte_to_phys(pte)	\
	((pte_val(pte) & PTE_ADDR_LOW) | ((pte_val(pte) & PTE_ADDR_HIGH) << 36))
#define __phys_to_pte_val(phys)	(((phys) | ((phys) >> 36)) & PTE_ADDR_MASK)
#else
#define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_MASK)
#define __phys_to_pte_val(phys)	(phys)
#endif
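/*
 * Worked example (illustrative only, assuming 52-bit PAs with 64K pages,
 * where PTE_ADDR_HIGH covers pte bits [15:12] holding PA bits [51:48]):
 *
 *	phys_addr_t pa  = 0x0009000000060000UL;	// PA bits [51:48] = 0x9
 *	pteval_t    val = __phys_to_pte_val(pa);	// 0x0000000000069000
 *
 * The high PA bits are folded down into pte bits [15:12], which are free
 * because the page size is 64K; __pte_to_phys() shifts them back up by 36
 * to reconstruct the original 52-bit address.
 */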

#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	\
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

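/*
 * Illustrative example (not part of this header): with 4K pages,
 * CONT_PTE_SIZE is 16 * 4K = 64K, so for addr = 0x11000 and end = 0x30000
 * pte_cont_addr_end() returns 0x20000, the next 64K-aligned boundary.
 * The "- 1" comparisons keep the macro correct when end sits at the very
 * top of the address space, where addr + CONT_PTE_SIZE would wrap to 0.
 */
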
#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
/*
 * Execute-only user mappings do not have the PTE_USER bit set. All valid
 * kernel mappings have the PTE_UXN bit set.
 */
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
#define pte_valid_young(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))

/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))

/*
 * p??_access_permitted() is true for valid user mappings (subject to the
 * write permission check) other than user execute-only, which do not have
 * the PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
 */
#define pte_access_permitted(pte, write) \
	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte))
		dsb(ishst);
}

extern void __sync_icache_dcache(pte_t pteval);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0      |     1          0          0
 *   0      1      |     1          1          0
 *   1      0      |     1          0          1
 *   1      1      |     0          1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
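/*
 * Illustrative example (not part of this header): with hardware DBM, a
 * clean, writable pte is installed as (PTE_RDONLY | PTE_WRITE), matching
 * row two of the table above. On the first write the MMU itself clears
 * PTE_RDONLY, moving the entry to row four, so pte_hw_dirty() - i.e.
 * PTE_WRITE && !PTE_RDONLY - now reports the page dirty without taking
 * a permission fault.
 */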
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	pte_t old_pte;

	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	/*
	 * If the existing pte is valid, check for potential race with
	 * hardware updates of the pte (ptep_set_access_flags safely changes
	 * valid ptes without going through an invalid entry).
	 */
	old_pte = READ_ONCE(*ptep);
	if (IS_ENABLED(CONFIG_DEBUG_VM) && pte_valid(old_pte) && pte_valid(pte) &&
	    (mm == current->active_mm || atomic_read(&mm->mm_users) > 1)) {
		VM_WARN_ONCE(!pte_young(pte),
			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(old_pte), pte_val(pte));
		VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
			     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(old_pte), pte_val(pte));
	}

	set_pte(ptep, pte);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	pteval_t lhs, rhs;

	lhs = pte_val(pte_a);
	rhs = pte_val(pte_b);

	if (pte_present(pte_a))
		lhs &= ~PTE_RDONLY;

	if (pte_present(pte_b))
		rhs &= ~PTE_RDONLY;

	return (lhs == rhs);
}
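/*
 * Note (illustrative, not from the original header): PTE_RDONLY is masked
 * out above because it can change underneath us - hardware DBM clears it
 * on a write, and pte_mkdirty()/ptep_set_wrprotect() toggle it in
 * software. Two present ptes differing only in PTE_RDONLY map the same
 * page with the same logical permissions, so they should compare equal.
 */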

/*
 * Huge pte definitions.
 */
#define pte_huge(pte)		(!(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_sect_prot(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_present(pmd)	pte_present(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))

#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_write(pud)		pte_write(pud_pte(pud))

#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
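/*
 * Typical usage (illustrative only, not from the original header): a
 * driver mapping device memory into userspace from its mmap() handler
 * would do something like
 *
 *	vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 *
 * picking pgprot_writecombine() instead for buffers such as framebuffers
 * where gathering and reordering of writes is acceptable.
 */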
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & PMD_TABLE_BIT))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
#define pud_sect(pud)		(0)
#define pud_table(pud)		(1)
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
	dsb(ishst);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}

/* Find an entry in the third-level page table. */
#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
#define pte_offset_kernel(dir,addr)	((pte_t *)__va(pte_offset_phys((dir), (addr))))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(__pmd_to_phys(pmd)))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & PUD_TABLE_BIT))
#define pud_present(pud)	pte_present(pud_pte(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	WRITE_ONCE(*pudp, pud);
	dsb(ishst);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}

/* Find an entry in the second-level page table. */
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))
#define pmd_offset(dir, addr)		((pmd_t *)__va(pmd_offset_phys((dir), (addr))))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)		pfn_to_page(__phys_to_pfn(__pud_to_phys(pud)))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(pud)		__pud_error(__FILE__, __LINE__, pud_val(pud))

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		(!(pgd_val(pgd) & 2))
#define pgd_present(pgd)	(pgd_val(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	WRITE_ONCE(*pgdp, pgd);
	dsb(ishst);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
	return __pgd_to_phys(pgd);
}

/* Find an entry in the first-level page table. */
#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

#define pud_offset_phys(dir, addr)	(pgd_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
#define pud_offset(dir, addr)		((pud_t *)__va(pud_offset_phys((dir), (addr))))

#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(pgd, addr)	pud_set_fixmap(pud_offset_phys(pgd, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(__pgd_to_phys(pgd)))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0;})

/* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset_raw(pgd, addr)	((pgd) + pgd_index(addr))

#define pgd_offset(mm, addr)	(pgd_offset_raw((mm)->pgd, (addr)))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
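/*
 * Illustrative walk (not part of this header): descending from the kernel
 * pgd to the pte mapping a kernel virtual address uses the per-level
 * offset helpers defined above:
 *
 *	pgd_t *pgdp = pgd_offset_k(addr);
 *	pud_t *pudp = pud_offset(pgdp, addr);
 *	pmd_t *pmdp = pmd_offset(pudp, addr);
 *	pte_t *ptep = pte_offset_kernel(pmdp, addr);
 *
 * with pgd_none()/pud_none()/pmd_none() checks at each level before
 * descending further; with fewer than four translation levels the
 * intermediate steps fold away via <asm-generic/pgtable-nop?d.h>.
 */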

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}
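/*
 * Note (illustrative, not from the original header): the cmpxchg_relaxed()
 * loop above is the standard lock-free read-modify-write pattern. If the
 * MMU (via a hardware AF/DBM update) or another CPU changes the pte
 * between the READ_ONCE() and the cmpxchg, the compare fails, cmpxchg
 * returns the fresh value, and the loop retries against that value.
 */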

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	return __ptep_test_and_clear_young(ptep);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(xchg_relaxed(&pte_val(*ptep), 0));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		/*
		 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
		 * clear), set the PTE_DIRTY bit.
		 */
		if (pte_hw_dirty(pte))
			pte = pte_mkdirty(pte);
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}
698
699#ifdef CONFIG_TRANSPARENT_HUGEPAGE
700#define __HAVE_ARCH_PMDP_SET_WRPROTECT
701static inline void pmdp_set_wrprotect(struct mm_struct *mm,
702 unsigned long address, pmd_t *pmdp)
703{
704 ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
705}
1d78a62c
CM
706
707#define pmdp_establish pmdp_establish
708static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
709 unsigned long address, pmd_t *pmdp, pmd_t pmd)
710{
711 return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
712}
2f4b829c 713#endif
2f4b829c 714
4f04d8f0 715extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
0370b31e 716extern pgd_t swapper_pg_end[];
4f04d8f0 717extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
51a0048b 718extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
4f04d8f0 719
/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
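/*
 * Worked example (illustrative only): __swp_entry(3, 0x1234) yields
 *
 *	(3 << 2) | (0x1234 << 8) = 0xc | 0x123400 = 0x12340c
 *
 * Bits 0-1 stay zero, so the resulting pte is neither valid nor
 * PROT_NONE and any access faults; __swp_type() and __swp_offset()
 * recover 3 and 0x1234 from the entry.
 */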

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

#include <asm-generic/pgtable.h>

void pgd_cache_init(void);
#define pgtable_cache_init	pgd_cache_init

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#define kc_vaddr_to_offset(v)	((v) & ~VA_START)
#define kc_offset_to_vaddr(o)	((o) | VA_START)

#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
#define phys_to_ttbr(addr)	(addr)
#endif
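/*
 * Illustrative example (not part of this header): with 52-bit PAs the
 * TTBR base address field cannot hold PA bits [51:48] directly, so they
 * are folded into TTBR bits [5:2]. Shifting right by 46 moves PA bit 48
 * down to bit 2; e.g. a table at PA 0x0009000000010000 yields a TTBR
 * value of 0x0000000000010024 (high nibble 0x9 << 2 = 0x24).
 */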

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */