b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_PGTABLE_H
3#define _LINUX_PGTABLE_H
1da177e4 4
f25748e3 5#include <linux/pfn.h>
ca5999fd 6#include <asm/pgtable.h>
f25748e3 7
8#define PMD_ORDER (PMD_SHIFT - PAGE_SHIFT)
9#define PUD_ORDER (PUD_SHIFT - PAGE_SHIFT)
10
673eae82 11#ifndef __ASSEMBLY__
9535239f 12#ifdef CONFIG_MMU
673eae82 13
fbd71844 14#include <linux/mm_types.h>
187f1882 15#include <linux/bug.h>
e61ce6ad 16#include <linux/errno.h>
5a281062 17#include <asm-generic/pgtable_uffd.h>
de8c8e52 18#include <linux/page_table_check.h>
fbd71844 19
20#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
21 defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
22#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
23#endif
24
25/*
26 * On almost all architectures and configurations, 0 can be used as the
27 * upper ceiling to free_pgtables(): on many architectures it has the same
28 * effect as using TASK_SIZE. However, there is one configuration which
29 * must impose a more careful limit, to avoid freeing kernel pgtables.
30 */
31#ifndef USER_PGTABLES_CEILING
32#define USER_PGTABLES_CEILING 0UL
33#endif
34
35/*
36 * This defines the first usable user address. Platforms
37 * can override its value with custom FIRST_USER_ADDRESS
38 * defined in their respective <asm/pgtable.h>.
39 */
40#ifndef FIRST_USER_ADDRESS
41#define FIRST_USER_ADDRESS 0UL
42#endif
43
/*
 * This defines the generic helper for accessing the PMD page
 * table page. Platforms can still override it via their
 * respective <asm/pgtable.h>.
 */
49#ifndef pmd_pgtable
50#define pmd_pgtable(pmd) pmd_page(pmd)
51#endif
6ee8630e 52
/*
 * A page table page can be thought of as an array like this:
 * pXd_t[PTRS_PER_PxD]
 *
 * The pXx_index() functions return the index of the entry in the page
 * table page which would control the given virtual address.
 *
 * As these functions may be used by the same code for different levels of
 * the page table folding, they are always available, regardless of the
 * CONFIG_PGTABLE_LEVELS value. For the folded levels they simply return 0
 * because in such cases PTRS_PER_PxD equals 1.
 */
64
65static inline unsigned long pte_index(unsigned long address)
66{
67 return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
68}
69
70#ifndef pmd_index
71static inline unsigned long pmd_index(unsigned long address)
72{
73 return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
74}
75#define pmd_index pmd_index
76#endif
77
78#ifndef pud_index
79static inline unsigned long pud_index(unsigned long address)
80{
81 return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
82}
83#define pud_index pud_index
84#endif
85
86#ifndef pgd_index
87/* Must be a compile-time constant, so implement it as a macro */
88#define pgd_index(a) (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
89#endif
90
91#ifndef pte_offset_kernel
92static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
93{
94 return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
95}
96#define pte_offset_kernel pte_offset_kernel
97#endif
98
99#ifdef CONFIG_HIGHPTE
100#define __pte_map(pmd, address) \
101 ((pte_t *)kmap_local_page(pmd_page(*(pmd))) + pte_index((address)))
102#define pte_unmap(pte) do { \
103 kunmap_local((pte)); \
a349d72f 104 rcu_read_unlock(); \
0d940a9b 105} while (0)
974b9b2c 106#else
107static inline pte_t *__pte_map(pmd_t *pmd, unsigned long address)
108{
109 return pte_offset_kernel(pmd, address);
110}
111static inline void pte_unmap(pte_t *pte)
112{
a349d72f 113 rcu_read_unlock();
0d940a9b 114}
115#endif
116
117void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable);
118
/* Find an entry in the second-level page table. */
120#ifndef pmd_offset
121static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
122{
9cf6fa24 123 return pud_pgtable(*pud) + pmd_index(address);
124}
125#define pmd_offset pmd_offset
126#endif
127
128#ifndef pud_offset
129static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
130{
dc4875f0 131 return p4d_pgtable(*p4d) + pud_index(address);
132}
133#define pud_offset pud_offset
134#endif
135
136static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address)
137{
138 return (pgd + pgd_index(address));
139};
140
141/*
142 * a shortcut to get a pgd_t in a given mm
143 */
144#ifndef pgd_offset
145#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address))
146#endif
147
148/*
149 * a shortcut which implies the use of the kernel's pgd, instead
150 * of a process's
151 */
bd05220c 152#ifndef pgd_offset_k
974b9b2c 153#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
bd05220c 154#endif
974b9b2c 155
/*
 * In many cases it is known that a virtual address is mapped at PMD or PTE
 * level, so instead of traversing all the page table levels, we can get a
 * pointer to the PMD entry in a user or kernel page table, or translate a
 * virtual address to a pointer to its PTE in the kernel page tables, with
 * these simple helpers.
 */
163static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
164{
165 return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
166}
167
168static inline pmd_t *pmd_off_k(unsigned long va)
169{
170 return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
171}
172
173static inline pte_t *virt_to_kpte(unsigned long vaddr)
174{
175 pmd_t *pmd = pmd_off_k(vaddr);
176
177 return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
178}
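
/*
 * Usage sketch (illustrative only, not part of this header's API): a
 * hypothetical debug helper that checks whether a kernel virtual address
 * currently has a PTE-level mapping.
 *
 *	static bool kaddr_has_pte(unsigned long vaddr)
 *	{
 *		pte_t *ptep = virt_to_kpte(vaddr);
 *
 *		return ptep && pte_present(ptep_get(ptep));
 *	}
 *
 * virt_to_kpte() returns NULL when the PMD is none; the range may still be
 * covered by a huge PMD/PUD mapping, which this sketch ignores.
 */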
179
180#ifndef pmd_young
181static inline int pmd_young(pmd_t pmd)
182{
183 return 0;
184}
185#endif
186
/*
 * A facility to provide lazy MMU batching. This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued. Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window. Note that using this
 * interface requires that read hazards be removed from the code. A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date. This mode can only be entered and left under the protection of
 * the page table locks for all page tables which may be modified. In the UP
 * case, this is required so that preemption is disabled, and in the SMP case,
 * it must synchronize the delayed page table writes properly on other CPUs.
 */
202#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
203#define arch_enter_lazy_mmu_mode() do {} while (0)
204#define arch_leave_lazy_mmu_mode() do {} while (0)
205#define arch_flush_lazy_mmu_mode() do {} while (0)
206#endif
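
/*
 * Minimal usage sketch (illustrative only): batch several PTE updates under
 * the page table lock; set_ptes() below follows the same pattern.
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (i = 0; i < nr; i++, ptep++)
 *		set_pte(ptep, pte_mkold(ptep_get(ptep)));
 *	arch_leave_lazy_mmu_mode();
 *
 * On architectures without lazy MMU support these hooks compile to no-ops.
 */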
207
208#ifndef set_ptes
209/**
210 * set_ptes - Map consecutive pages to a contiguous range of addresses.
211 * @mm: Address space to map the pages into.
212 * @addr: Address to map the first page at.
213 * @ptep: Page table pointer for the first entry.
214 * @pte: Page table entry for the first page.
215 * @nr: Number of pages to map.
216 *
217 * May be overridden by the architecture, or the architecture can define
218 * set_pte() and PFN_PTE_SHIFT.
219 *
220 * Context: The caller holds the page table lock. The pages all belong
221 * to the same folio. The PTEs are all in the same PMD.
222 */
223static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
224 pte_t *ptep, pte_t pte, unsigned int nr)
225{
226 page_table_check_ptes_set(mm, ptep, pte, nr);
227
228 arch_enter_lazy_mmu_mode();
229 for (;;) {
230 set_pte(ptep, pte);
231 if (--nr == 0)
232 break;
233 ptep++;
234 pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
235 }
236 arch_leave_lazy_mmu_mode();
237}
bcc6cc83 238#endif
bcc6cc83 239#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
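
/*
 * Example (illustrative sketch, not an additional API): map all pages of a
 * folio contiguously with one call, with the page table lock held and "ptep"
 * pointing at the PTE for "addr".  Variable names here are hypothetical.
 *
 *	pte_t pte = mk_pte(&folio->page, vma->vm_page_prot);
 *
 *	set_ptes(vma->vm_mm, addr, ptep, pte, folio_nr_pages(folio));
 *
 * All PTEs must land in the same PMD and all pages must belong to the same
 * folio, as documented above.
 */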
bcc6cc83 240
1da177e4 241#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
242extern int ptep_set_access_flags(struct vm_area_struct *vma,
243 unsigned long address, pte_t *ptep,
244 pte_t entry, int dirty);
245#endif
246
247#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
bd5e88ad 248#ifdef CONFIG_TRANSPARENT_HUGEPAGE
249extern int pmdp_set_access_flags(struct vm_area_struct *vma,
250 unsigned long address, pmd_t *pmdp,
251 pmd_t entry, int dirty);
252extern int pudp_set_access_flags(struct vm_area_struct *vma,
253 unsigned long address, pud_t *pudp,
254 pud_t entry, int dirty);
255#else
256static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
257 unsigned long address, pmd_t *pmdp,
258 pmd_t entry, int dirty)
259{
260 BUILD_BUG();
261 return 0;
262}
263static inline int pudp_set_access_flags(struct vm_area_struct *vma,
264 unsigned long address, pud_t *pudp,
265 pud_t entry, int dirty)
266{
267 BUILD_BUG();
268 return 0;
269}
bd5e88ad 270#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
271#endif
272
273#ifndef ptep_get
274static inline pte_t ptep_get(pte_t *ptep)
275{
276 return READ_ONCE(*ptep);
277}
278#endif
279
280#ifndef pmdp_get
281static inline pmd_t pmdp_get(pmd_t *pmdp)
282{
283 return READ_ONCE(*pmdp);
284}
285#endif
286
1da177e4 287#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
288static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
289 unsigned long address,
290 pte_t *ptep)
291{
c33c7948 292 pte_t pte = ptep_get(ptep);
293 int r = 1;
294 if (!pte_young(pte))
295 r = 0;
296 else
297 set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
298 return r;
299}
300#endif
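
/*
 * Usage sketch (illustrative only): page aging in reclaim-style code, which
 * clears the accessed bit and remembers whether it was set.  The caller
 * holds the PTE lock; names are hypothetical.
 *
 *	if (ptep_test_and_clear_young(vma, addr, ptep))
 *		referenced = true;
 *
 * Any TLB flush that is required is left to ptep_clear_flush_young()
 * (declared below) or to the caller.
 */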
301
302#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
eed9a328 303#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
304static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
305 unsigned long address,
306 pmd_t *pmdp)
307{
308 pmd_t pmd = *pmdp;
309 int r = 1;
310 if (!pmd_young(pmd))
311 r = 0;
312 else
313 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
314 return r;
315}
bd5e88ad 316#else
317static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
318 unsigned long address,
319 pmd_t *pmdp)
320{
bd5e88ad 321 BUILD_BUG();
322 return 0;
323}
eed9a328 324#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */
325#endif
326
327#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
328int ptep_clear_flush_young(struct vm_area_struct *vma,
329 unsigned long address, pte_t *ptep);
330#endif
331
332#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
333#ifdef CONFIG_TRANSPARENT_HUGEPAGE
334extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
335 unsigned long address, pmd_t *pmdp);
336#else
/*
 * Despite being relevant only to THP, this API is called from generic rmap
 * code under PageTransHuge(), hence it needs a dummy implementation for !THP.
 */
341static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
342 unsigned long address, pmd_t *pmdp)
343{
344 BUILD_BUG();
345 return 0;
346}
347#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
348#endif
349
350#ifndef arch_has_hw_nonleaf_pmd_young
351/*
352 * Return whether the accessed bit in non-leaf PMD entries is supported on the
353 * local CPU.
354 */
355static inline bool arch_has_hw_nonleaf_pmd_young(void)
356{
357 return IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG);
358}
359#endif
360
361#ifndef arch_has_hw_pte_young
/*
 * Return whether the accessed bit is supported on the local CPU.
 *
 * This stub assumes that accessing through an old PTE triggers a page fault.
 * Architectures that automatically set the access bit should override it.
 */
368static inline bool arch_has_hw_pte_young(void)
369{
370 return false;
371}
372#endif
373
374#ifndef arch_check_zapped_pte
375static inline void arch_check_zapped_pte(struct vm_area_struct *vma,
376 pte_t pte)
377{
378}
379#endif
380
381#ifndef arch_check_zapped_pmd
382static inline void arch_check_zapped_pmd(struct vm_area_struct *vma,
383 pmd_t pmd)
384{
385}
386#endif
387
1da177e4 388#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
389static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
390 unsigned long address,
391 pte_t *ptep)
392{
c33c7948 393 pte_t pte = ptep_get(ptep);
e2cda322 394 pte_clear(mm, address, ptep);
aa232204 395 page_table_check_pte_clear(mm, pte);
396 return pte;
397}
398#endif
399
400static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
401 pte_t *ptep)
402{
403 ptep_get_and_clear(mm, addr, ptep);
404}
de8c8e52 405
6ca297d4 406#ifdef CONFIG_GUP_GET_PXX_LOW_HIGH
2a4a06da 407/*
 * For walking the pagetables without holding any locks. Some architectures
 * (e.g. x86-32 PAE) cannot load the entries atomically without using expensive
 * instructions. We are guaranteed that a PTE will only either go from not
 * present to present, or present to not present -- it will not switch to a
 * completely different present page without a TLB flush in between, which we
 * prevent by holding interrupts off.
414 *
415 * Setting ptes from not present to present goes:
416 *
417 * ptep->pte_high = h;
418 * smp_wmb();
419 * ptep->pte_low = l;
420 *
421 * And present to not present goes:
422 *
423 * ptep->pte_low = 0;
424 * smp_wmb();
425 * ptep->pte_high = 0;
426 *
427 * We must ensure here that the load of pte_low sees 'l' IFF pte_high sees 'h'.
428 * We load pte_high *after* loading pte_low, which ensures we don't see an older
429 * value of pte_high. *Then* we recheck pte_low, which ensures that we haven't
430 * picked up a changed pte high. We might have gotten rubbish values from
431 * pte_low and pte_high, but we are guaranteed that pte_low will not have the
432 * present bit set *unless* it is 'l'. Because get_user_pages_fast() only
433 * operates on present ptes we're safe.
434 */
435static inline pte_t ptep_get_lockless(pte_t *ptep)
436{
437 pte_t pte;
438
439 do {
440 pte.pte_low = ptep->pte_low;
441 smp_rmb();
442 pte.pte_high = ptep->pte_high;
443 smp_rmb();
444 } while (unlikely(pte.pte_low != ptep->pte_low));
445
446 return pte;
447}
448#define ptep_get_lockless ptep_get_lockless
449
450#if CONFIG_PGTABLE_LEVELS > 2
451static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
452{
453 pmd_t pmd;
454
455 do {
456 pmd.pmd_low = pmdp->pmd_low;
457 smp_rmb();
458 pmd.pmd_high = pmdp->pmd_high;
459 smp_rmb();
460 } while (unlikely(pmd.pmd_low != pmdp->pmd_low));
461
462 return pmd;
463}
464#define pmdp_get_lockless pmdp_get_lockless
146b42e0 465#define pmdp_get_lockless_sync() tlb_remove_table_sync_one()
024d232a 466#endif /* CONFIG_PGTABLE_LEVELS > 2 */
6ca297d4 467#endif /* CONFIG_GUP_GET_PXX_LOW_HIGH */
024d232a 468
469/*
470 * We require that the PTE can be read atomically.
471 */
024d232a 472#ifndef ptep_get_lockless
473static inline pte_t ptep_get_lockless(pte_t *ptep)
474{
475 return ptep_get(ptep);
476}
477#endif
478
479#ifndef pmdp_get_lockless
480static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
481{
482 return pmdp_get(pmdp);
483}
484static inline void pmdp_get_lockless_sync(void)
485{
486}
024d232a 487#endif
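
/*
 * Usage sketch (illustrative only): a GUP-fast style lockless walker reads
 * an entry with a *_lockless() helper, works on the snapshot, and then
 * revalidates before committing to it:
 *
 *	pte_t pte = ptep_get_lockless(ptep);
 *
 *	if (!pte_present(pte))
 *		goto fallback;
 *	...
 *	if (unlikely(!pte_same(pte, ptep_get(ptep))))
 *		goto fallback;
 *
 * Interrupts (or an equivalent guarantee) must be disabled around the walk,
 * as explained in the comment above the PAE version of ptep_get_lockless().
 */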
2a4a06da 488
e2cda322 489#ifdef CONFIG_TRANSPARENT_HUGEPAGE
a00cc7d9 490#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
491static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
492 unsigned long address,
493 pmd_t *pmdp)
494{
495 pmd_t pmd = *pmdp;
de8c8e52 496
2d28a227 497 pmd_clear(pmdp);
1831414c 498 page_table_check_pmd_clear(mm, pmd);
de8c8e52 499
e2cda322 500 return pmd;
49b24d6b 501}
502#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
503#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
504static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
505 unsigned long address,
506 pud_t *pudp)
507{
508 pud_t pud = *pudp;
509
510 pud_clear(pudp);
931c38e1 511 page_table_check_pud_clear(mm, pud);
de8c8e52 512
513 return pud;
514}
515#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
e2cda322 516#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1da177e4 517
fcbe08d6 518#ifdef CONFIG_TRANSPARENT_HUGEPAGE
a00cc7d9 519#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
93a98695 520static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
521 unsigned long address, pmd_t *pmdp,
522 int full)
523{
93a98695 524 return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
fcbe08d6 525}
526#endif
527
a00cc7d9 528#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
f32928ab 529static inline pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma,
530 unsigned long address, pud_t *pudp,
531 int full)
532{
f32928ab 533 return pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
534}
535#endif
536#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
537
a600388d 538#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
539static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
540 unsigned long address, pte_t *ptep,
541 int full)
542{
d3a89233 543 return ptep_get_and_clear(mm, address, ptep);
e2cda322 544}
545#endif
546
547
/*
 * If two threads concurrently fault at the same page, the thread that
 * won the race updates the PTE and its local TLB/Cache. The other thread
 * gives up, simply does nothing, and continues; on architectures where
 * software can update the TLB, the local TLB can be updated here to avoid
 * the next page fault. This function updates the TLB only; it does nothing
 * with the cache or anything else. That is the difference from
 * update_mmu_cache().
 */
556#ifndef __HAVE_ARCH_UPDATE_MMU_TLB
557static inline void update_mmu_tlb(struct vm_area_struct *vma,
558 unsigned long address, pte_t *ptep)
559{
560}
561#define __HAVE_ARCH_UPDATE_MMU_TLB
562#endif
563
/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTEs which are already
 * not present, or during the destruction of an address space.
 */
569#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
570static inline void pte_clear_not_present_full(struct mm_struct *mm,
571 unsigned long address,
572 pte_t *ptep,
573 int full)
574{
575 pte_clear(mm, address, ptep);
576}
577#endif
578
1da177e4 579#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
580extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
581 unsigned long address,
582 pte_t *ptep);
583#endif
584
585#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
586extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
587 unsigned long address,
588 pmd_t *pmdp);
589extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
590 unsigned long address,
591 pud_t *pudp);
592#endif
593
2f0584f3 594#ifndef pte_mkwrite
161e393c 595static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
596{
597 return pte_mkwrite_novma(pte);
598}
599#endif
600
601#if defined(CONFIG_ARCH_WANT_PMD_MKWRITE) && !defined(pmd_mkwrite)
161e393c 602static inline pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
603{
604 return pmd_mkwrite_novma(pmd);
605}
606#endif
607
1da177e4 608#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
8c65b4a6 609struct mm_struct;
610static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
611{
c33c7948 612 pte_t old_pte = ptep_get(ptep);
613 set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
614}
615#endif
616
/*
 * On some architectures the hardware does not set the page access bit when
 * accessing a memory page; it is the responsibility of software to set this
 * bit, which incurs an extra page fault penalty for tracking page accesses.
 * As an optimization, the access bit can be set during the entire page fault
 * path on these architectures.
 * To differentiate it from the pte_mkyoung() macro, this macro is used on
 * platforms where software maintains the page access bit.
 */
625#ifndef pte_sw_mkyoung
626static inline pte_t pte_sw_mkyoung(pte_t pte)
627{
628 return pte;
629}
630#define pte_sw_mkyoung pte_sw_mkyoung
631#endif
632
633#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
634#ifdef CONFIG_TRANSPARENT_HUGEPAGE
635static inline void pmdp_set_wrprotect(struct mm_struct *mm,
636 unsigned long address, pmd_t *pmdp)
637{
638 pmd_t old_pmd = *pmdp;
639 set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
640}
bd5e88ad 641#else
642static inline void pmdp_set_wrprotect(struct mm_struct *mm,
643 unsigned long address, pmd_t *pmdp)
644{
bd5e88ad 645 BUILD_BUG();
646}
647#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
648#endif
649#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
650#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
54a948a1 651#ifdef CONFIG_TRANSPARENT_HUGEPAGE
652static inline void pudp_set_wrprotect(struct mm_struct *mm,
653 unsigned long address, pud_t *pudp)
654{
655 pud_t old_pud = *pudp;
656
657 set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
658}
659#else
660static inline void pudp_set_wrprotect(struct mm_struct *mm,
661 unsigned long address, pud_t *pudp)
662{
663 BUILD_BUG();
664}
54a948a1 665#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
666#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
667#endif
e2cda322 668
669#ifndef pmdp_collapse_flush
670#ifdef CONFIG_TRANSPARENT_HUGEPAGE
671extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
672 unsigned long address, pmd_t *pmdp);
673#else
674static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
675 unsigned long address,
676 pmd_t *pmdp)
677{
678 BUILD_BUG();
679 return *pmdp;
680}
681#define pmdp_collapse_flush pmdp_collapse_flush
682#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
683#endif
684
e3ebcf64 685#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
686extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
687 pgtable_t pgtable);
688#endif
689
690#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
6b0b50b0 691extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
692#endif
693
694#ifndef arch_needs_pgtable_deposit
695#define arch_needs_pgtable_deposit() (false)
696#endif
697
698#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is an implementation of pmdp_establish() that is only suitable for an
 * architecture that doesn't have hardware dirty/accessed bits. In this case we
 * can't race with a CPU which sets these bits, so a non-atomic approach is fine.
 */
704static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
705 unsigned long address, pmd_t *pmdp, pmd_t pmd)
706{
707 pmd_t old_pmd = *pmdp;
708 set_pmd_at(vma->vm_mm, address, pmdp, pmd);
709 return old_pmd;
710}
711#endif
712
46dcde73 713#ifndef __HAVE_ARCH_PMDP_INVALIDATE
d52605d7 714extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
715 pmd_t *pmdp);
716#endif
717
718#ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
719
/*
 * pmdp_invalidate_ad() invalidates the PMD while changing a transparent
 * hugepage mapping in the page tables. This function is similar to
 * pmdp_invalidate(), but should only be used if the access and dirty bits
 * would not be cleared by the software in the new PMD value. The function
 * ensures that hardware updates of the access and dirty bits are not lost.
 *
 * Doing so allows certain architectures to avoid a TLB flush in most cases.
 * Another TLB flush might still be necessary later if the PMD update itself
 * requires one (e.g., if protection was made stricter). Even when a flush is
 * needed because of the update, the caller may be able to batch these TLB
 * flushing operations, so that fewer TLB flushes are needed overall.
 */
734extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
735 unsigned long address, pmd_t *pmdp);
736#endif
737
1da177e4 738#ifndef __HAVE_ARCH_PTE_SAME
739static inline int pte_same(pte_t pte_a, pte_t pte_b)
740{
741 return pte_val(pte_a) == pte_val(pte_b);
742}
743#endif
744
745#ifndef __HAVE_ARCH_PTE_UNUSED
746/*
747 * Some architectures provide facilities to virtualization guests
748 * so that they can flag allocated pages as unused. This allows the
749 * host to transparently reclaim unused pages. This function returns
750 * whether the pte's page is unused.
751 */
752static inline int pte_unused(pte_t pte)
753{
754 return 0;
755}
756#endif
757
758#ifndef pte_access_permitted
759#define pte_access_permitted(pte, write) \
760 (pte_present(pte) && (!(write) || pte_write(pte)))
761#endif
762
763#ifndef pmd_access_permitted
764#define pmd_access_permitted(pmd, write) \
765 (pmd_present(pmd) && (!(write) || pmd_write(pmd)))
766#endif
767
768#ifndef pud_access_permitted
769#define pud_access_permitted(pud, write) \
770 (pud_present(pud) && (!(write) || pud_write(pud)))
771#endif
772
773#ifndef p4d_access_permitted
774#define p4d_access_permitted(p4d, write) \
775 (p4d_present(p4d) && (!(write) || p4d_write(p4d)))
776#endif
777
778#ifndef pgd_access_permitted
779#define pgd_access_permitted(pgd, write) \
780 (pgd_present(pgd) && (!(write) || pgd_write(pgd)))
781#endif
782
e2cda322 783#ifndef __HAVE_ARCH_PMD_SAME
784static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
785{
786 return pmd_val(pmd_a) == pmd_val(pmd_b);
787}
973bf680 788#endif
a00cc7d9 789
973bf680 790#ifndef pud_same
791static inline int pud_same(pud_t pud_a, pud_t pud_b)
792{
793 return pud_val(pud_a) == pud_val(pud_b);
794}
973bf680 795#define pud_same pud_same
796#endif
797
798#ifndef __HAVE_ARCH_P4D_SAME
799static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
800{
801 return p4d_val(p4d_a) == p4d_val(p4d_b);
802}
803#endif
804
805#ifndef __HAVE_ARCH_PGD_SAME
806static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
807{
808 return pgd_val(pgd_a) == pgd_val(pgd_b);
809}
810#endif
811
812/*
813 * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
814 * TLB flush will be required as a result of the "set". For example, use
815 * in scenarios where it is known ahead of time that the routine is
816 * setting non-present entries, or re-setting an existing entry to the
817 * same value. Otherwise, use the typical "set" helpers and flush the
818 * TLB.
819 */
820#define set_pte_safe(ptep, pte) \
821({ \
822 WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
823 set_pte(ptep, pte); \
824})
825
826#define set_pmd_safe(pmdp, pmd) \
827({ \
828 WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
829 set_pmd(pmdp, pmd); \
830})
831
832#define set_pud_safe(pudp, pud) \
833({ \
834 WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
835 set_pud(pudp, pud); \
836})
837
838#define set_p4d_safe(p4dp, p4d) \
839({ \
840 WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
841 set_p4d(p4dp, p4d); \
842})
843
844#define set_pgd_safe(pgdp, pgd) \
845({ \
846 WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
847 set_pgd(pgdp, pgd); \
848})
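
/*
 * Usage sketch (illustrative only): populating brand-new kernel mappings at
 * early boot, where no previous translation can be cached in the TLB, so a
 * _safe variant may be used and the flush elided:
 *
 *	set_pmd_safe(pmdp, pmd);
 *
 * where "pmd" maps a range that was never present before.  If the
 * WARN_ON_ONCE() in the macro fires, a present entry was changed and a TLB
 * flush was actually required; use set_pmd() plus a flush instead.
 */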
849
850#ifndef __HAVE_ARCH_DO_SWAP_PAGE
851/*
852 * Some architectures support metadata associated with a page. When a
853 * page is being swapped out, this metadata must be saved so it can be
854 * restored when the page is swapped back in. SPARC M7 and newer
855 * processors support an ADI (Application Data Integrity) tag for the
856 * page as metadata for the page. arch_do_swap_page() can restore this
857 * metadata when a page is swapped back in.
858 */
859static inline void arch_do_swap_page(struct mm_struct *mm,
860 struct vm_area_struct *vma,
861 unsigned long addr,
862 pte_t pte, pte_t oldpte)
863{
864
865}
866#endif
867
868#ifndef __HAVE_ARCH_UNMAP_ONE
869/*
870 * Some architectures support metadata associated with a page. When a
871 * page is being swapped out, this metadata must be saved so it can be
872 * restored when the page is swapped back in. SPARC M7 and newer
873 * processors support an ADI (Application Data Integrity) tag for the
874 * page as metadata for the page. arch_unmap_one() can save this
875 * metadata on a swap-out of a page.
876 */
877static inline int arch_unmap_one(struct mm_struct *mm,
878 struct vm_area_struct *vma,
879 unsigned long addr,
880 pte_t orig_pte)
881{
882 return 0;
883}
884#endif
885
886/*
887 * Allow architectures to preserve additional metadata associated with
888 * swapped-out pages. The corresponding __HAVE_ARCH_SWAP_* macros and function
889 * prototypes must be defined in the arch-specific asm/pgtable.h file.
890 */
891#ifndef __HAVE_ARCH_PREPARE_TO_SWAP
892static inline int arch_prepare_to_swap(struct page *page)
893{
894 return 0;
895}
896#endif
897
898#ifndef __HAVE_ARCH_SWAP_INVALIDATE
899static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
900{
901}
902
903static inline void arch_swap_invalidate_area(int type)
904{
905}
906#endif
907
908#ifndef __HAVE_ARCH_SWAP_RESTORE
da08e9b7 909static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
910{
911}
912#endif
913
914#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
915#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
916#endif
917
0b0968a3 918#ifndef __HAVE_ARCH_MOVE_PTE
8b1f3124 919#define move_pte(pte, prot, old_addr, new_addr) (pte)
920#endif
921
2c3cf556 922#ifndef pte_accessible
20841405 923# define pte_accessible(mm, pte) ((void)(pte), 1)
924#endif
925
61c77326 926#ifndef flush_tlb_fix_spurious_fault
99c29133 927#define flush_tlb_fix_spurious_fault(vma, address, ptep) flush_tlb_page(vma, address)
928#endif
929
1da177e4 930/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier. Although no
 * vma end wraps to 0, the rounded-up __boundary may wrap to 0 throughout.
 */
935
936#define pgd_addr_end(addr, end) \
937({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
938 (__boundary - 1 < (end) - 1)? __boundary: (end); \
939})
1da177e4 940
941#ifndef p4d_addr_end
942#define p4d_addr_end(addr, end) \
943({ unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK; \
944 (__boundary - 1 < (end) - 1)? __boundary: (end); \
945})
946#endif
947
948#ifndef pud_addr_end
949#define pud_addr_end(addr, end) \
950({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
951 (__boundary - 1 < (end) - 1)? __boundary: (end); \
952})
953#endif
954
955#ifndef pmd_addr_end
956#define pmd_addr_end(addr, end) \
957({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
958 (__boundary - 1 < (end) - 1)? __boundary: (end); \
959})
960#endif
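
/*
 * Typical walk pattern (illustrative sketch only): iterate over a range one
 * PMD-sized step at a time, clamping the last step to "end".
 *
 *	unsigned long next;
 *
 *	do {
 *		next = pmd_addr_end(addr, end);
 *		... operate on [addr, next) using *pmd ...
 *	} while (pmd++, addr = next, addr != end);
 *
 * The same shape is used with pgd/p4d/pud_addr_end() at the outer levels.
 */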
961
962/*
963 * When walking page tables, we usually want to skip any p?d_none entries;
964 * and any p?d_bad entries - reporting the error before resetting to none.
965 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
966 */
967void pgd_clear_bad(pgd_t *);
968
969#ifndef __PAGETABLE_P4D_FOLDED
c2febafc 970void p4d_clear_bad(p4d_t *);
971#else
972#define p4d_clear_bad(p4d) do { } while (0)
973#endif
974
975#ifndef __PAGETABLE_PUD_FOLDED
1da177e4 976void pud_clear_bad(pud_t *);
977#else
#define pud_clear_bad(pud) do { } while (0)
979#endif
980
981void pmd_clear_bad(pmd_t *);
982
983static inline int pgd_none_or_clear_bad(pgd_t *pgd)
984{
985 if (pgd_none(*pgd))
986 return 1;
987 if (unlikely(pgd_bad(*pgd))) {
988 pgd_clear_bad(pgd);
989 return 1;
990 }
991 return 0;
992}
993
994static inline int p4d_none_or_clear_bad(p4d_t *p4d)
995{
996 if (p4d_none(*p4d))
997 return 1;
998 if (unlikely(p4d_bad(*p4d))) {
999 p4d_clear_bad(p4d);
1000 return 1;
1001 }
1002 return 0;
1003}
1004
1005static inline int pud_none_or_clear_bad(pud_t *pud)
1006{
1007 if (pud_none(*pud))
1008 return 1;
1009 if (unlikely(pud_bad(*pud))) {
1010 pud_clear_bad(pud);
1011 return 1;
1012 }
1013 return 0;
1014}
1015
1016static inline int pmd_none_or_clear_bad(pmd_t *pmd)
1017{
1018 if (pmd_none(*pmd))
1019 return 1;
1020 if (unlikely(pmd_bad(*pmd))) {
1021 pmd_clear_bad(pmd);
1022 return 1;
1023 }
1024 return 0;
1025}
9535239f 1026
0cbe3e26 1027static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
1028 unsigned long addr,
1029 pte_t *ptep)
1030{
1031 /*
1032 * Get the current pte state, but zero it out to make it
1033 * non-present, preventing the hardware from asynchronously
1034 * updating it.
1035 */
0cbe3e26 1036 return ptep_get_and_clear(vma->vm_mm, addr, ptep);
1037}
1038
0cbe3e26 1039static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
1040 unsigned long addr,
1041 pte_t *ptep, pte_t pte)
1042{
1043 /*
1044 * The pte is non-present, so there's no hardware state to
1045 * preserve.
1046 */
0cbe3e26 1047 set_pte_at(vma->vm_mm, addr, ptep, pte);
1048}
1049
1050#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
1051/*
1052 * Start a pte protection read-modify-write transaction, which
1053 * protects against asynchronous hardware modifications to the pte.
1054 * The intention is not to prevent the hardware from making pte
1055 * updates, but to prevent any updates it may make from being lost.
1056 *
1057 * This does not protect against other software modifications of the
2eb70aab 1058 * pte; the appropriate pte lock must be held over the transaction.
1059 *
1060 * Note that this interface is intended to be batchable, meaning that
1061 * ptep_modify_prot_commit may not actually update the pte, but merely
1062 * queue the update to be done at some later time. The update must be
1063 * actually committed before the pte lock is released, however.
1064 */
0cbe3e26 1065static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
1066 unsigned long addr,
1067 pte_t *ptep)
1068{
0cbe3e26 1069 return __ptep_modify_prot_start(vma, addr, ptep);
1070}
1071
1072/*
1073 * Commit an update to a pte, leaving any hardware-controlled bits in
1074 * the PTE unmodified.
1075 */
0cbe3e26 1076static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
1ea0704e 1077 unsigned long addr,
04a86453 1078 pte_t *ptep, pte_t old_pte, pte_t pte)
1ea0704e 1079{
0cbe3e26 1080 __ptep_modify_prot_commit(vma, addr, ptep, pte);
1081}
1082#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
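
/*
 * Usage sketch (illustrative only): an mprotect-style protection change of
 * one PTE, with the PTE lock held.  "newprot" is hypothetical.
 *
 *	pte_t oldpte = ptep_modify_prot_start(vma, addr, ptep);
 *	pte_t newpte = pte_modify(oldpte, newprot);
 *
 *	ptep_modify_prot_commit(vma, addr, ptep, oldpte, newpte);
 *
 * Because _start() makes the entry non-present (in the generic version, by
 * clearing it), the hardware cannot update the A/D bits between start and
 * commit, so nothing set before the transaction is lost.
 */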
fe1a6875 1083#endif /* CONFIG_MMU */
1ea0704e 1084
1085/*
1086 * No-op macros that just return the current protection value. Defined here
1067b261 1087 * because these macros can be used even if CONFIG_MMU is not defined.
21729f81 1088 */
1089
1090#ifndef pgprot_nx
1091#define pgprot_nx(prot) (prot)
1092#endif
1093
1094#ifndef pgprot_noncached
1095#define pgprot_noncached(prot) (prot)
1096#endif
1097
1098#ifndef pgprot_writecombine
1099#define pgprot_writecombine pgprot_noncached
1100#endif
1101
1102#ifndef pgprot_writethrough
1103#define pgprot_writethrough pgprot_noncached
1104#endif
1105
1106#ifndef pgprot_device
1107#define pgprot_device pgprot_noncached
1108#endif
1109
1110#ifndef pgprot_mhp
1111#define pgprot_mhp(prot) (prot)
1112#endif
1113
1114#ifdef CONFIG_MMU
1115#ifndef pgprot_modify
1116#define pgprot_modify pgprot_modify
1117static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
1118{
1119 if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
1120 newprot = pgprot_noncached(newprot);
1121 if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
1122 newprot = pgprot_writecombine(newprot);
1123 if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
1124 newprot = pgprot_device(newprot);
1125 return newprot;
1126}
1127#endif
1128#endif /* CONFIG_MMU */
1129
1130#ifndef pgprot_encrypted
1131#define pgprot_encrypted(prot) (prot)
1132#endif
1133
1134#ifndef pgprot_decrypted
1135#define pgprot_decrypted(prot) (prot)
1136#endif
1137
9535239f 1138/*
1139 * A facility to provide batching of the reload of page tables and
1140 * other process state with the actual context switch code for
1141 * paravirtualized guests. By convention, only one of the batched
1142 * update (lazy) modes (CPU, MMU) should be active at any given time,
1143 * entry should never be nested, and entry and exits should always be
1144 * paired. This is for sanity of maintaining and reasoning about the
1145 * kernel code. In this case, the exit (end of the context switch) is
1146 * in architecture-specific code, and so doesn't need a generic
1147 * definition.
9535239f 1148 */
7fd7d83d 1149#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
224101ed 1150#define arch_start_context_switch(prev) do {} while (0)
1151#endif
1152
1153#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
1154#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
1155static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
1156{
1157 return pmd;
1158}
1159
1160static inline int pmd_swp_soft_dirty(pmd_t pmd)
1161{
1162 return 0;
1163}
1164
1165static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
1166{
1167 return pmd;
1168}
1169#endif
1170#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
1171static inline int pte_soft_dirty(pte_t pte)
1172{
1173 return 0;
1174}
1175
1176static inline int pmd_soft_dirty(pmd_t pmd)
1177{
1178 return 0;
1179}
1180
1181static inline pte_t pte_mksoft_dirty(pte_t pte)
1182{
1183 return pte;
1184}
1185
1186static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
1187{
1188 return pmd;
1189}
179ef71c 1190
1191static inline pte_t pte_clear_soft_dirty(pte_t pte)
1192{
1193 return pte;
1194}
1195
1196static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
1197{
1198 return pmd;
1199}
1200
1201static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
1202{
1203 return pte;
1204}
1205
1206static inline int pte_swp_soft_dirty(pte_t pte)
1207{
1208 return 0;
1209}
1210
1211static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
1212{
1213 return pte;
1214}
1215
1216static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
1217{
1218 return pmd;
1219}
1220
1221static inline int pmd_swp_soft_dirty(pmd_t pmd)
1222{
1223 return 0;
1224}
1225
1226static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
1227{
1228 return pmd;
1229}
1230#endif
1231
34801ba9 1232#ifndef __HAVE_PFNMAP_TRACKING
/*
 * Interfaces that can be used by architecture code to keep track of
 * the memory type of pfn mappings specified by remap_pfn_range()
 * and vmf_insert_pfn().
 */
1238
1239/*
1240 * track_pfn_remap is called when a _new_ pfn mapping is being established
1241 * by remap_pfn_range() for physical range indicated by pfn and size.
34801ba9 1242 */
5180da41 1243static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
1244 unsigned long pfn, unsigned long addr,
1245 unsigned long size)
34801ba9 1246{
1247 return 0;
1248}
1249
1250/*
5180da41 1251 * track_pfn_insert is called when a _new_ single pfn is established
67fa1666 1252 * by vmf_insert_pfn().
5180da41 1253 */
1254static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
1255 pfn_t pfn)
5180da41 1256{
1257}
1258
1259/*
 * track_pfn_copy is called when a vma covering the pfnmap gets
 * copied through copy_page_range().
1262 */
5180da41 1263static inline int track_pfn_copy(struct vm_area_struct *vma)
34801ba9 1264{
1265 return 0;
1266}
1267
1268/*
d9fe4fab 1269 * untrack_pfn is called while unmapping a pfnmap for a region.
34801ba9 1270 * untrack can be called for a specific region indicated by pfn and size or
5180da41 1271 * can be for the entire vma (in which case pfn, size are zero).
34801ba9 1272 */
5180da41 1273static inline void untrack_pfn(struct vm_area_struct *vma,
1274 unsigned long pfn, unsigned long size,
1275 bool mm_wr_locked)
34801ba9 1276{
1277}
1278
1279/*
 * untrack_pfn_clear is called while mremapping a pfnmap for a new region
 * or when the page tables fail to be copied while duplicating a vm area.
d9fe4fab 1282 */
d155df53 1283static inline void untrack_pfn_clear(struct vm_area_struct *vma)
1284{
1285}
34801ba9 1286#else
5180da41 1287extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
1288 unsigned long pfn, unsigned long addr,
1289 unsigned long size);
1290extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
1291 pfn_t pfn);
1292extern int track_pfn_copy(struct vm_area_struct *vma);
1293extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
68f48381 1294 unsigned long size, bool mm_wr_locked);
d155df53 1295extern void untrack_pfn_clear(struct vm_area_struct *vma);
34801ba9 1296#endif
1297
9afaf30f 1298#ifdef CONFIG_MMU
1299#ifdef __HAVE_COLOR_ZERO_PAGE
1300static inline int is_zero_pfn(unsigned long pfn)
1301{
1302 extern unsigned long zero_pfn;
1303 unsigned long offset_from_zero_pfn = pfn - zero_pfn;
1304 return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
1305}
1306
1307#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
1308
1309#else
1310static inline int is_zero_pfn(unsigned long pfn)
1311{
1312 extern unsigned long zero_pfn;
1313 return pfn == zero_pfn;
1314}
1315
1316static inline unsigned long my_zero_pfn(unsigned long addr)
1317{
1318 extern unsigned long zero_pfn;
1319 return zero_pfn;
1320}
1321#endif
1322#else
1323static inline int is_zero_pfn(unsigned long pfn)
1324{
1325 return 0;
1326}
1327
1328static inline unsigned long my_zero_pfn(unsigned long addr)
1329{
1330 return 0;
1331}
1332#endif /* CONFIG_MMU */
816422ad 1333
1334#ifdef CONFIG_MMU
1335
1336#ifndef CONFIG_TRANSPARENT_HUGEPAGE
1337static inline int pmd_trans_huge(pmd_t pmd)
1338{
1339 return 0;
1340}
e4e40e02 1341#ifndef pmd_write
1342static inline int pmd_write(pmd_t pmd)
1343{
1344 BUG();
1345 return 0;
1346}
e4e40e02 1347#endif /* pmd_write */
1348#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1349
1350#ifndef pud_write
1351static inline int pud_write(pud_t pud)
1352{
1353 BUG();
1354 return 0;
1355}
1356#endif /* pud_write */
1357
1358#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
1359static inline int pmd_devmap(pmd_t pmd)
1360{
1361 return 0;
1362}
1363static inline int pud_devmap(pud_t pud)
1364{
1365 return 0;
1366}
1367static inline int pgd_devmap(pgd_t pgd)
1368{
1369 return 0;
1370}
1371#endif
1372
a00cc7d9 1373#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
bcd0dea5 1374 !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
1375static inline int pud_trans_huge(pud_t pud)
1376{
1377 return 0;
1378}
1379#endif
1380
feda5c39 1381static inline int pud_trans_unstable(pud_t *pud)
625110b5 1382{
1383#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
1384 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
1385 pud_t pudval = READ_ONCE(*pud);
1386
1387 if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
1388 return 1;
1389 if (unlikely(pud_bad(pudval))) {
1390 pud_clear_bad(pud);
1391 return 1;
1392 }
625110b5 1393#endif
1394 return 0;
1395}
1396
1397#ifndef CONFIG_NUMA_BALANCING
1398/*
1399 * In an inaccessible (PROT_NONE) VMA, pte_protnone() may indicate "yes". It is
1400 * perfectly valid to indicate "no" in that case, which is why our default
1401 * implementation defaults to "always no".
1402 *
1403 * In an accessible VMA, however, pte_protnone() reliably indicates PROT_NONE
1404 * page protection due to NUMA hinting. NUMA hinting faults only apply in
1405 * accessible VMAs.
1406 *
1407 * So, to reliably identify PROT_NONE PTEs that require a NUMA hinting fault,
1408 * looking at the VMA accessibility is sufficient.
1409 */
1410static inline int pte_protnone(pte_t pte)
1411{
1412 return 0;
1413}
1414
1415static inline int pmd_protnone(pmd_t pmd)
1416{
1417 return 0;
1418}
1419#endif /* CONFIG_NUMA_BALANCING */
1420
1a5a9906 1421#endif /* CONFIG_MMU */
5f6e8da7 1422
e61ce6ad 1423#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
1424
1425#ifndef __PAGETABLE_P4D_FOLDED
1426int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
c8db8c26 1427void p4d_clear_huge(p4d_t *p4d);
1428#else
1429static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
1430{
1431 return 0;
1432}
c8db8c26 1433static inline void p4d_clear_huge(p4d_t *p4d) { }
1434#endif /* !__PAGETABLE_P4D_FOLDED */
1435
e61ce6ad 1436int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
c742199a 1437int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
d8a71905 1438int pud_clear_huge(pud_t *pud);
b9820d8f 1439int pmd_clear_huge(pmd_t *pmd);
8e2d4340 1440int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
1441int pud_free_pmd_page(pud_t *pud, unsigned long addr);
1442int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
e61ce6ad 1443#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
1444static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
1445{
1446 return 0;
1447}
1448static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
1449{
1450 return 0;
1451}
1452static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
1453{
1454 return 0;
1455}
c8db8c26 1456static inline void p4d_clear_huge(p4d_t *p4d) { }
1457static inline int pud_clear_huge(pud_t *pud)
1458{
1459 return 0;
1460}
1461static inline int pmd_clear_huge(pmd_t *pmd)
1462{
1463 return 0;
1464}
1465static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
1466{
1467 return 0;
1468}
785a19f9 1469static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
1470{
1471 return 0;
1472}
785a19f9 1473static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
1474{
1475 return 0;
1476}
1477#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
1478
1479#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
1480#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * ARCHes with special requirements for evicting THP backing TLB entries can
 * implement this. Otherwise it can also help optimize the normal TLB flush in
 * the THP regime. Stock flush_tlb_range() typically has an optimization to
 * nuke the entire TLB if the flush span is greater than a threshold, which
 * will likely be true for a single huge page. Thus a single THP flush will
 * invalidate the entire TLB, which is not desirable.
 * e.g. see arch/arc: flush_pmd_tlb_range
 */
1490#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
a00cc7d9 1491#define flush_pud_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
1492#else
1493#define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG()
a00cc7d9 1494#define flush_pud_tlb_range(vma, addr, end) BUILD_BUG()
1495#endif
1496#endif
1497
1498struct file;
1499int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
1500 unsigned long size, pgprot_t *vma_prot);
1501
1502#ifndef CONFIG_X86_ESPFIX64
1503static inline void init_espfix_bsp(void) { }
1504#endif
1505
782de70c 1506extern void __init pgtable_cache_init(void);
caa84136 1507
1508#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
1509static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
1510{
1511 return true;
1512}
1513
1514static inline bool arch_has_pfn_modify_check(void)
1515{
1516 return false;
1517}
1518#endif /* !_HAVE_ARCH_PFN_MODIFY_ALLOWED */
1519
/*
 * Architecture PAGE_KERNEL_* fallbacks
 *
 * Some architectures don't define certain PAGE_KERNEL_* flags. This is either
 * because they really don't support them, or the port needs to be updated to
 * reflect the required functionality. Below are a set of relatively safe,
 * best-effort fallbacks which we can count on until those architectures
 * define the flags on their own.
 */
1529
1530#ifndef PAGE_KERNEL_RO
1531# define PAGE_KERNEL_RO PAGE_KERNEL
1532#endif
1533
1534#ifndef PAGE_KERNEL_EXEC
1535# define PAGE_KERNEL_EXEC PAGE_KERNEL
1536#endif
1537
/*
 * Page Table Modification bits for pgtbl_mod_mask.
 *
 * These are used by the p?d_alloc_track*() set of functions and in the
 * generic vmalloc/ioremap code to track at which page-table levels entries
 * have been modified. Based on that, the code can better decide when vmalloc
 * and ioremap mapping changes need to be synchronized to other page tables
 * in the system.
 */
1546#define __PGTBL_PGD_MODIFIED 0
1547#define __PGTBL_P4D_MODIFIED 1
1548#define __PGTBL_PUD_MODIFIED 2
1549#define __PGTBL_PMD_MODIFIED 3
1550#define __PGTBL_PTE_MODIFIED 4
1551
1552#define PGTBL_PGD_MODIFIED BIT(__PGTBL_PGD_MODIFIED)
1553#define PGTBL_P4D_MODIFIED BIT(__PGTBL_P4D_MODIFIED)
1554#define PGTBL_PUD_MODIFIED BIT(__PGTBL_PUD_MODIFIED)
1555#define PGTBL_PMD_MODIFIED BIT(__PGTBL_PMD_MODIFIED)
1556#define PGTBL_PTE_MODIFIED BIT(__PGTBL_PTE_MODIFIED)
1557
1558/* Page-Table Modification Mask */
1559typedef unsigned int pgtbl_mod_mask;
1560
1561#endif /* !__ASSEMBLY__ */
1562
1563#if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
1564#ifdef CONFIG_PHYS_ADDR_T_64BIT
1565/*
1566 * ZSMALLOC needs to know the highest PFN on 32-bit architectures
1567 * with physical address space extension, but falls back to
1568 * BITS_PER_LONG otherwise.
1569 */
1570#error Missing MAX_POSSIBLE_PHYSMEM_BITS definition
1571#else
1572#define MAX_POSSIBLE_PHYSMEM_BITS 32
1573#endif
1574#endif
1575
fd8cfd30 1576#ifndef has_transparent_hugepage
a38c94ed 1577#define has_transparent_hugepage() IS_BUILTIN(CONFIG_TRANSPARENT_HUGEPAGE)
1578#endif
1579
1580#ifndef has_transparent_pud_hugepage
1581#define has_transparent_pud_hugepage() IS_BUILTIN(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
1582#endif
/*
 * On some architectures it depends on the mm whether the p4d/pud or pmd
 * layer of the page table hierarchy is folded or not.
 */
1587#ifndef mm_p4d_folded
1588#define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED)
1589#endif
1590
1591#ifndef mm_pud_folded
1592#define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED)
1593#endif
1594
1595#ifndef mm_pmd_folded
1596#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED)
1597#endif
1598
1599#ifndef p4d_offset_lockless
1600#define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address)
1601#endif
1602#ifndef pud_offset_lockless
1603#define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address)
1604#endif
1605#ifndef pmd_offset_lockless
1606#define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address)
1607#endif
1608
/*
 * p?d_leaf() - true if this entry is a final mapping to a physical address.
 * This differs from p?d_huge() in that these are always available (if the
 * architecture supports large pages at the appropriate level) even if
 * CONFIG_HUGETLB_PAGE is not defined.
 * Only meaningful when called on a valid entry.
 */
1616#ifndef pgd_leaf
1617#define pgd_leaf(x) 0
1618#endif
1619#ifndef p4d_leaf
1620#define p4d_leaf(x) 0
1621#endif
1622#ifndef pud_leaf
1623#define pud_leaf(x) 0
1624#endif
1625#ifndef pmd_leaf
1626#define pmd_leaf(x) 0
1627#endif
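
/*
 * Usage sketch (illustrative only): a generic walker can stop descending
 * once it hits a leaf entry, e.g.:
 *
 *	pmd_t pmd = pmdp_get(pmdp);
 *
 *	if (pmd_leaf(pmd))
 *		return note_mapping(addr, pmd_leaf_size(pmd));
 *
 * note_mapping() is a hypothetical callback; pmd_leaf_size() is defined
 * just below.
 */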
1628
1629#ifndef pgd_leaf_size
1630#define pgd_leaf_size(x) (1ULL << PGDIR_SHIFT)
1631#endif
1632#ifndef p4d_leaf_size
1633#define p4d_leaf_size(x) P4D_SIZE
1634#endif
1635#ifndef pud_leaf_size
1636#define pud_leaf_size(x) PUD_SIZE
1637#endif
1638#ifndef pmd_leaf_size
1639#define pmd_leaf_size(x) PMD_SIZE
1640#endif
1641#ifndef pte_leaf_size
1642#define pte_leaf_size(x) PAGE_SIZE
1643#endif
1644
1645/*
1646 * Some architectures have MMUs that are configurable or selectable at boot
1647 * time. These lead to variable PTRS_PER_x. For statically allocated arrays it
1648 * helps to have a static maximum value.
1649 */
1650
1651#ifndef MAX_PTRS_PER_PTE
1652#define MAX_PTRS_PER_PTE PTRS_PER_PTE
1653#endif
1654
1655#ifndef MAX_PTRS_PER_PMD
1656#define MAX_PTRS_PER_PMD PTRS_PER_PMD
1657#endif
1658
1659#ifndef MAX_PTRS_PER_PUD
1660#define MAX_PTRS_PER_PUD PTRS_PER_PUD
1661#endif
1662
1663#ifndef MAX_PTRS_PER_P4D
1664#define MAX_PTRS_PER_P4D PTRS_PER_P4D
1665#endif
1666
1667/* description of effects of mapping type and prot in current implementation.
1668 * this is due to the limited x86 page protection hardware. The expected
1669 * behavior is in parens:
1670 *
1671 * map_type prot
1672 * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC
1673 * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes
1674 * w: (no) no w: (no) no w: (yes) yes w: (no) no
1675 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
1676 *
1677 * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes
1678 * w: (no) no w: (no) no w: (copy) copy w: (no) no
1679 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
1680 *
1681 * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
1682 * MAP_PRIVATE (with Enhanced PAN supported):
1683 * r: (no) no
1684 * w: (no) no
1685 * x: (yes) yes
1686 */
1687#define DECLARE_VM_GET_PAGE_PROT \
1688pgprot_t vm_get_page_prot(unsigned long vm_flags) \
1689{ \
1690 return protection_map[vm_flags & \
1691 (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]; \
1692} \
1693EXPORT_SYMBOL(vm_get_page_prot);
1694
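/*
 * Usage sketch (illustrative only): an architecture with no special
 * requirements provides a protection_map[] and then emits the generic
 * definition, e.g.:
 *
 *	static pgprot_t protection_map[16] __ro_after_init = {
 *		[VM_NONE]	= PAGE_NONE,
 *		[VM_READ]	= PAGE_READONLY,
 *		...
 *	};
 *	DECLARE_VM_GET_PAGE_PROT
 *
 * Architectures that need extra bits (e.g. memory tagging or protection
 * keys) define their own vm_get_page_prot() instead.
 */
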
ca5999fd 1695#endif /* _LINUX_PGTABLE_H */