// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in linux/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <linux/mm_inline.h>
#include <asm/tlb.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error before resetting the entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *p4d)
{
	p4d_ERROR(*p4d);
	p4d_clear(p4d);
}
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}
#endif

/*
 * Note that the pmd variant below can't be stubbed out like p4d/pud
 * above: pmd folding is special, and the pmd_* macros typically refer
 * to the upper level even when folded.
 */
void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed) and write permission.
 * Furthermore, we know it always gets set to a "more permissive"
 * setting, which allows most architectures to optimize this.  We return
 * whether the PTE actually changed, which in turn instructs the caller
 * to do things like update_mmu_cache().  This used to be done in the
 * caller, but sparc needs minor faults to force that call on sun4c, so
 * we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		  !pmd_devmap(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pud_t *pudp)
{
	pud_t pud;

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
	VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return pud;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument, so this destroys the page coloring of some archs */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif

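/*
 * Illustrative counterpart (a simplified sketch of the huge pmd split
 * path in mm/huge_memory.c): the deposited page table is withdrawn and
 * repopulated with PTRS_PER_PTE regular ptes, e.g.
 *
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
 *	pmd_populate(mm, &_pmd, pgtable);
 *	... fill in the ptes ...
 *	pmd_populate(mm, pmd, pgtable);
 */
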
#ifndef __HAVE_ARCH_PMDP_INVALIDATE
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return old;
}
#endif

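/*
 * Note: pmdp_establish() hands back the old pmd value, so Accessed/Dirty
 * bits already set on the entry are reported to the caller rather than
 * lost while the mapping is temporarily invalid (used by e.g. the THP
 * split path).
 */
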
#ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
			 pmd_t *pmdp)
{
	return pmdp_invalidate(vma, address, pmdp);
}
#endif

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * The pmd and hugepage pte formats are the same, so we can
	 * use the same function here.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down the pte-level TLB entries, not a huge pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif
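/*
 * Illustrative caller (a heavily simplified sketch of the khugepaged
 * collapse path): the ptes are torn down and flushed before the small
 * pages are copied into the new huge page, e.g.
 *
 *	_pmd = pmdp_collapse_flush(vma, address, pmd);
 *	... copy the pages, then install the huge pmd ...
 */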
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */