mm: page_vma_mapped_walk(): prettify PVMW_MIGRATION block
mm/page_vma_mapped.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
        page_vma_mapped_walk_done(pvmw);
        return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
        pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
        if (!(pvmw->flags & PVMW_SYNC)) {
                if (pvmw->flags & PVMW_MIGRATION) {
                        if (!is_swap_pte(*pvmw->pte))
                                return false;
                } else {
                        /*
                         * We get here when we are trying to unmap a private
                         * device page from the process address space. Such a
                         * page is not CPU accessible and thus is mapped as a
                         * special swap entry; nonetheless it still counts as
                         * a valid regular mapping for the page (and is
                         * accounted as such in the page's map count).
                         *
                         * So handle this special case as if it were a normal
                         * page mapping, i.e. lock the CPU page table and
                         * return true.
                         *
                         * For more details on device private memory see HMM
                         * (include/linux/hmm.h or mm/hmm.c).
                         */
                        if (is_swap_pte(*pvmw->pte)) {
                                swp_entry_t entry;

                                /* Handle un-addressable ZONE_DEVICE memory */
                                entry = pte_to_swp_entry(*pvmw->pte);
                                if (!is_device_private_entry(entry))
                                        return false;
                        } else if (!pte_present(*pvmw->pte))
                                return false;
                }
        }
        pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
        spin_lock(pvmw->ptl);
        return true;
}

static inline bool pfn_is_match(struct page *page, unsigned long pfn)
{
        unsigned long page_pfn = page_to_pfn(page);

        /* normal page and hugetlbfs page */
        if (!PageTransCompound(page) || PageHuge(page))
                return page_pfn == pfn;

        /* THP can be referenced by any subpage */
        return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page);
}

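/*
 * Example (illustrative numbers, not from this file): if a THP's head
 * page sits at pfn 0x1000 and thp_nr_pages() returns 512, any pfn in
 * [0x1000, 0x11ff] is a match, because a PTE may map any subpage of
 * the THP.
 */
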
/**
 * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, includes a pair of pte and page for
 * checking
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to an
 * arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to @pvmw->page, or to any subpage in the case
 * of THP.
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points
 * to @pvmw->page, or to any subpage in the case of THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
        unsigned long pfn;

        if (pvmw->flags & PVMW_MIGRATION) {
                swp_entry_t entry;

                if (!is_swap_pte(*pvmw->pte))
                        return false;
                entry = pte_to_swp_entry(*pvmw->pte);

                if (!is_migration_entry(entry))
                        return false;

                pfn = migration_entry_to_pfn(entry);
        } else if (is_swap_pte(*pvmw->pte)) {
                swp_entry_t entry;

                /* Handle un-addressable ZONE_DEVICE memory */
                entry = pte_to_swp_entry(*pvmw->pte);
                if (!is_device_private_entry(entry))
                        return false;

                pfn = device_private_entry_to_pfn(entry);
        } else {
                if (!pte_present(*pvmw->pte))
                        return false;

                pfn = pte_pfn(*pvmw->pte);
        }

        return pfn_is_match(pvmw->page, pfn);
}

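/*
 * Sketch of the migration case above (illustrative only): a migration
 * entry is a swap-format PTE encoding the page's pfn, so check_pte()
 * recovers the pfn with the swap-entry helpers:
 *
 *      swp_entry_t entry = pte_to_swp_entry(*pvmw->pte);
 *      if (is_migration_entry(entry))
 *              pfn = migration_entry_to_pfn(entry);
 */
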
/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to relevant page table entries. @pvmw->ptl is locked. @pvmw->address
 * is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
        struct mm_struct *mm = pvmw->vma->vm_mm;
        struct page *page = pvmw->page;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t pmde;

        /* The only possible pmd mapping has been handled on last iteration */
        if (pvmw->pmd && !pvmw->pte)
                return not_found(pvmw);

        if (unlikely(PageHuge(page))) {
                /* The only possible mapping was handled on last iteration */
                if (pvmw->pte)
                        return not_found(pvmw);

                /* when pud is not present, pte will be NULL */
                pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
                if (!pvmw->pte)
                        return false;

                pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
                spin_lock(pvmw->ptl);
                if (!check_pte(pvmw))
                        return not_found(pvmw);
                return true;
        }

        if (pvmw->pte)
                goto next_pte;
restart:
        pgd = pgd_offset(mm, pvmw->address);
        if (!pgd_present(*pgd))
                return false;
        p4d = p4d_offset(pgd, pvmw->address);
        if (!p4d_present(*p4d))
                return false;
        pud = pud_offset(p4d, pvmw->address);
        if (!pud_present(*pud))
                return false;
        pvmw->pmd = pmd_offset(pud, pvmw->address);
        /*
         * Make sure the pmd value isn't cached in a register by the
         * compiler and used as a stale value after we've observed a
         * subsequent update.
         */
        pmde = READ_ONCE(*pvmw->pmd);
        if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
                pvmw->ptl = pmd_lock(mm, pvmw->pmd);
                pmde = *pvmw->pmd;
                if (likely(pmd_trans_huge(pmde))) {
                        if (pvmw->flags & PVMW_MIGRATION)
                                return not_found(pvmw);
                        if (pmd_page(pmde) != page)
                                return not_found(pvmw);
                        return true;
                }
                if (!pmd_present(pmde)) {
                        swp_entry_t entry;

                        if (!thp_migration_supported() ||
                            !(pvmw->flags & PVMW_MIGRATION))
                                return not_found(pvmw);
                        entry = pmd_to_swp_entry(pmde);
                        if (!is_migration_entry(entry) ||
                            migration_entry_to_page(entry) != page)
                                return not_found(pvmw);
                        return true;
                }
                /* THP pmd was split under us: handle on pte level */
                spin_unlock(pvmw->ptl);
                pvmw->ptl = NULL;
        } else if (!pmd_present(pmde)) {
                /*
                 * If PVMW_SYNC, take and drop THP pmd lock so that we
                 * cannot return prematurely, while zap_huge_pmd() has
                 * cleared *pmd but not decremented compound_mapcount().
                 */
                if ((pvmw->flags & PVMW_SYNC) && PageTransCompound(page)) {
                        spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

                        spin_unlock(ptl);
                }
                return false;
        }
        if (!map_pte(pvmw))
                goto next_pte;
        while (1) {
                unsigned long end;

                if (check_pte(pvmw))
                        return true;
next_pte:
                /* Seek to next pte only makes sense for THP */
                if (!PageTransHuge(page))
                        return not_found(pvmw);
                end = vma_address_end(page, pvmw->vma);
                do {
                        pvmw->address += PAGE_SIZE;
                        if (pvmw->address >= end)
                                return not_found(pvmw);
                        /* Did we cross page table boundary? */
                        if (pvmw->address % PMD_SIZE == 0) {
                                pte_unmap(pvmw->pte);
                                if (pvmw->ptl) {
                                        spin_unlock(pvmw->ptl);
                                        pvmw->ptl = NULL;
                                }
                                goto restart;
                        } else {
                                pvmw->pte++;
                        }
                } while (pte_none(*pvmw->pte));

                if (!pvmw->ptl) {
                        pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
                        spin_lock(pvmw->ptl);
                }
        }
}

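/*
 * Usage sketch (illustrative, not part of this file): callers walk all
 * mappings of a page in a VMA by looping until the walk is exhausted;
 * inside the loop, pvmw.ptl is held and pvmw.pte (or pvmw.pmd for a
 * PMD-mapped THP) points at the entry found. inspect_pte() and
 * inspect_pmd() are hypothetical helpers, named only for this sketch:
 *
 *      struct page_vma_mapped_walk pvmw = {
 *              .page = page,
 *              .vma = vma,
 *              .address = address,
 *      };
 *
 *      while (page_vma_mapped_walk(&pvmw)) {
 *              if (pvmw.pte)
 *                      inspect_pte(pvmw.pte);
 *              else
 *                      inspect_pmd(pvmw.pmd);
 *      }
 */
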
/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA. Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
        struct page_vma_mapped_walk pvmw = {
                .page = page,
                .vma = vma,
                .flags = PVMW_SYNC,
        };

        pvmw.address = vma_address(page, vma);
        if (pvmw.address == -EFAULT)
                return 0;
        if (!page_vma_mapped_walk(&pvmw))
                return 0;
        page_vma_mapped_walk_done(&pvmw);
        return 1;
}
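
/*
 * Example caller (hypothetical): use page_mapped_in_vma() to skip VMAs
 * that do not actually map the page:
 *
 *      if (!page_mapped_in_vma(page, vma))
 *              return;
 */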