// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			/*
			 * We get here when we are trying to unmap a private
			 * device page from the process address space. Such a
			 * page is not CPU accessible and thus is mapped as a
			 * special swap entry; nonetheless it still counts as
			 * a valid regular mapping for the page (and is
			 * accounted as such in the page's map count).
			 *
			 * So handle this special case as if it were a normal
			 * page mapping, i.e. lock the CPU page table and
			 * return true.
			 *
			 * For more details on device private memory see HMM
			 * (include/linux/hmm.h or mm/hmm.c).
			 */
			if (is_swap_pte(*pvmw->pte)) {
				swp_entry_t entry;

				/* Handle un-addressable ZONE_DEVICE memory */
				entry = pte_to_swp_entry(*pvmw->pte);
				if (!is_device_private_entry(entry))
					return false;
			} else if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}

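/*
 * Illustrative sketch (not part of the original file): the PTE classes
 * the walk distinguishes. This hypothetical helper only restates the
 * cases handled by map_pte() above and check_pte() below.
 */
static inline bool example_pte_maps_a_page(pte_t pte)
{
	if (pte_present(pte))
		return true;	/* ordinary mapping of a RAM page */
	if (is_swap_pte(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		/* un-addressable ZONE_DEVICE page: still a real mapping */
		if (is_device_private_entry(entry))
			return true;
		/* other swap/migration entries only matter with PVMW_MIGRATION */
	}
	return false;
}
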
static inline bool pfn_is_match(struct page *page, unsigned long pfn)
{
	unsigned long page_pfn = page_to_pfn(page);

	/* normal page and hugetlbfs page */
	if (!PageTransCompound(page) || PageHuge(page))
		return page_pfn == pfn;

	/* THP can be referenced by any subpage */
	return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page);
}

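/*
 * Worked example (illustrative, not part of the original file): for a
 * 2MB THP on x86-64, thp_nr_pages() is 512. If the head page sits at
 * pfn 0x1000, every pfn in [0x1000, 0x11ff] matches, because a PTE (or
 * a migration/device-private entry) may reference any subpage of the
 * compound page, not just its head.
 */
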
/**
 * check_pte - check if @pvmw->page is mapped at @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, includes a pair of pte and page
 *	  for checking
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * pvmw->pte may point to an empty PTE, a swap PTE, or a PTE pointing to
 * an arbitrary page.
 *
 * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to @pvmw->page or any subpage in case of THP.
 *
 * If PVMW_MIGRATION flag is not set, returns true if pvmw->pte points to
 * @pvmw->page or any subpage in case of THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;
		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry))
			return false;

		pfn = migration_entry_to_pfn(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry))
			return false;

		pfn = device_private_entry_to_pfn(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

	return pfn_is_match(pvmw->page, pfn);
}

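/*
 * Example (illustrative, not part of the original file): while a THP is
 * being migrated, its PTEs hold migration entries rather than present
 * mappings. With PVMW_MIGRATION set, check_pte() decodes such an entry
 * back to a pfn and accepts it if that pfn falls inside @pvmw->page;
 * without the flag, the same swap PTE is rejected unless it is a
 * device-private entry.
 */
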
static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
	pvmw->address = (pvmw->address + size) & ~(size - 1);
	if (!pvmw->address)
		pvmw->address = ULONG_MAX;
}

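/*
 * Worked example (illustrative, not part of the original file): with 4K
 * pages and 2MB PMDs, stepping forward from address 0x1005000 by
 * PMD_SIZE gives (0x1005000 + 0x200000) & ~0x1fffff == 0x1200000, the
 * next PMD boundary. If the addition wraps around to zero at the top of
 * the address space, the address is pinned to ULONG_MAX so the caller's
 * "while (pvmw->address < end)" loop still terminates.
 */
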
/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped page
 * (usually THP). For PTE-mapped THP, you should run page_vma_mapped_walk() in
 * a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	unsigned long end;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (unlikely(PageHuge(page))) {
		/* The only possible mapping was handled on last iteration */
		if (pvmw->pte)
			return not_found(pvmw);

		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}

	/*
	 * Seeking to the next pte only makes sense for THP.
	 * But more important than that optimization is to filter out
	 * any PageKsm page, whose page->index misleads vma_address()
	 * and vma_address_end() to disaster.
	 */
	end = PageTransCompound(page) ?
		vma_address_end(page, pvmw->vma) :
		pvmw->address + PAGE_SIZE;
	if (pvmw->pte)
		goto next_pte;
restart:
	do {
		pgd = pgd_offset(mm, pvmw->address);
		if (!pgd_present(*pgd)) {
			step_forward(pvmw, PGDIR_SIZE);
			continue;
		}
		p4d = p4d_offset(pgd, pvmw->address);
		if (!p4d_present(*p4d)) {
			step_forward(pvmw, P4D_SIZE);
			continue;
		}
		pud = pud_offset(p4d, pvmw->address);
		if (!pud_present(*pud)) {
			step_forward(pvmw, PUD_SIZE);
			continue;
		}

		pvmw->pmd = pmd_offset(pud, pvmw->address);
		/*
		 * Make sure the pmd value isn't cached in a register by the
		 * compiler and used as a stale value after we've observed a
		 * subsequent update.
		 */
		pmde = READ_ONCE(*pvmw->pmd);

		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
			pmde = *pvmw->pmd;
			if (likely(pmd_trans_huge(pmde))) {
				if (pvmw->flags & PVMW_MIGRATION)
					return not_found(pvmw);
				if (pmd_page(pmde) != page)
					return not_found(pvmw);
				return true;
			}
			if (!pmd_present(pmde)) {
				swp_entry_t entry;

				if (!thp_migration_supported() ||
				    !(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				entry = pmd_to_swp_entry(pmde);
				if (!is_migration_entry(entry) ||
				    migration_entry_to_page(entry) != page)
					return not_found(pvmw);
				return true;
			}
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		} else if (!pmd_present(pmde)) {
			/*
			 * If PVMW_SYNC, take and drop THP pmd lock so that we
			 * cannot return prematurely, while zap_huge_pmd() has
			 * cleared *pmd but not decremented compound_mapcount().
			 */
			if ((pvmw->flags & PVMW_SYNC) &&
			    PageTransCompound(page)) {
				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

				spin_unlock(ptl);
			}
			step_forward(pvmw, PMD_SIZE);
			continue;
		}
		if (!map_pte(pvmw))
			goto next_pte;
this_pte:
		if (check_pte(pvmw))
			return true;
next_pte:
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				pte_unmap(pvmw->pte);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->pte++;
			if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
				pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
				spin_lock(pvmw->ptl);
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
		goto this_pte;
	} while (pvmw->address < end);

	return false;
}
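
/*
 * Example caller (a sketch, not part of the original file): the rmap
 * walkers in mm/rmap.c drive page_vma_mapped_walk() in exactly this
 * loop shape, as the kernel-doc above suggests. The function name and
 * the "referenced" logic below are illustrative only.
 */
static bool example_page_referenced_one(struct page *page,
					struct vm_area_struct *vma,
					unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	bool referenced = false;

	/*
	 * Each successful iteration returns with pvmw.ptl held and either
	 * pvmw.pte set (PTE mapping) or pvmw.pte == NULL (PMD-mapped THP).
	 * When the walk finishes, the lock is dropped and pte unmapped.
	 */
	while (page_vma_mapped_walk(&pvmw)) {
		if (pvmw.pte && pte_young(*pvmw.pte))
			referenced = true;
	}
	return referenced;
}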

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA. Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

	pvmw.address = vma_address(page, vma);
	if (pvmw.address == -EFAULT)
		return 0;
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}
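
/*
 * Example use (a sketch, not part of the original file; the in-tree
 * caller is the memory-failure code, and this simplified form is
 * illustrative only):
 */
static void example_report_mapping(struct page *page,
				   struct vm_area_struct *vma)
{
	/*
	 * PVMW_SYNC inside page_mapped_in_vma() makes the answer stable
	 * even against a concurrent THP split.
	 */
	if (page_mapped_in_vma(page, vma))
		pr_info("pfn %#lx is mapped in vma %px\n",
			page_to_pfn(page), vma);
}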