// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}
static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
{
	pte_t ptent;

	if (pvmw->flags & PVMW_SYNC) {
		/* Use the stricter lookup */
		pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,
						pvmw->address, &pvmw->ptl);
		*ptlp = pvmw->ptl;
		return !!pvmw->pte;
	}

	/*
	 * It is important to return the ptl corresponding to pte,
	 * in case *pvmw->pmd changes underneath us; so we need to
	 * return it even when choosing not to lock, in case the caller
	 * proceeds to loop over the next ptes and finds a match later.
	 * Though, in most cases, the page lock already protects this.
	 */
	pvmw->pte = pte_offset_map_nolock(pvmw->vma->vm_mm, pvmw->pmd,
					  pvmw->address, ptlp);
	if (!pvmw->pte)
		return false;

	ptent = ptep_get(pvmw->pte);

	if (pvmw->flags & PVMW_MIGRATION) {
		if (!is_swap_pte(ptent))
			return false;
	} else if (is_swap_pte(ptent)) {
		swp_entry_t entry;
		/*
		 * Handle un-addressable ZONE_DEVICE memory.
		 *
		 * We get here when we are trying to unmap a private
		 * device page from the process address space. Such a
		 * page is not CPU accessible and thus is mapped as
		 * a special swap entry; nonetheless it still does
		 * count as a valid regular mapping for the page
		 * (and is accounted as such in the page maps count).
		 *
		 * So handle this special case as if it were a normal
		 * page mapping, i.e. lock the CPU page table and return true.
		 *
		 * For more details on device private memory see HMM
		 * (include/linux/hmm.h or mm/hmm.c).
		 */
		entry = pte_to_swp_entry(ptent);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;
	} else if (!pte_present(ptent)) {
		return false;
	}
	pvmw->ptl = *ptlp;
	spin_lock(pvmw->ptl);
	return true;
}
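
/*
 * A note on the two lookups above (added commentary, not in the
 * original source): pte_offset_map_lock() returns with the PTE lock
 * already held, so a PVMW_SYNC walker never observes a PTE mid-update;
 * pte_offset_map_nolock() only records which lock *would* cover the
 * PTE (via *ptlp), which lets map_pte() inspect the entry first and
 * avoid taking the lock at all for entries the walk does not care
 * about.
 */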
74 | ||
0d665e7b | 75 | /** |
9651eeab KS |
76 | * check_pte - check if [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages) is |
77 | * mapped at the @pvmw->pte | |
78 | * @pvmw: page_vma_mapped_walk struct, includes a pair pte and pfn range | |
79 | * for checking | |
0d665e7b | 80 | * |
9651eeab | 81 | * page_vma_mapped_walk() found a place where pfn range is *potentially* |
0d665e7b KS |
82 | * mapped. check_pte() has to validate this. |
83 | * | |
777f303c AS |
84 | * pvmw->pte may point to empty PTE, swap PTE or PTE pointing to |
85 | * arbitrary page. | |
0d665e7b KS |
86 | * |
87 | * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains migration | |
9651eeab | 88 | * entry that points to [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages) |
0d665e7b | 89 | * |
777f303c | 90 | * If PVMW_MIGRATION flag is not set, returns true if pvmw->pte points to |
9651eeab | 91 | * [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages) |
0d665e7b KS |
92 | * |
93 | * Otherwise, return false. | |
94 | * | |
95 | */ | |
ace71a19 KS |
96 | static bool check_pte(struct page_vma_mapped_walk *pvmw) |
97 | { | |
0d665e7b | 98 | unsigned long pfn; |
c33c7948 | 99 | pte_t ptent = ptep_get(pvmw->pte); |
0d665e7b | 100 | |
ace71a19 | 101 | if (pvmw->flags & PVMW_MIGRATION) { |
ace71a19 | 102 | swp_entry_t entry; |
c33c7948 | 103 | if (!is_swap_pte(ptent)) |
ace71a19 | 104 | return false; |
c33c7948 | 105 | entry = pte_to_swp_entry(ptent); |
a5430dda | 106 | |
b756a3b5 AP |
107 | if (!is_migration_entry(entry) && |
108 | !is_device_exclusive_entry(entry)) | |
ace71a19 | 109 | return false; |
a5430dda | 110 | |
0d206b5d | 111 | pfn = swp_offset_pfn(entry); |
c33c7948 | 112 | } else if (is_swap_pte(ptent)) { |
0d665e7b | 113 | swp_entry_t entry; |
a5430dda | 114 | |
0d665e7b | 115 | /* Handle un-addressable ZONE_DEVICE memory */ |
c33c7948 | 116 | entry = pte_to_swp_entry(ptent); |
b756a3b5 AP |
117 | if (!is_device_private_entry(entry) && |
118 | !is_device_exclusive_entry(entry)) | |
ace71a19 KS |
119 | return false; |
120 | ||
0d206b5d | 121 | pfn = swp_offset_pfn(entry); |
0d665e7b | 122 | } else { |
c33c7948 | 123 | if (!pte_present(ptent)) |
ace71a19 | 124 | return false; |
0d665e7b | 125 | |
c33c7948 | 126 | pfn = pte_pfn(ptent); |
ace71a19 KS |
127 | } |
128 | ||
2aff7a47 MWO |
129 | return (pfn - pvmw->pfn) < pvmw->nr_pages; |
130 | } | |
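
/*
 * Worked example for the final range check (added commentary): because
 * pfn and pvmw->pfn are unsigned, a single comparison covers both
 * bounds.  If pfn >= pvmw->pfn, the subtraction yields the ordinary
 * offset into the range; if pfn < pvmw->pfn, it wraps to a huge value
 * that can never be below nr_pages for any realistic pfn.  So
 * "(pfn - pvmw->pfn) < pvmw->nr_pages" is equivalent to
 * "pvmw->pfn <= pfn && pfn < pvmw->pfn + pvmw->nr_pages", without any
 * overflow risk from the addition.
 */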
131 | ||
132 | /* Returns true if the two ranges overlap. Careful to not overflow. */ | |
133 | static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw) | |
134 | { | |
135 | if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn) | |
136 | return false; | |
137 | if (pfn > pvmw->pfn + pvmw->nr_pages - 1) | |
138 | return false; | |
139 | return true; | |
ace71a19 KS |
140 | } |
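
/*
 * Added commentary on the overflow caution above: both tests compare
 * against the *last* pfn of a range (the "- 1" forms) rather than the
 * exclusive upper bound, so each sum stays one smaller than in a naive
 * "pfn + HPAGE_PMD_NR > pvmw->pfn" style test; neither HPAGE_PMD_NR
 * nor pvmw->nr_pages is zero here, so the subtraction cannot wrap.
 */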
141 | ||
a9a7504d HD |
142 | static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size) |
143 | { | |
144 | pvmw->address = (pvmw->address + size) & ~(size - 1); | |
145 | if (!pvmw->address) | |
146 | pvmw->address = ULONG_MAX; | |
147 | } | |
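
/*
 * Worked example (added commentary): with size == PMD_SIZE (0x200000
 * on x86-64), an address of 0x7f1234567000 rounds up to the next PMD
 * boundary, 0x7f1234600000.  If the addition wraps past the top of the
 * address space the masked result is 0, which would restart the scan
 * from the bottom of the address space; mapping 0 to ULONG_MAX makes
 * the walk terminate at the caller's "while (pvmw->address < end)"
 * test instead.
 */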
148 | ||
ace71a19 | 149 | /** |
2aff7a47 | 150 | * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at |
ace71a19 KS |
151 | * @pvmw->address |
152 | * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags | |
153 | * must be set. pmd, pte and ptl must be NULL. | |
154 | * | |
155 | * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte point | |
156 | * to relevant page table entries. @pvmw->ptl is locked. @pvmw->address is | |
157 | * adjusted if needed (for PTE-mapped THPs). | |
158 | * | |
159 | * If @pvmw->pmd is set but @pvmw->pte is not, you have found PMD-mapped page | |
160 | * (usually THP). For PTE-mapped THP, you should run page_vma_mapped_walk() in | |
161 | * a loop to find all PTEs that map the THP. | |
162 | * | |
163 | * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry | |
164 | * regardless of which page table level the page is mapped at. @pvmw->pmd is | |
165 | * NULL. | |
166 | * | |
baf2f90b | 167 | * Returns false if there are no more page table entries for the page in |
ace71a19 KS |
168 | * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped. |
169 | * | |
170 | * If you need to stop the walk before page_vma_mapped_walk() returned false, | |
171 | * use page_vma_mapped_walk_done(). It will do the housekeeping. | |
172 | */ | |
173 | bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw) | |
174 | { | |
2aff7a47 MWO |
175 | struct vm_area_struct *vma = pvmw->vma; |
176 | struct mm_struct *mm = vma->vm_mm; | |
47446630 | 177 | unsigned long end; |
2798bbe7 | 178 | spinlock_t *ptl; |
ace71a19 | 179 | pgd_t *pgd; |
c2febafc | 180 | p4d_t *p4d; |
ace71a19 | 181 | pud_t *pud; |
a7b10095 | 182 | pmd_t pmde; |
ace71a19 KS |
183 | |
184 | /* The only possible pmd mapping has been handled on last iteration */ | |
185 | if (pvmw->pmd && !pvmw->pte) | |
186 | return not_found(pvmw); | |
187 | ||
2aff7a47 | 188 | if (unlikely(is_vm_hugetlb_page(vma))) { |
98ea0259 | 189 | struct hstate *hstate = hstate_vma(vma); |
190 | unsigned long size = huge_page_size(hstate); | |
6d0fd598 HD |
191 | /* The only possible mapping was handled on last iteration */ |
192 | if (pvmw->pte) | |
193 | return not_found(pvmw); | |
9c67a207 PX |
194 | /* |
195 | * All callers that get here will already hold the | |
196 | * i_mmap_rwsem. Therefore, no additional locks need to be | |
197 | * taken before calling hugetlb_walk(). | |
198 | */ | |
199 | pvmw->pte = hugetlb_walk(vma, pvmw->address, size); | |
ace71a19 KS |
200 | if (!pvmw->pte) |
201 | return false; | |
202 | ||
8f0b747d | 203 | pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte); |
ace71a19 KS |
204 | if (!check_pte(pvmw)) |
205 | return not_found(pvmw); | |
206 | return true; | |
207 | } | |
6d0fd598 | 208 | |
2aff7a47 | 209 | end = vma_address_end(pvmw); |
6d0fd598 HD |
210 | if (pvmw->pte) |
211 | goto next_pte; | |
ace71a19 | 212 | restart: |
a9a7504d | 213 | do { |
b3807a91 | 214 | pgd = pgd_offset(mm, pvmw->address); |
a9a7504d HD |
215 | if (!pgd_present(*pgd)) { |
216 | step_forward(pvmw, PGDIR_SIZE); | |
217 | continue; | |
218 | } | |
b3807a91 | 219 | p4d = p4d_offset(pgd, pvmw->address); |
a9a7504d HD |
220 | if (!p4d_present(*p4d)) { |
221 | step_forward(pvmw, P4D_SIZE); | |
222 | continue; | |
223 | } | |
b3807a91 | 224 | pud = pud_offset(p4d, pvmw->address); |
a9a7504d HD |
225 | if (!pud_present(*pud)) { |
226 | step_forward(pvmw, PUD_SIZE); | |
227 | continue; | |
228 | } | |
e2e1d407 | 229 | |
b3807a91 | 230 | pvmw->pmd = pmd_offset(pud, pvmw->address); |
732ed558 | 231 | /* |
b3807a91 HD |
232 | * Make sure the pmd value isn't cached in a register by the |
233 | * compiler and used as a stale value after we've observed a | |
234 | * subsequent update. | |
732ed558 | 235 | */ |
26e1a0c3 | 236 | pmde = pmdp_get_lockless(pvmw->pmd); |
b3807a91 | 237 | |
6472f6d2 MS |
238 | if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) || |
239 | (pmd_present(pmde) && pmd_devmap(pmde))) { | |
b3807a91 HD |
240 | pvmw->ptl = pmd_lock(mm, pvmw->pmd); |
241 | pmde = *pvmw->pmd; | |
b3807a91 HD |
242 | if (!pmd_present(pmde)) { |
243 | swp_entry_t entry; | |
244 | ||
245 | if (!thp_migration_supported() || | |
246 | !(pvmw->flags & PVMW_MIGRATION)) | |
247 | return not_found(pvmw); | |
248 | entry = pmd_to_swp_entry(pmde); | |
249 | if (!is_migration_entry(entry) || | |
0d206b5d | 250 | !check_pmd(swp_offset_pfn(entry), pvmw)) |
b3807a91 HD |
251 | return not_found(pvmw); |
252 | return true; | |
253 | } | |
6472f6d2 MS |
254 | if (likely(pmd_trans_huge(pmde) || pmd_devmap(pmde))) { |
255 | if (pvmw->flags & PVMW_MIGRATION) | |
256 | return not_found(pvmw); | |
257 | if (!check_pmd(pmd_pfn(pmde), pvmw)) | |
258 | return not_found(pvmw); | |
259 | return true; | |
260 | } | |
b3807a91 HD |
261 | /* THP pmd was split under us: handle on pte level */ |
262 | spin_unlock(pvmw->ptl); | |
263 | pvmw->ptl = NULL; | |
264 | } else if (!pmd_present(pmde)) { | |
265 | /* | |
266 | * If PVMW_SYNC, take and drop THP pmd lock so that we | |
267 | * cannot return prematurely, while zap_huge_pmd() has | |
268 | * cleared *pmd but not decremented compound_mapcount(). | |
269 | */ | |
270 | if ((pvmw->flags & PVMW_SYNC) && | |
3485b883 RR |
271 | thp_vma_suitable_order(vma, pvmw->address, |
272 | PMD_ORDER) && | |
2aff7a47 | 273 | (pvmw->nr_pages >= HPAGE_PMD_NR)) { |
b3807a91 | 274 | spinlock_t *ptl = pmd_lock(mm, pvmw->pmd); |
732ed558 | 275 | |
b3807a91 HD |
276 | spin_unlock(ptl); |
277 | } | |
a9a7504d HD |
278 | step_forward(pvmw, PMD_SIZE); |
279 | continue; | |
732ed558 | 280 | } |
2798bbe7 HD |
281 | if (!map_pte(pvmw, &ptl)) { |
282 | if (!pvmw->pte) | |
283 | goto restart; | |
b3807a91 | 284 | goto next_pte; |
2798bbe7 | 285 | } |
47446630 | 286 | this_pte: |
ace71a19 KS |
287 | if (check_pte(pvmw)) |
288 | return true; | |
d75450ff | 289 | next_pte: |
d75450ff | 290 | do { |
ace71a19 | 291 | pvmw->address += PAGE_SIZE; |
494334e4 | 292 | if (pvmw->address >= end) |
ace71a19 KS |
293 | return not_found(pvmw); |
294 | /* Did we cross page table boundary? */ | |
44828248 | 295 | if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) { |
ace71a19 KS |
296 | if (pvmw->ptl) { |
297 | spin_unlock(pvmw->ptl); | |
298 | pvmw->ptl = NULL; | |
299 | } | |
44828248 HD |
300 | pte_unmap(pvmw->pte); |
301 | pvmw->pte = NULL; | |
ace71a19 | 302 | goto restart; |
ace71a19 | 303 | } |
44828248 | 304 | pvmw->pte++; |
c33c7948 | 305 | } while (pte_none(ptep_get(pvmw->pte))); |
ace71a19 KS |
306 | |
307 | if (!pvmw->ptl) { | |
2798bbe7 | 308 | pvmw->ptl = ptl; |
ace71a19 KS |
309 | spin_lock(pvmw->ptl); |
310 | } | |
47446630 | 311 | goto this_pte; |
a9a7504d HD |
312 | } while (pvmw->address < end); |
313 | ||
314 | return false; | |
ace71a19 | 315 | } |
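
/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * rmap walker drives page_vma_mapped_walk() in a loop so that every
 * PTE of a PTE-mapped THP is visited.  DEFINE_FOLIO_VMA_WALK is the
 * initializer from <linux/rmap.h> as of the folio conversion seen in
 * the blame above; the early-exit condition is a hypothetical stand-in
 * for caller-specific logic.
 */
#if 0	/* example only */
static bool example_walk_folio(struct folio *folio,
			       struct vm_area_struct *vma,
			       unsigned long address)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);

	while (page_vma_mapped_walk(&pvmw)) {
		if (!pvmw.pte) {
			/* PMD-mapped THP: inspect *pvmw.pmd under pvmw.ptl */
			continue;
		}
		/* PTE mapping: inspect ptep_get(pvmw.pte) under pvmw.ptl */
		if (0 /* caller-specific reason to stop early */) {
			page_vma_mapped_walk_done(&pvmw);
			return false;
		}
	}
	return true;	/* walk completed; locks already dropped */
}
#endif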

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * otherwise. Only valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.pfn = page_to_pfn(page),
		.nr_pages = 1,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

	pvmw.address = vma_address(page, vma);
	if (pvmw.address == -EFAULT)
		return 0;
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}
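
/*
 * Usage sketch (illustrative, with a hypothetical caller): because
 * PVMW_SYNC is set, page_mapped_in_vma() gives a synchronous yes/no
 * answer, which suits callers such as memory-error handling that must
 * know whether this particular VMA maps the page right now:
 *
 *	if (page_mapped_in_vma(page, vma))
 *		collect_this_task(tsk);	// hypothetical helper
 *
 * Note the walk is fully torn down before returning; the caller holds
 * no PTE map or lock afterwards.
 */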