// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

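/*
 * map_pte - map (and usually lock) the page table for pvmw->address.
 * Returns true with pvmw->pte mapped and pvmw->ptl held when the PTE
 * deserves a closer look.  On a false return, a still-set pvmw->pte
 * tells the caller to keep scanning this page table, while a NULL
 * pvmw->pte means the page table was freed or replaced and the caller
 * must restart from the pmd level.
 */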
static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
{
	pte_t ptent;

	if (pvmw->flags & PVMW_SYNC) {
		/* Use the stricter lookup */
		pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,
						pvmw->address, &pvmw->ptl);
		*ptlp = pvmw->ptl;
		return !!pvmw->pte;
	}

	/*
	 * It is important to return the ptl corresponding to pte,
	 * in case *pvmw->pmd changes underneath us; so we need to
	 * return it even when choosing not to lock, in case the caller
	 * proceeds to loop over the next ptes, and finds a match later.
	 * Though, in most cases, the page lock already protects this.
	 */
	pvmw->pte = pte_offset_map_nolock(pvmw->vma->vm_mm, pvmw->pmd,
					  pvmw->address, ptlp);
	if (!pvmw->pte)
		return false;

	ptent = ptep_get(pvmw->pte);

	if (pvmw->flags & PVMW_MIGRATION) {
		if (!is_swap_pte(ptent))
			return false;
	} else if (is_swap_pte(ptent)) {
		swp_entry_t entry;
		/*
		 * Handle un-addressable ZONE_DEVICE memory.
		 *
		 * We get here when we are trying to unmap a private
		 * device page from the process address space. Such a
		 * page is not CPU accessible and thus is mapped as
		 * a special swap entry; nonetheless it still does
		 * count as a valid regular mapping for the page
		 * (and is accounted as such in the page's map count).
		 *
		 * So handle this special case as if it were a normal
		 * page mapping, ie. lock the CPU page table and return true.
		 *
		 * For more details on device private memory see HMM
		 * (include/linux/hmm.h or mm/hmm.c).
		 */
		entry = pte_to_swp_entry(ptent);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;
	} else if (!pte_present(ptent)) {
		return false;
	}
	pvmw->ptl = *ptlp;
	spin_lock(pvmw->ptl);
	return true;
}

/**
 * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, includes a pair of pte and page
 *	  for checking
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to
 * an arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to @pvmw->page or any subpage in case of THP.
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points
 * to @pvmw->page or any subpage in case of THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;
	pte_t ptent = ptep_get(pvmw->pte);

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;
		if (!is_swap_pte(ptent))
			return false;
		entry = pte_to_swp_entry(ptent);

		if (!is_migration_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset_pfn(entry);
	} else if (is_swap_pte(ptent)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(ptent);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset_pfn(entry);
	} else {
		if (!pte_present(ptent))
			return false;

		pfn = pte_pfn(ptent);
	}

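	/*
	 * Unsigned arithmetic: if pfn is below pvmw->pfn, the subtraction
	 * wraps to a huge value and the comparison fails, so this single
	 * compare checks both bounds of the folio's pfn range.
	 */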
	return (pfn - pvmw->pfn) < pvmw->nr_pages;
}

/* Returns true if the two ranges overlap.  Careful to not overflow. */
static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw)
{
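	/*
	 * First test: the pmd range ends below the folio's range;
	 * second test: the pmd range starts above it.  Both bounds use
	 * "- 1" so that the sums cannot wrap past ULONG_MAX.
	 */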
	if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn)
		return false;
	if (pfn > pvmw->pfn + pvmw->nr_pages - 1)
		return false;
	return true;
}

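/*
 * step_forward - round pvmw->address up to the next 'size' boundary,
 * saturating at ULONG_MAX on wraparound so that the caller's
 * "while (pvmw->address < end)" loop terminates.
 */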
static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
	pvmw->address = (pvmw->address + size) & ~(size - 1);
	if (!pvmw->address)
		pvmw->address = ULONG_MAX;
}

/**
 * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. pfn, nr_pages, vma, address
 * and flags must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to relevant page table entries. @pvmw->ptl is locked. @pvmw->address
 * is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long end;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (unlikely(is_vm_hugetlb_page(vma))) {
		struct hstate *hstate = hstate_vma(vma);
		unsigned long size = huge_page_size(hstate);
		/* The only possible mapping was handled on last iteration */
		if (pvmw->pte)
			return not_found(pvmw);
		/*
		 * All callers that get here will already hold the
		 * i_mmap_rwsem. Therefore, no additional locks need to be
		 * taken before calling hugetlb_walk().
		 */
		pvmw->pte = hugetlb_walk(vma, pvmw->address, size);
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}

	end = vma_address_end(pvmw);
	if (pvmw->pte)
		goto next_pte;
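	/*
	 * Scan the remaining range: descend pgd -> p4d -> pud -> pmd at
	 * each address, stepping past whole non-present regions at the
	 * highest level that is empty.
	 */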
restart:
	do {
		pgd = pgd_offset(mm, pvmw->address);
		if (!pgd_present(*pgd)) {
			step_forward(pvmw, PGDIR_SIZE);
			continue;
		}
		p4d = p4d_offset(pgd, pvmw->address);
		if (!p4d_present(*p4d)) {
			step_forward(pvmw, P4D_SIZE);
			continue;
		}
		pud = pud_offset(p4d, pvmw->address);
		if (!pud_present(*pud)) {
			step_forward(pvmw, PUD_SIZE);
			continue;
		}

		pvmw->pmd = pmd_offset(pud, pvmw->address);
		/*
		 * Make sure the pmd value isn't cached in a register by the
		 * compiler and used as a stale value after we've observed a
		 * subsequent update.
		 */
		pmde = pmdp_get_lockless(pvmw->pmd);

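		/*
		 * A huge pmd (present trans-huge or devmap, or a pmd
		 * migration entry) is decided entirely at the pmd level,
		 * under the pmd lock taken below.
		 */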
		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) ||
		    (pmd_present(pmde) && pmd_devmap(pmde))) {
			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
			pmde = *pvmw->pmd;
			if (!pmd_present(pmde)) {
				swp_entry_t entry;

				if (!thp_migration_supported() ||
				    !(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				entry = pmd_to_swp_entry(pmde);
				if (!is_migration_entry(entry) ||
				    !check_pmd(swp_offset_pfn(entry), pvmw))
					return not_found(pvmw);
				return true;
			}
			if (likely(pmd_trans_huge(pmde) || pmd_devmap(pmde))) {
				if (pvmw->flags & PVMW_MIGRATION)
					return not_found(pvmw);
				if (!check_pmd(pmd_pfn(pmde), pvmw))
					return not_found(pvmw);
				return true;
			}
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		} else if (!pmd_present(pmde)) {
			/*
			 * If PVMW_SYNC, take and drop THP pmd lock so that we
			 * cannot return prematurely, while zap_huge_pmd() has
			 * cleared *pmd but not decremented compound_mapcount().
			 */
			if ((pvmw->flags & PVMW_SYNC) &&
			    transhuge_vma_suitable(vma, pvmw->address) &&
			    (pvmw->nr_pages >= HPAGE_PMD_NR)) {
				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

				spin_unlock(ptl);
			}
			step_forward(pvmw, PMD_SIZE);
			continue;
		}
		if (!map_pte(pvmw, &ptl)) {
			if (!pvmw->pte)
				goto restart;
			goto next_pte;
		}
this_pte:
		if (check_pte(pvmw))
			return true;
next_pte:
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				pte_unmap(pvmw->pte);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->pte++;
		} while (pte_none(ptep_get(pvmw->pte)));

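		/* Take the ptl that map_pte() reported but chose not to lock */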
		if (!pvmw->ptl) {
			pvmw->ptl = ptl;
			spin_lock(pvmw->ptl);
		}
		goto this_pte;
	} while (pvmw->address < end);

	return false;
}
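
/*
 * Example usage (an illustrative sketch, not part of this file): a typical
 * rmap caller visits every PTE or PMD mapping of a folio within one vma.
 * DEFINE_FOLIO_VMA_WALK (include/linux/rmap.h) initialises pfn, nr_pages,
 * pgoff, vma, address and flags from the folio; handle_one_pte() and
 * handle_one_pmd() are hypothetical stand-ins for the caller's own logic.
 * pvmw.ptl is held each time the walk returns true, and is dropped again
 * by the next call:
 *
 *	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		if (pvmw.pte)
 *			handle_one_pte(pvmw.pte);
 *		else
 *			handle_one_pmd(pvmw.pmd);
 *	}
 */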

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.pfn = page_to_pfn(page),
		.nr_pages = 1,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

	pvmw.address = vma_address(page, vma);
	if (pvmw.address == -EFAULT)
		return 0;
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}