// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			/*
			 * We get here when we are trying to unmap a private
			 * device page from the process address space. Such
			 * a page is not CPU accessible and is therefore mapped
			 * as a special swap entry; nonetheless it still counts
			 * as a valid regular mapping of the page (and is
			 * accounted as such in the page's map count).
			 *
			 * So handle this special case as if it were a normal
			 * page mapping, i.e. lock the CPU page table and
			 * return true.
			 *
			 * For more details on device private memory see HMM
			 * (include/linux/hmm.h or mm/hmm.c).
			 */
			if (is_swap_pte(*pvmw->pte)) {
				swp_entry_t entry;

				/* Handle un-addressable ZONE_DEVICE memory */
				entry = pte_to_swp_entry(*pvmw->pte);
				if (!is_device_private_entry(entry))
					return false;
			} else if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}

static inline bool pfn_is_match(struct page *page, unsigned long pfn)
{
	unsigned long page_pfn = page_to_pfn(page);

	/* normal page and hugetlbfs page */
	if (!PageTransCompound(page) || PageHuge(page))
		return page_pfn == pfn;

	/* THP can be referenced by any subpage */
	return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page);
}
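
/*
 * Worked example (illustrative only, not part of the original source): for
 * a 2MB THP built from 4KB pages, thp_nr_pages() is 512. With the head
 * page at pfn 0x1000, pfn_is_match() above returns true for any pfn in
 * [0x1000, 0x11ff] and false otherwise, so an entry naming any subpage
 * still matches the compound page.
 */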

/**
 * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, includes a pair of pte and page for
 * checking
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to an
 * arbitrary page.
 *
 * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to @pvmw->page or any subpage in case of THP.
 *
 * If PVMW_MIGRATION flag is not set, returns true if pvmw->pte points to
 * pvmw->page or any subpage in case of THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;

		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry))
			return false;

		pfn = migration_entry_to_pfn(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry))
			return false;

		pfn = device_private_entry_to_pfn(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

	return pfn_is_match(pvmw->page, pfn);
}

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() has returned
 * false, use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (unlikely(PageHuge(page))) {
		/* The only possible mapping was handled on last iteration */
		if (pvmw->pte)
			return not_found(pvmw);

		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}

	if (pvmw->pte)
		goto next_pte;
restart:
	{
		pgd = pgd_offset(mm, pvmw->address);
		if (!pgd_present(*pgd))
			return false;
		p4d = p4d_offset(pgd, pvmw->address);
		if (!p4d_present(*p4d))
			return false;
		pud = pud_offset(p4d, pvmw->address);
		if (!pud_present(*pud))
			return false;

		pvmw->pmd = pmd_offset(pud, pvmw->address);
		/*
		 * Make sure the pmd value isn't cached in a register by the
		 * compiler and used as a stale value after we've observed a
		 * subsequent update.
		 */
		pmde = READ_ONCE(*pvmw->pmd);

		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
			pmde = *pvmw->pmd;
			if (likely(pmd_trans_huge(pmde))) {
				if (pvmw->flags & PVMW_MIGRATION)
					return not_found(pvmw);
				if (pmd_page(pmde) != page)
					return not_found(pvmw);
				return true;
			}
			if (!pmd_present(pmde)) {
				swp_entry_t entry;

				if (!thp_migration_supported() ||
				    !(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				entry = pmd_to_swp_entry(pmde);
				if (!is_migration_entry(entry) ||
				    migration_entry_to_page(entry) != page)
					return not_found(pvmw);
				return true;
			}
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		} else if (!pmd_present(pmde)) {
			/*
			 * If PVMW_SYNC, take and drop THP pmd lock so that we
			 * cannot return prematurely, while zap_huge_pmd() has
			 * cleared *pmd but not decremented compound_mapcount().
			 */
			if ((pvmw->flags & PVMW_SYNC) &&
			    PageTransCompound(page)) {
				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

				spin_unlock(ptl);
			}
			return false;
		}
		if (!map_pte(pvmw))
			goto next_pte;
	}
	while (1) {
		unsigned long end;

		if (check_pte(pvmw))
			return true;
next_pte:
		/* Seek to next pte only makes sense for THP */
		if (!PageTransHuge(page))
			return not_found(pvmw);
		end = vma_address_end(page, pvmw->vma);
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				pte_unmap(pvmw->pte);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->pte++;
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
	}
}
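
/*
 * Illustrative usage sketch (not part of the original source; modelled on
 * the rmap walkers in mm/rmap.c): page_vma_mapped_walk() is typically
 * driven in a loop, with "page", "vma" and "address" supplied by the
 * caller:
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.page = page,
 *		.vma = vma,
 *		.address = address,
 *	};
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		if (pvmw.pte)
 *			;	// PTE mapping: *pvmw.pte, locked by pvmw.ptl
 *		else
 *			;	// PMD-mapped THP: examine *pvmw.pmd
 *	}
 *
 * Each true return leaves pvmw.ptl locked; the walk itself (or
 * page_vma_mapped_walk_done()) takes care of unlocking.
 */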

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA. Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

	pvmw.address = vma_address(page, vma);
	if (pvmw.address == -EFAULT)
		return 0;
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}
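
/*
 * Illustrative usage sketch (hypothetical caller, not part of the original
 * source): page_mapped_in_vma() is a one-shot synchronous check, used for
 * example by the memory-failure code to decide whether a poisoned page is
 * mapped by a particular VMA:
 *
 *	if (page_mapped_in_vma(page, vma))
 *		;	// page is present in this VMA's page tables
 */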