mm: page_vma_mapped_walk(): use goto instead of while (1)
mm/page_vma_mapped.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			/*
			 * We get here when we are trying to unmap a private
			 * device page from the process address space. Such a
			 * page is not CPU accessible and is thus mapped as a
			 * special swap entry; nonetheless it still counts as
			 * a valid regular mapping of the page (and is
			 * accounted as such in the page's map count).
			 *
			 * So handle this special case as if it were a normal
			 * page mapping, i.e. lock the CPU page table and
			 * return true.
			 *
			 * For more details on device private memory see HMM
			 * (include/linux/hmm.h or mm/hmm.c).
			 */
			if (is_swap_pte(*pvmw->pte)) {
				swp_entry_t entry;

				/* Handle un-addressable ZONE_DEVICE memory */
				entry = pte_to_swp_entry(*pvmw->pte);
				if (!is_device_private_entry(entry))
					return false;
			} else if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}

static inline bool pfn_is_match(struct page *page, unsigned long pfn)
{
	unsigned long page_pfn = page_to_pfn(page);

	/* normal page and hugetlbfs page */
	if (!PageTransCompound(page) || PageHuge(page))
		return page_pfn == pfn;

	/* THP can be referenced by any subpage */
	return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page);
}

/**
 * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, holding the pte and page to be checked
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to an empty PTE, a swap PTE, or a PTE mapping an
 * arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to @pvmw->page, or to any subpage in case of
 * THP.
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
 * @pvmw->page, or to any subpage in case of THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;

		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry))
			return false;

		pfn = migration_entry_to_pfn(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry))
			return false;

		pfn = device_private_entry_to_pfn(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

	return pfn_is_match(pvmw->page, pfn);
}

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped page
 * (usually a THP). For a PTE-mapped THP, you should run page_vma_mapped_walk()
 * in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at, and @pvmw->pmd
 * is NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() has returned
 * false, use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	unsigned long end;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (unlikely(PageHuge(page))) {
		/* The only possible mapping was handled on last iteration */
		if (pvmw->pte)
			return not_found(pvmw);

		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}

	if (pvmw->pte)
		goto next_pte;
restart:
	{
		pgd = pgd_offset(mm, pvmw->address);
		if (!pgd_present(*pgd))
			return false;
		p4d = p4d_offset(pgd, pvmw->address);
		if (!p4d_present(*p4d))
			return false;
		pud = pud_offset(p4d, pvmw->address);
		if (!pud_present(*pud))
			return false;

		pvmw->pmd = pmd_offset(pud, pvmw->address);
		/*
		 * Make sure the pmd value isn't cached in a register by the
		 * compiler and used as a stale value after we've observed a
		 * subsequent update.
		 */
		pmde = READ_ONCE(*pvmw->pmd);

		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
			pmde = *pvmw->pmd;
			if (likely(pmd_trans_huge(pmde))) {
				if (pvmw->flags & PVMW_MIGRATION)
					return not_found(pvmw);
				if (pmd_page(pmde) != page)
					return not_found(pvmw);
				return true;
			}
			if (!pmd_present(pmde)) {
				swp_entry_t entry;

				if (!thp_migration_supported() ||
				    !(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				entry = pmd_to_swp_entry(pmde);
				if (!is_migration_entry(entry) ||
				    migration_entry_to_page(entry) != page)
					return not_found(pvmw);
				return true;
			}
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		} else if (!pmd_present(pmde)) {
			/*
			 * If PVMW_SYNC, take and drop THP pmd lock so that we
			 * cannot return prematurely, while zap_huge_pmd() has
			 * cleared *pmd but not decremented compound_mapcount().
			 */
			if ((pvmw->flags & PVMW_SYNC) &&
			    PageTransCompound(page)) {
				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

				spin_unlock(ptl);
			}
			return false;
		}
		if (!map_pte(pvmw))
			goto next_pte;
this_pte:
		if (check_pte(pvmw))
			return true;
next_pte:
		/* Seek to next pte only makes sense for THP */
		if (!PageTransHuge(page))
			return not_found(pvmw);
		end = vma_address_end(page, pvmw->vma);
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				pte_unmap(pvmw->pte);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->pte++;
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
		goto this_pte;
	}
}
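
/*
 * Editorial usage sketch, not part of the kernel source: one way a caller
 * might drive the loop described in the kernel-doc above.  The helper name
 * example_count_mappings() is hypothetical and exists only for illustration;
 * real callers live in mm/rmap.c and similar.  PVMW_SYNC is used here simply
 * to mirror page_mapped_in_vma() below.
 */
static unsigned int example_count_mappings(struct page *page,
					   struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};
	unsigned int mapped = 0;

	pvmw.address = vma_address(page, vma);
	if (pvmw.address == -EFAULT)
		return 0;

	while (page_vma_mapped_walk(&pvmw)) {
		if (!pvmw.pte) {
			/*
			 * PMD-mapped THP: nothing smaller to iterate over,
			 * so stop the walk early; page_vma_mapped_walk_done()
			 * drops the lock taken by the walk.
			 */
			page_vma_mapped_walk_done(&pvmw);
			return 1;
		}
		/* One PTE mapping: a small page or one subpage of a THP */
		mapped++;
	}
	/* On a false return the walk has already unlocked and unmapped */
	return mapped;
}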

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

	pvmw.address = vma_address(page, vma);
	if (pvmw.address == -EFAULT)
		return 0;
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}
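
/*
 * Editorial sketch, not part of the kernel source: page_mapped_in_vma() is a
 * simple yes/no wrapper around the walk above.  The caller below is
 * hypothetical and assumes it already holds whatever rmap lock keeps @vma
 * stable, as its real users do.
 */
static void example_report_mapping(struct page *page,
				   struct vm_area_struct *vma)
{
	if (page_mapped_in_vma(page, vma))
		pr_info("pfn %#lx is mapped in this vma\n", page_to_pfn(page));
}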