mm/page_vma_mapped.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
        page_vma_mapped_walk_done(pvmw);
        return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
        pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
        if (!(pvmw->flags & PVMW_SYNC)) {
                if (pvmw->flags & PVMW_MIGRATION) {
                        if (!is_swap_pte(*pvmw->pte))
                                return false;
                } else {
                        /*
                         * We get here when we are trying to unmap a private
                         * device page from the process address space. Such a
                         * page is not CPU accessible and is therefore mapped
                         * as a special swap entry; nonetheless it still
                         * counts as a valid regular mapping for the page
                         * (and is accounted as such in the page map counts).
                         *
                         * So handle this special case as if it were a normal
                         * page mapping, i.e. lock the CPU page table and
                         * return true.
                         *
                         * For more details on device private memory see HMM
                         * (include/linux/hmm.h or mm/hmm.c).
                         */
                        if (is_swap_pte(*pvmw->pte)) {
                                swp_entry_t entry;

                                /* Handle un-addressable ZONE_DEVICE memory */
                                entry = pte_to_swp_entry(*pvmw->pte);
                                if (!is_device_private_entry(entry) &&
                                    !is_device_exclusive_entry(entry))
                                        return false;
                        } else if (!pte_present(*pvmw->pte))
                                return false;
                }
        }
        pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
        spin_lock(pvmw->ptl);
        return true;
}
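
/*
 * On success, map_pte() leaves pvmw->pte mapped and pvmw->ptl locked, and it
 * is the caller's job to undo both.  A minimal sketch of the cleanup pairing
 * (this is what page_vma_mapped_walk_done() performs for users of the walker;
 * the snippet is illustrative only):
 *
 *	if (pvmw->pte)
 *		pte_unmap(pvmw->pte);
 *	if (pvmw->ptl)
 *		spin_unlock(pvmw->ptl);
 */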

/**
 * check_pte - check if the pfn range of @pvmw is mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, includes the pte and pfn range to check
 *
 * page_vma_mapped_walk() found a place where the pfn range is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to an
 * arbitrary page.
 *
 * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points into the pfn range (to any subpage in the
 * case of a THP).
 *
 * If PVMW_MIGRATION flag is not set, returns true if pvmw->pte points into
 * the pfn range (to any subpage in the case of a THP).
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
        unsigned long pfn;

        if (pvmw->flags & PVMW_MIGRATION) {
                swp_entry_t entry;

                if (!is_swap_pte(*pvmw->pte))
                        return false;
                entry = pte_to_swp_entry(*pvmw->pte);

                if (!is_migration_entry(entry) &&
                    !is_device_exclusive_entry(entry))
                        return false;

                pfn = swp_offset_pfn(entry);
        } else if (is_swap_pte(*pvmw->pte)) {
                swp_entry_t entry;

                /* Handle un-addressable ZONE_DEVICE memory */
                entry = pte_to_swp_entry(*pvmw->pte);
                if (!is_device_private_entry(entry) &&
                    !is_device_exclusive_entry(entry))
                        return false;

                pfn = swp_offset_pfn(entry);
        } else {
                if (!pte_present(*pvmw->pte))
                        return false;

                pfn = pte_pfn(*pvmw->pte);
        }

        return (pfn - pvmw->pfn) < pvmw->nr_pages;
}
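
/*
 * The final check relies on unsigned wraparound: if pfn is below pvmw->pfn,
 * the subtraction wraps to a huge value and fails the "< nr_pages" test, so
 * a single comparison covers both ends of the range.  A worked example
 * (values are illustrative only): with pvmw->pfn == 0x1000 and
 * pvmw->nr_pages == 512, pfns 0x1000..0x11ff pass; pfn 0x0fff yields
 * 0x0fff - 0x1000 == ULONG_MAX, which the same comparison rejects.
 */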

/* Returns true if the two ranges overlap.  Careful to not overflow. */
static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw)
{
        if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn)
                return false;
        if (pfn > pvmw->pfn + pvmw->nr_pages - 1)
                return false;
        return true;
}
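
/*
 * check_pmd() treats the PMD mapping as the pfn range
 * [pfn, pfn + HPAGE_PMD_NR - 1] and the walk target as
 * [pvmw->pfn, pvmw->pfn + pvmw->nr_pages - 1]; the ranges overlap unless one
 * ends before the other begins.  Comparing last-valid pfns via "- 1" rather
 * than one-past-the-end values is what keeps the arithmetic from
 * overflowing.  Illustrative example, assuming HPAGE_PMD_NR == 512: a THP at
 * pfn 0x200 covers 0x200..0x3ff, so a target range of 0x3f0..0x3f7 overlaps
 * while 0x400..0x407 does not.
 */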

static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
        pvmw->address = (pvmw->address + size) & ~(size - 1);
        if (!pvmw->address)
                pvmw->address = ULONG_MAX;
}
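
/*
 * step_forward() advances pvmw->address to the next boundary of @size (a
 * power of two), strictly beyond the current address.  Illustrative example
 * with a 2 MiB PMD_SIZE: 0x1234000 becomes
 * (0x1234000 + 0x200000) & ~0x1fffff == 0x1400000.  If the addition wraps
 * past the top of the address space, the result is 0 and is clamped to
 * ULONG_MAX so the walker's "address < end" loop terminates.
 */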

/**
 * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. pfn, nr_pages, vma, address
 * and flags must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
        struct vm_area_struct *vma = pvmw->vma;
        struct mm_struct *mm = vma->vm_mm;
        unsigned long end;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t pmde;

        /* The only possible pmd mapping was handled on the last iteration */
        if (pvmw->pmd && !pvmw->pte)
                return not_found(pvmw);

        if (unlikely(is_vm_hugetlb_page(vma))) {
                struct hstate *hstate = hstate_vma(vma);
                unsigned long size = huge_page_size(hstate);
                /* The only possible mapping was handled on the last iteration */
                if (pvmw->pte)
                        return not_found(pvmw);
                /*
                 * All callers that get here will already hold the
                 * i_mmap_rwsem.  Therefore, no additional locks need to be
                 * taken before calling hugetlb_walk().
                 */
                pvmw->pte = hugetlb_walk(vma, pvmw->address, size);
                if (!pvmw->pte)
                        return false;

                pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);
                if (!check_pte(pvmw))
                        return not_found(pvmw);
                return true;
        }

        end = vma_address_end(pvmw);
        if (pvmw->pte)
                goto next_pte;
restart:
        do {
                pgd = pgd_offset(mm, pvmw->address);
                if (!pgd_present(*pgd)) {
                        step_forward(pvmw, PGDIR_SIZE);
                        continue;
                }
                p4d = p4d_offset(pgd, pvmw->address);
                if (!p4d_present(*p4d)) {
                        step_forward(pvmw, P4D_SIZE);
                        continue;
                }
                pud = pud_offset(p4d, pvmw->address);
                if (!pud_present(*pud)) {
                        step_forward(pvmw, PUD_SIZE);
                        continue;
                }

                pvmw->pmd = pmd_offset(pud, pvmw->address);
                /*
                 * Make sure the pmd value isn't cached in a register by the
                 * compiler and used as a stale value after we've observed a
                 * subsequent update.
                 */
                pmde = READ_ONCE(*pvmw->pmd);

                if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) ||
                    (pmd_present(pmde) && pmd_devmap(pmde))) {
                        pvmw->ptl = pmd_lock(mm, pvmw->pmd);
                        pmde = *pvmw->pmd;
                        if (!pmd_present(pmde)) {
                                swp_entry_t entry;

                                if (!thp_migration_supported() ||
                                    !(pvmw->flags & PVMW_MIGRATION))
                                        return not_found(pvmw);
                                entry = pmd_to_swp_entry(pmde);
                                if (!is_migration_entry(entry) ||
                                    !check_pmd(swp_offset_pfn(entry), pvmw))
                                        return not_found(pvmw);
                                return true;
                        }
                        if (likely(pmd_trans_huge(pmde) || pmd_devmap(pmde))) {
                                if (pvmw->flags & PVMW_MIGRATION)
                                        return not_found(pvmw);
                                if (!check_pmd(pmd_pfn(pmde), pvmw))
                                        return not_found(pvmw);
                                return true;
                        }
                        /* THP pmd was split under us: handle on pte level */
                        spin_unlock(pvmw->ptl);
                        pvmw->ptl = NULL;
                } else if (!pmd_present(pmde)) {
                        /*
                         * If PVMW_SYNC, take and drop THP pmd lock so that we
                         * cannot return prematurely, while zap_huge_pmd() has
                         * cleared *pmd but not decremented compound_mapcount().
                         */
                        if ((pvmw->flags & PVMW_SYNC) &&
                            transhuge_vma_suitable(vma, pvmw->address) &&
                            (pvmw->nr_pages >= HPAGE_PMD_NR)) {
                                spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

                                spin_unlock(ptl);
                        }
                        step_forward(pvmw, PMD_SIZE);
                        continue;
                }
                if (!map_pte(pvmw))
                        goto next_pte;
this_pte:
                if (check_pte(pvmw))
                        return true;
next_pte:
                do {
                        pvmw->address += PAGE_SIZE;
                        if (pvmw->address >= end)
                                return not_found(pvmw);
                        /* Did we cross a page table boundary? */
                        if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
                                if (pvmw->ptl) {
                                        spin_unlock(pvmw->ptl);
                                        pvmw->ptl = NULL;
                                }
                                pte_unmap(pvmw->pte);
                                pvmw->pte = NULL;
                                goto restart;
                        }
                        pvmw->pte++;
                        if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
                                pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
                                spin_lock(pvmw->ptl);
                        }
                } while (pte_none(*pvmw->pte));

                if (!pvmw->ptl) {
                        pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
                        spin_lock(pvmw->ptl);
                }
                goto this_pte;
        } while (pvmw->address < end);

        return false;
}
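
/*
 * Typical use of the walker, a minimal sketch modelled on callers in
 * mm/rmap.c (which usually build the struct via DEFINE_FOLIO_VMA_WALK();
 * the body shown here is illustrative only):
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.pfn = folio_pfn(folio),
 *		.nr_pages = folio_nr_pages(folio),
 *		.vma = vma,
 *		.address = address,
 *	};
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		// pvmw.pte (or pvmw.pmd for a PMD mapping) is valid here
 *		// and pvmw.ptl is held; inspect or update the entry.
 *	}
 *
 * A false return has already unlocked and unmapped everything; to leave the
 * loop early, call page_vma_mapped_walk_done() rather than just breaking.
 */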

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
        struct page_vma_mapped_walk pvmw = {
                .pfn = page_to_pfn(page),
                .nr_pages = 1,
                .vma = vma,
                .flags = PVMW_SYNC,
        };

        pvmw.address = vma_address(page, vma);
        if (pvmw.address == -EFAULT)
                return 0;
        if (!page_vma_mapped_walk(&pvmw))
                return 0;
        page_vma_mapped_walk_done(&pvmw);
        return 1;
}
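
/*
 * A minimal usage sketch (illustrative only):
 *
 *	if (page_mapped_in_vma(page, vma))
 *		pr_debug("pfn %lx is mapped in this vma\n", page_to_pfn(page));
 *
 * The helper finishes the walk itself, so the caller holds no page table
 * locks on return.
 */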