// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

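/*
 * Map the pte at pvmw->address and take its lock.  Unless PVMW_SYNC is
 * set, bail out early (returning false, with the pte still mapped but
 * not locked) when the pte cannot possibly be a relevant mapping, so
 * the caller can cheaply step to the next one.
 */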
static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			/*
			 * We get here when we are trying to unmap a private
			 * device page from the process address space. Such a
			 * page is not CPU accessible and is therefore mapped
			 * as a special swap entry; nonetheless it still
			 * counts as a valid regular mapping of the page (and
			 * is accounted for as such in the page's map count).
			 *
			 * So handle this special case as if it were a normal
			 * page mapping, i.e. lock the CPU page table and
			 * return true.
			 *
			 * For more details on device private memory see HMM
			 * (include/linux/hmm.h or mm/hmm.c).
			 */
			if (is_swap_pte(*pvmw->pte)) {
				swp_entry_t entry;

				/* Handle un-addressable ZONE_DEVICE memory */
				entry = pte_to_swp_entry(*pvmw->pte);
				if (!is_device_private_entry(entry) &&
				    !is_device_exclusive_entry(entry))
					return false;
			} else if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}

/**
 * check_pte - check if the pfn range [@pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages)
 * is mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, includes the pte and the pfn range
 * to check
 *
 * page_vma_mapped_walk() found a place where the pfn range is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to
 * an arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points into the pfn range.
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points
 * into the pfn range.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;
		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset_pfn(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset_pfn(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

	return (pfn - pvmw->pfn) < pvmw->nr_pages;
}

/*
 * Returns true if the two ranges overlap: the HPAGE_PMD_NR pages starting at
 * @pfn, and [pvmw->pfn, pvmw->pfn + pvmw->nr_pages). Careful not to overflow.
 */
static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw)
{
	if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn)
		return false;
	if (pfn > pvmw->pfn + pvmw->nr_pages - 1)
		return false;
	return true;
}

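/*
 * Advance pvmw->address to the next @size-aligned boundary (PMD_SIZE,
 * PUD_SIZE, etc.), saturating to ULONG_MAX if the computation wraps past
 * the end of the address space.
 */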
static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
	pvmw->address = (pvmw->address + size) & ~(size - 1);
	if (!pvmw->address)
		pvmw->address = ULONG_MAX;
}

/**
 * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. pfn, nr_pages, vma, address
 * and flags must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped page
 * (usually a THP). For a PTE-mapped THP, you should run page_vma_mapped_walk()
 * in a loop to find all PTEs that map the THP; a sketch of such a caller loop
 * is given at the end of this comment.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() has returned
 * false, use page_vma_mapped_walk_done(). It will do the housekeeping.
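 *
 * A minimal sketch of a typical caller loop (the variable names here are
 * illustrative, not taken from a specific in-tree user):
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.pfn = page_to_pfn(page),
 *		.nr_pages = compound_nr(page),
 *		.vma = vma,
 *		.address = address,
 *		.flags = 0,
 *	};
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		... pvmw.pte (or pvmw.pmd) and pvmw.ptl are valid here ...
 *	}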
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long end;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (unlikely(is_vm_hugetlb_page(vma))) {
		struct hstate *hstate = hstate_vma(vma);
		unsigned long size = huge_page_size(hstate);
		/* The only possible mapping was handled on last iteration */
		if (pvmw->pte)
			return not_found(pvmw);

		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address, size);
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}

	end = vma_address_end(pvmw);
	if (pvmw->pte)
		goto next_pte;
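	/*
	 * Scan the page tables from pvmw->address up to the end of the
	 * range: step over holes at each level, handle huge mappings at
	 * the pmd level, and otherwise scan the individual ptes.
	 */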
restart:
	do {
		pgd = pgd_offset(mm, pvmw->address);
		if (!pgd_present(*pgd)) {
			step_forward(pvmw, PGDIR_SIZE);
			continue;
		}
		p4d = p4d_offset(pgd, pvmw->address);
		if (!p4d_present(*p4d)) {
			step_forward(pvmw, P4D_SIZE);
			continue;
		}
		pud = pud_offset(p4d, pvmw->address);
		if (!pud_present(*pud)) {
			step_forward(pvmw, PUD_SIZE);
			continue;
		}

		pvmw->pmd = pmd_offset(pud, pvmw->address);
		/*
		 * Make sure the pmd value isn't cached in a register by the
		 * compiler and used as a stale value after we've observed a
		 * subsequent update.
		 */
		pmde = READ_ONCE(*pvmw->pmd);

		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) ||
		    (pmd_present(pmde) && pmd_devmap(pmde))) {
			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
			pmde = *pvmw->pmd;
			if (!pmd_present(pmde)) {
				swp_entry_t entry;

				if (!thp_migration_supported() ||
				    !(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				entry = pmd_to_swp_entry(pmde);
				if (!is_migration_entry(entry) ||
				    !check_pmd(swp_offset_pfn(entry), pvmw))
					return not_found(pvmw);
				return true;
			}
			if (likely(pmd_trans_huge(pmde) || pmd_devmap(pmde))) {
				if (pvmw->flags & PVMW_MIGRATION)
					return not_found(pvmw);
				if (!check_pmd(pmd_pfn(pmde), pvmw))
					return not_found(pvmw);
				return true;
			}
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		} else if (!pmd_present(pmde)) {
			/*
			 * If PVMW_SYNC, take and drop THP pmd lock so that we
			 * cannot return prematurely, while zap_huge_pmd() has
			 * cleared *pmd but not decremented compound_mapcount().
			 */
			if ((pvmw->flags & PVMW_SYNC) &&
			    transhuge_vma_suitable(vma, pvmw->address) &&
			    (pvmw->nr_pages >= HPAGE_PMD_NR)) {
				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

				spin_unlock(ptl);
			}
			step_forward(pvmw, PMD_SIZE);
			continue;
		}
		if (!map_pte(pvmw))
			goto next_pte;
this_pte:
		if (check_pte(pvmw))
			return true;
next_pte:
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				pte_unmap(pvmw->pte);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->pte++;
			if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
				pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
				spin_lock(pvmw->ptl);
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
		goto this_pte;
	} while (pvmw->address < end);

	return false;
}

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA. Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.pfn = page_to_pfn(page),
		.nr_pages = 1,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

	pvmw.address = vma_address(page, vma);
	if (pvmw.address == -EFAULT)
		return 0;
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}