// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#include "internal.h"

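/*
 * Per-walk state shared by the mm_walk callbacks below: @range is the
 * caller's hmm_range being filled in, and @last tracks the lowest address
 * whose pfn entry has not yet been produced, so that hmm_range_fault() can
 * restart the walk there after -EBUSY.
 */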
struct hmm_vma_walk {
	struct hmm_range *range;
	unsigned long last;
};

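/*
 * Fault requirements computed by hmm_pte_need_fault() and
 * hmm_range_need_fault(): whether a CPU page fault is needed at all and
 * whether it must be a write fault.
 */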
enum {
	HMM_NEED_FAULT = 1 << 0,
	HMM_NEED_WRITE_FAULT = 1 << 1,
	HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
};

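/*
 * Fill the range->hmm_pfns slots covering [addr, end) with one uniform
 * value, typically 0 (not valid) or HMM_PFN_ERROR.
 */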
static int hmm_pfns_fill(unsigned long addr, unsigned long end,
			 struct hmm_range *range, unsigned long cpu_flags)
{
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;

	for (; addr < end; addr += PAGE_SIZE, i++)
		range->hmm_pfns[i] = cpu_flags;
	return 0;
}

/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @required_fault: HMM_NEED_* flags
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
			 unsigned int required_fault, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;

	WARN_ON_ONCE(!required_fault);
	hmm_vma_walk->last = addr;

	if (required_fault & HMM_NEED_WRITE_FAULT) {
		if (!(vma->vm_flags & VM_WRITE))
			return -EPERM;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	for (; addr < end; addr += PAGE_SIZE)
		if (handle_mm_fault(vma, addr, fault_flags, NULL) &
		    VM_FAULT_ERROR)
			return -EFAULT;
	return -EBUSY;
}

static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				       unsigned long pfn_req_flags,
				       unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;

	/*
	 * Consider not only the individual per-page request but also the
	 * default flags requested for the whole range. The API can be used
	 * two ways: either the HMM user coalesces multiple page faults into
	 * one request and sets the per-pfn flags for those faults, or the
	 * HMM user pre-faults an entire range with uniform flags. For the
	 * latter it would be a waste to make the user pre-fill the pfn
	 * array with a default flags value.
	 */
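	/*
	 * For example (illustrative values, following the pattern documented
	 * in Documentation/mm/hmm.rst): a caller pre-faulting a whole range
	 * writable would set
	 *
	 *	range->default_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
	 *	range->pfn_flags_mask = 0;
	 *
	 * while a caller replaying individual device faults would set
	 * default_flags to 0 and pfn_flags_mask to -1UL so that only the
	 * per-pfn HMM_PFN_REQ_* bits it stored take effect.
	 */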
	pfn_req_flags &= range->pfn_flags_mask;
	pfn_req_flags |= range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfn_req_flags & HMM_PFN_REQ_FAULT))
		return 0;

	/* Do we need to write-fault? */
	if ((pfn_req_flags & HMM_PFN_REQ_WRITE) &&
	    !(cpu_flags & HMM_PFN_WRITE))
		return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

	/* If CPU page table is not valid then we need to fault */
	if (!(cpu_flags & HMM_PFN_VALID))
		return HMM_NEED_FAULT;
	return 0;
}

static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
		     const unsigned long hmm_pfns[], unsigned long npages,
		     unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault = 0;
	unsigned long i;

	/*
	 * If the default flags do not request to fault pages, and the mask does
	 * not allow for individual pages to be faulted, then
	 * hmm_pte_need_fault() will always return 0.
	 */
	if (!((range->default_flags | range->pfn_flags_mask) &
	      HMM_PFN_REQ_FAULT))
		return 0;

	for (i = 0; i < npages; ++i) {
		required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i],
						     cpu_flags);
		if (required_fault == HMM_NEED_ALL_BITS)
			return required_fault;
	}
	return required_fault;
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long i, npages;
	unsigned long *hmm_pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	hmm_pfns = &range->hmm_pfns[i];
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
	if (!walk->vma) {
		if (required_fault)
			return -EFAULT;
		return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
	}
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);
	return hmm_pfns_fill(addr, end, range, 0);
}

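/*
 * The mapping size is encoded as a page order in the high bits of the
 * hmm_pfn value (see HMM_PFN_ORDER_SHIFT in include/linux/hmm.h), so a
 * caller can tell that a whole aligned power-of-two block of addresses
 * shares the same backing and flags.
 */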
static inline unsigned long hmm_pfn_flags_order(unsigned long order)
{
	return order << HMM_PFN_ORDER_SHIFT;
}

static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
						 pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, unsigned long hmm_pfns[],
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	unsigned int required_fault;
	unsigned long cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		hmm_pfns[i] = pfn | cpu_flags;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		       unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
						 pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      unsigned long *hmm_pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long cpu_flags;
	pte_t pte = *ptep;
	uint64_t pfn_req_flags = *hmm_pfn;

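	/*
	 * pte_none_mostly() also matches pte markers: both truly empty ptes
	 * and markers are either faulted in or reported as not valid.
	 */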
	if (pte_none_mostly(pte)) {
		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (required_fault)
			goto fault;
		*hmm_pfn = 0;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		/*
		 * Don't fault in device private pages owned by the caller,
		 * just report the PFN.
		 */
		if (is_device_private_entry(entry) &&
		    pfn_swap_entry_to_page(entry)->pgmap->owner ==
		    range->dev_private_owner) {
			cpu_flags = HMM_PFN_VALID;
			if (is_writable_device_private_entry(entry))
				cpu_flags |= HMM_PFN_WRITE;
			*hmm_pfn = swp_offset_pfn(entry) | cpu_flags;
			return 0;
		}

		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (!required_fault) {
			*hmm_pfn = 0;
			return 0;
		}

		if (!non_swap_entry(entry))
			goto fault;

		if (is_device_private_entry(entry))
			goto fault;

		if (is_device_exclusive_entry(entry))
			goto fault;

		if (is_migration_entry(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report error for everything else */
		pte_unmap(ptep);
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault)
		goto fault;

	/*
	 * Bypass devmap ptes such as DAX pages when all the requested pfn
	 * flags (pfn_req_flags) are fulfilled.
	 * Since each architecture defines a struct page for the zero page, just
	 * fall through and treat it like a normal page.
	 */
	if (!vm_normal_page(walk->vma, addr, pte) &&
	    !pte_devmap(pte) &&
	    !is_zero_pfn(pte_pfn(pte))) {
		if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		*hmm_pfn = HMM_PFN_ERROR;
		return 0;
	}

	*hmm_pfn = pte_pfn(pte) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_fault(addr, end, required_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long *hmm_pfns =
		&range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, 0);
	}

	if (!pmd_present(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take the pmd lock here: even if some other
		 * thread is splitting the huge pmd, we will get that event
		 * through the mmu_notifier callback.
		 *
		 * So just read the pmd value again, check that it is still a
		 * transparent huge or device mapping, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmdp_get_lockless(pmdp);
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. none, migration,
	 * huge or transparent huge. At this point the pmd is either a valid
	 * entry pointing to a pte directory or a bad pmd that will not
	 * recover.
	 */
	if (pmd_bad(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, hmm_pfns);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			return r;
		}
	}
	pte_unmap(ptep - 1);
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
						 pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (pud_none(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		unsigned int required_fault;
		unsigned long *hmm_pfns;
		unsigned long cpu_flags;

		if (!pud_present(pud)) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole(start, end, -1, walk);
		}

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		hmm_pfns = &range->hmm_pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
						      npages, cpu_flags);
		if (required_fault) {
			spin_unlock(ptl);
			return hmm_vma_fault(addr, end, required_fault, walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn)
			hmm_pfns[i] = pfn | cpu_flags;
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	unsigned int required_fault;
	unsigned long pfn_req_flags;
	unsigned long cpu_flags;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	pfn_req_flags = range->hmm_pfns[i];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
		    hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault) {
		int ret;

		spin_unlock(ptl);
		hugetlb_vma_unlock_read(vma);
		/*
		 * Avoid deadlock: drop the vma lock before calling
		 * hmm_vma_fault(), which will itself potentially take and
		 * drop the vma lock. This is also correct from a
		 * protection point of view, because there is no further
		 * use here of either pte or ptl after dropping the vma
		 * lock.
		 */
		ret = hmm_vma_fault(addr, end, required_fault, walk);
		hugetlb_vma_lock_read(vma);
		return ret;
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->hmm_pfns[i] = pfn | cpu_flags;

	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)) &&
	    vma->vm_flags & VM_READ)
		return 0;

	/*
	 * vma ranges that don't have struct page backing them or map I/O
	 * devices directly cannot be handled by hmm_range_fault().
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 *
	 * If a fault is requested for an unsupported range then it is a hard
	 * failure.
	 */
	if (hmm_range_need_fault(hmm_vma_walk,
				 range->hmm_pfns +
					 ((start - range->start) >> PAGE_SHIFT),
				 (end - start) >> PAGE_SHIFT, 0))
		return -EFAULT;

	hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

	/* Skip this vma and continue processing the next vma. */
	return 1;
}

static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry = hmm_vma_walk_pud,
	.pmd_entry = hmm_vma_walk_pmd,
	.pte_hole = hmm_vma_walk_hole,
	.hugetlb_entry = hmm_vma_walk_hugetlb_entry,
	.test_walk = hmm_vma_walk_test,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range:	argument structure
 *
 * Returns 0 on success or one of the following error codes:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid vma
 *		(e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	A page was requested to be valid and could not be made valid,
 *		i.e. it has no backing VMA or it is illegal to access.
 *
 * This is similar to get_user_pages(), except that it can read the page tables
 * without mutating them (i.e. causing faults).
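 *
 * A sketch of the expected calling pattern; the driver-side helpers
 * (take_driver_pagetable_lock() etc.) are illustrative only, see
 * Documentation/mm/hmm.rst for the full pattern:
 *
 *	again:
 *		range.notifier_seq = mmu_interval_read_begin(range.notifier);
 *		mmap_read_lock(mm);
 *		ret = hmm_range_fault(&range);
 *		mmap_read_unlock(mm);
 *		if (ret == -EBUSY)
 *			goto again;
 *		if (ret)
 *			return ret;
 *		take_driver_pagetable_lock();
 *		if (mmu_interval_read_retry(range.notifier,
 *					    range.notifier_seq)) {
 *			release_driver_pagetable_lock();
 *			goto again;
 *		}
 *		// program the device page table from range.hmm_pfns
 *		release_driver_pagetable_lock();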
 */
int hmm_range_fault(struct hmm_range *range)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	mmap_assert_locked(mm);

	do {
		/* If range is no longer valid force retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
		/*
		 * When -EBUSY is returned the loop restarts with
		 * hmm_vma_walk.last set to an address that has not been stored
		 * in pfns. All entries < last in the pfn array are set to their
		 * output, and all >= are still at their input values.
		 */
	} while (ret == -EBUSY);
	return ret;
}
EXPORT_SYMBOL(hmm_range_fault);