// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

struct hmm_vma_walk {
	struct hmm_range	*range;
	struct dev_pagemap	*pgmap;
	unsigned long		last;
	unsigned int		flags;
};
static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	if (!vma)
		goto err;

	if (hmm_vma_walk->flags & HMM_FAULT_ALLOW_RETRY)
		flags |= FAULT_FLAG_ALLOW_RETRY;
	if (write_fault)
		flags |= FAULT_FLAG_WRITE;

	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY) {
		/* Note, handle_mm_fault did up_read(&mm->mmap_sem) */
		return -EAGAIN;
	}
	if (ret & VM_FAULT_ERROR)
		goto err;
	return -EBUSY;

err:
	*pfn = range->values[HMM_PFN_ERROR];
	return -EFAULT;
}
static int hmm_pfns_fill(unsigned long addr, unsigned long end,
		struct hmm_range *range, enum hmm_pfn_value_e value)
{
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[value];
	return 0;
}
/*
 * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not?
 * @write_fault: write fault?
 * @walk: mm_walk structure
 * Return: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns
 * true, or whenever there is no page directory covering the virtual address
 * range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	hmm_vma_walk->last = addr;
	i = (addr - range->start) >> PAGE_SHIFT;

	if (write_fault && walk->vma && !(walk->vma->vm_flags & VM_WRITE))
		return -EPERM;

	for (; addr < end; addr += PAGE_SIZE, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EBUSY)
				return ret;
		}
	}
	return (fault || write_fault) ? -EBUSY : 0;
}
static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT)
		return;

	/*
	 * We consider not only the individual per-page request but also the
	 * default flags requested for the whole range. The API can be used
	 * two ways: in the first, the HMM user coalesces multiple page
	 * faults into one request and sets flags per pfn for those faults;
	 * in the second, the HMM user wants to pre-fault a range with
	 * specific flags. For the latter it would be a waste to have the
	 * user pre-fill the pfn array with a default flags value.
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;

	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}
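
/*
 * For illustration, a driver-side sketch of the two modes described in the
 * comment above (not part of this file; the fields are those of struct
 * hmm_range, and the values are examples only):
 *
 *	// Pre-fault mode: request the whole range valid and writable,
 *	// ignoring whatever was pre-filled in the pfns[] array.
 *	range.default_flags = range.flags[HMM_PFN_VALID] |
 *			      range.flags[HMM_PFN_WRITE];
 *	range.pfn_flags_mask = 0;
 *
 *	// Coalesced mode: no range-wide default; honor the per-pfn flags
 *	// the driver stored in each pfns[] entry.
 *	range.default_flags = 0;
 *	range.pfn_flags_mask = ~0ULL;
 */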
static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if (*write_fault)
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}
static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, uint64_t *pfns, pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		if (pmd_devmap(pmd)) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
		}
		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
	}
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	hmm_vma_walk->last = end;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, uint64_t *pfns, pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}
static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	fault = write_fault = false;

	if (pte_none(pte)) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
				   &fault, &write_fault);
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);
		/*
		 * This is a special swap entry: ignore migration, use the
		 * device, and report anything else as an error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_device_entry_from_pfn(range,
					swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0, &fault,
				   &write_fault);
		if (!fault && !write_fault)
			return 0;
		if (!non_swap_entry(entry))
			goto fault;
		if (is_migration_entry(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report error for everything else */
		pte_unmap(ptep);
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags, &fault,
			   &write_fault);
	if (fault || write_fault)
		goto fault;

	if (pte_devmap(pte)) {
		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
				hmm_vma_walk->pgmap);
		if (unlikely(!hmm_vma_walk->pgmap)) {
			pte_unmap(ptep);
			return -EBUSY;
		}
	}

	/*
	 * Since each architecture defines a struct page for the zero page,
	 * just fall through and treat it like a normal page.
	 */
	if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0, &fault,
				   &write_fault);
		if (fault || write_fault) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		*pfn = range->values[HMM_PFN_SPECIAL];
		return 0;
	}

	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}
static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = &range->pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	bool fault, write_fault;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
	}

	if (!pmd_present(pmd)) {
		hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0, &fault,
				     &write_fault);
		if (fault || write_fault)
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take the pmd_lock here; even if some other
		 * thread is splitting the huge pmd we will get that event
		 * through the mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping, then compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, pfns, pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point it is either a
	 * valid pmd entry pointing to a pte directory or a bad pmd that will
	 * not recover.
	 */
	if (pmd_bad(pmd)) {
		hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0, &fault,
				     &write_fault);
		if (fault || write_fault)
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	for (; addr < end; addr += PAGE_SIZE, ptep++, pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, pfns);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	if (hmm_vma_walk->pgmap) {
		/*
		 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
		 * so that we can leverage the get_dev_pagemap() optimization
		 * which will not re-take a reference on a pgmap if we already
		 * have one.
		 */
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}
#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
		struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	int ret = 0;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (pud_none(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		uint64_t *pfns, cpu_flags;
		bool fault, write_fault;

		if (!pud_present(pud)) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole(start, end, -1, walk);
		}

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     cpu_flags, &fault, &write_fault);
		if (fault || write_fault) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole_(addr, end, fault,
						  write_fault, walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap)) {
				ret = -EBUSY;
				goto out_unlock;
			}
			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				  cpu_flags;
		}
		if (hmm_vma_walk->pgmap) {
			put_dev_pagemap(hmm_vma_walk->pgmap);
			hmm_vma_walk->pgmap = NULL;
		}
		hmm_vma_walk->last = end;
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return ret;
}
#else
#define hmm_vma_walk_pud	NULL
#endif
#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t orig_pfn, cpu_flags;
	bool fault, write_fault;
	spinlock_t *ptl;
	pte_t entry;
	int ret = 0;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	fault = write_fault = false;
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);
	if (fault || write_fault) {
		ret = -ENOENT;
		goto unlock;
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				 cpu_flags;
	hmm_vma_walk->last = end;

unlock:
	spin_unlock(ptl);

	if (ret == -ENOENT)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	return ret;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */
static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	/*
	 * Skip vma ranges that don't have struct page backing them or map
	 * I/O devices directly.
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 */
	if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) ||
	    !(vma->vm_flags & VM_READ)) {
		bool fault, write_fault;

		/*
		 * Check to see if a fault is requested for any page in the
		 * range.
		 */
		hmm_range_need_fault(hmm_vma_walk, range->pfns +
					((start - range->start) >> PAGE_SHIFT),
					(end - start) >> PAGE_SHIFT,
					0, &fault, &write_fault);
		if (fault || write_fault)
			return -EFAULT;

		hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
		hmm_vma_walk->last = end;

		/* Skip this vma and continue processing the next vma. */
		return 1;
	}

	return 0;
}
static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry	= hmm_vma_walk_pud,
	.pmd_entry	= hmm_vma_walk_pmd,
	.pte_hole	= hmm_vma_walk_hole,
	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
	.test_walk	= hmm_vma_walk_test,
};
/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range:	range being faulted
 * @flags:	HMM_FAULT_* flags
 *
 * Return: the number of valid pages in range->pfns[] (from range start
 * address), which may be zero. On error one of the following status codes
 * can be returned:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid vma
 *		(e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EAGAIN:	A page fault needs to be retried and mmap_sem was dropped.
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	Invalid access (i.e., either there is no valid vma or it is
 *		illegal to access the range).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and the caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, unsigned int flags)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
		.flags = flags,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	lockdep_assert_held(&mm->mmap_sem);

	do {
		/* If range is no longer valid force retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
	} while (ret == -EBUSY);

	if (ret)
		return ret;
	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
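
/*
 * A minimal caller-side sketch (illustrative only, adapted from the pattern
 * in Documentation/vm/hmm.rst; not compiled here). "interval_sub" is an
 * assumed mmu_interval_notifier the driver has registered, and
 * take_lock()/release_lock() stand in for whatever lock the driver uses to
 * serialize device page table updates against invalidation. The caller must
 * retry whenever the notifier sequence becomes stale:
 *
 *	range.notifier = &interval_sub;
 *	range.start = ...;
 *	range.end = ...;
 *	range.pfns = ...;
 *	range.flags = ...;
 *	range.values = ...;
 *	range.pfn_shift = ...;
 *
 * again:
 *	range.notifier_seq = mmu_interval_read_begin(&interval_sub);
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_fault(&range, 0);
 *	up_read(&mm->mmap_sem);
 *	if (ret < 0) {
 *		if (ret == -EBUSY)
 *			goto again;
 *		return ret;
 *	}
 *
 *	take_lock(driver->update);
 *	if (mmu_interval_read_retry(&interval_sub, range.notifier_seq)) {
 *		release_lock(driver->update);
 *		goto again;
 *	}
 *	// Use the pfns array content to update the device page table.
 *	release_lock(driver->update);
 *	return 0;
 */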