// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>
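
/*
 * Private state carried through the page table walk below, passed to each
 * callback via mm_walk->private.
 */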
struct hmm_vma_walk {
        struct hmm_range        *range;
        struct dev_pagemap      *pgmap;
        unsigned long           last;
        unsigned int            flags;
};
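
/*
 * Fill the pfns array for [addr, end) with the canonical value requested by
 * the caller (e.g. HMM_PFN_NONE or HMM_PFN_ERROR).
 */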
static int hmm_pfns_fill(unsigned long addr, unsigned long end,
                struct hmm_range *range, enum hmm_pfn_value_e value)
{
        uint64_t *pfns = range->pfns;
        unsigned long i;

        i = (addr - range->start) >> PAGE_SHIFT;
        for (; addr < end; addr += PAGE_SIZE, i++)
                pfns[i] = range->values[value];
        return 0;
}
/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not?
 * @write_fault: write fault?
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
                        bool fault, bool write_fault,
                        struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;
        uint64_t *pfns = range->pfns;
        unsigned long i = (addr - range->start) >> PAGE_SHIFT;
        unsigned int fault_flags = FAULT_FLAG_REMOTE;

        WARN_ON_ONCE(!fault && !write_fault);
        hmm_vma_walk->last = addr;

        if (!vma)
                goto out_error;

        if (write_fault) {
                if (!(vma->vm_flags & VM_WRITE))
                        return -EPERM;
                fault_flags |= FAULT_FLAG_WRITE;
        }

        for (; addr < end; addr += PAGE_SIZE, i++)
                if (handle_mm_fault(vma, addr, fault_flags) & VM_FAULT_ERROR)
                        goto out_error;

        return -EBUSY;

out_error:
        pfns[i] = range->values[HMM_PFN_ERROR];
        return -EFAULT;
}
static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
                                      uint64_t pfns, uint64_t cpu_flags,
                                      bool *fault, bool *write_fault)
{
        struct hmm_range *range = hmm_vma_walk->range;

        if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT)
                return;

        /*
         * We consider not only the individual per-page request, but also the
         * default flags requested for the range. The API can be used in two
         * ways: in the first, the HMM user coalesces multiple page faults
         * into one request and sets flags per pfn for those faults; in the
         * second, the HMM user wants to pre-fault a range with specific
         * flags. For the latter it would be a waste to have the user
         * pre-fill the pfn array with a default flags value.
         */
        pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

        /* We aren't asked to do anything ... */
        if (!(pfns & range->flags[HMM_PFN_VALID]))
                return;

        /* If the CPU page table is not valid then we need to fault */
        *fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
        /* Need to write fault? */
        if ((pfns & range->flags[HMM_PFN_WRITE]) &&
            !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
                *write_fault = true;
                *fault = true;
        }
}
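
/*
 * A minimal illustration of the two modes described above (nothing here is
 * taken verbatim from any driver): to pre-fault an entire range as readable,
 * a caller can set
 *
 *	range->default_flags = range->flags[HMM_PFN_VALID];
 *	range->pfn_flags_mask = 0;
 *
 * so every page gets the same request regardless of what is in the pfns
 * array, while a caller that coalesces individual faults leaves
 * default_flags at 0 and sets pfn_flags_mask to ~0ULL so that only the
 * per-pfn values it wrote into range->pfns[] are honoured.
 */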
static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
                                 const uint64_t *pfns, unsigned long npages,
                                 uint64_t cpu_flags, bool *fault,
                                 bool *write_fault)
{
        unsigned long i;

        if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT) {
                *fault = *write_fault = false;
                return;
        }

        *fault = *write_fault = false;
        for (i = 0; i < npages; ++i) {
                hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
                                   fault, write_fault);
                if ((*write_fault))
                        return;
        }
}
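
/*
 * pte_hole callback: called for ranges with no page table entries (or no
 * page directory at all); either fault them in or report them as empty.
 */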
static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
                             __always_unused int depth, struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        bool fault, write_fault;
        unsigned long i, npages;
        uint64_t *pfns;

        i = (addr - range->start) >> PAGE_SHIFT;
        npages = (end - addr) >> PAGE_SHIFT;
        pfns = &range->pfns[i];
        hmm_range_need_fault(hmm_vma_walk, pfns, npages,
                             0, &fault, &write_fault);
        if (fault || write_fault)
                return hmm_vma_fault(addr, end, fault, write_fault, walk);
        hmm_vma_walk->last = addr;
        return hmm_pfns_fill(addr, end, range, HMM_PFN_NONE);
}
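
/* Translate the protection bits of a huge pmd into HMM pfn flags. */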
static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
        if (pmd_protnone(pmd))
                return 0;
        return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
                                range->flags[HMM_PFN_WRITE] :
                                range->flags[HMM_PFN_VALID];
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
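/* Fill the pfns for a range that is fully covered by one huge pmd. */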
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
                unsigned long end, uint64_t *pfns, pmd_t pmd)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long pfn, npages, i;
        bool fault, write_fault;
        uint64_t cpu_flags;

        npages = (end - addr) >> PAGE_SHIFT;
        cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
        hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
                             &fault, &write_fault);
        if (fault || write_fault)
                return hmm_vma_fault(addr, end, fault, write_fault, walk);

        pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
        for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
                if (pmd_devmap(pmd)) {
                        hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
                                              hmm_vma_walk->pgmap);
                        if (unlikely(!hmm_vma_walk->pgmap))
                                return -EBUSY;
                }
                pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
        }
        if (hmm_vma_walk->pgmap) {
                put_dev_pagemap(hmm_vma_walk->pgmap);
                hmm_vma_walk->pgmap = NULL;
        }
        hmm_vma_walk->last = end;
        return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
                unsigned long end, uint64_t *pfns, pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
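
/*
 * Only report device private entries whose pgmap is owned by the caller
 * (range->dev_private_owner); a driver should only ever see its own device
 * memory here.
 */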
static inline bool hmm_is_device_private_entry(struct hmm_range *range,
                swp_entry_t entry)
{
        return is_device_private_entry(entry) &&
                device_private_entry_to_page(entry)->pgmap->owner ==
                range->dev_private_owner;
}
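
/* Translate the protection bits of a present pte into HMM pfn flags. */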
static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
        if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
                return 0;
        return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
                                range->flags[HMM_PFN_WRITE] :
                                range->flags[HMM_PFN_VALID];
}
static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
                              unsigned long end, pmd_t *pmdp, pte_t *ptep,
                              uint64_t *pfn)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        bool fault, write_fault;
        uint64_t cpu_flags;
        pte_t pte = *ptep;
        uint64_t orig_pfn = *pfn;

        *pfn = range->values[HMM_PFN_NONE];
        fault = write_fault = false;

        if (pte_none(pte)) {
                hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
                                   &fault, &write_fault);
                if (fault || write_fault)
                        goto fault;
                return 0;
        }

        if (!pte_present(pte)) {
                swp_entry_t entry = pte_to_swp_entry(pte);

                /*
                 * Never fault in device private pages, but just report
                 * the PFN even if not present.
                 */
                if (hmm_is_device_private_entry(range, entry)) {
                        *pfn = hmm_device_entry_from_pfn(range,
                                            swp_offset(entry));
                        *pfn |= range->flags[HMM_PFN_VALID];
                        if (is_write_device_private_entry(entry))
                                *pfn |= range->flags[HMM_PFN_WRITE];
                        return 0;
                }

                hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0, &fault,
                                   &write_fault);
                if (!fault && !write_fault)
                        return 0;

                if (!non_swap_entry(entry))
                        goto fault;

                if (is_migration_entry(entry)) {
                        pte_unmap(ptep);
                        hmm_vma_walk->last = addr;
                        migration_entry_wait(walk->mm, pmdp, addr);
                        return -EBUSY;
                }

                /* Report error for everything else */
                pte_unmap(ptep);
                *pfn = range->values[HMM_PFN_ERROR];
                return -EFAULT;
        }

        cpu_flags = pte_to_hmm_pfn_flags(range, pte);
        hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags, &fault,
                           &write_fault);
        if (fault || write_fault)
                goto fault;

        if (pte_devmap(pte)) {
                hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
                                              hmm_vma_walk->pgmap);
                if (unlikely(!hmm_vma_walk->pgmap)) {
                        pte_unmap(ptep);
                        return -EBUSY;
                }
        }

        /*
         * Since each architecture defines a struct page for the zero page,
         * just fall through and treat it like a normal page.
         */
        if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) {
                hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0, &fault,
                                   &write_fault);
                if (fault || write_fault) {
                        pte_unmap(ptep);
                        return -EFAULT;
                }
                *pfn = range->values[HMM_PFN_SPECIAL];
                return 0;
        }

        *pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
        return 0;

fault:
        if (hmm_vma_walk->pgmap) {
                put_dev_pagemap(hmm_vma_walk->pgmap);
                hmm_vma_walk->pgmap = NULL;
        }
        pte_unmap(ptep);
        /* Fault any virtual address we were asked to fault */
        return hmm_vma_fault(addr, end, fault, write_fault, walk);
}
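
/*
 * pmd_entry callback: snapshot or fault one pmd-sized chunk of the range,
 * dispatching to the huge pmd handler or walking the pte table as needed.
 */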
static int hmm_vma_walk_pmd(pmd_t *pmdp,
                            unsigned long start,
                            unsigned long end,
                            struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        uint64_t *pfns = &range->pfns[(start - range->start) >> PAGE_SHIFT];
        unsigned long npages = (end - start) >> PAGE_SHIFT;
        unsigned long addr = start;
        bool fault, write_fault;
        pte_t *ptep;
        pmd_t pmd;

again:
        pmd = READ_ONCE(*pmdp);
        if (pmd_none(pmd))
                return hmm_vma_walk_hole(start, end, -1, walk);

        if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
                hmm_range_need_fault(hmm_vma_walk, pfns, npages,
                                     0, &fault, &write_fault);
                if (fault || write_fault) {
                        hmm_vma_walk->last = addr;
                        pmd_migration_entry_wait(walk->mm, pmdp);
                        return -EBUSY;
                }
                return hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
        }

        if (!pmd_present(pmd)) {
                hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0, &fault,
                                     &write_fault);
                if (fault || write_fault)
                        return -EFAULT;
                return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
        }

        if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
                /*
                 * No need to take the pmd_lock here; even if some other
                 * thread is splitting the huge pmd we will get that event
                 * through the mmu_notifier callback.
                 *
                 * So just read the pmd value, check again that it is a
                 * transparent huge or device mapping one, and compute the
                 * corresponding pfn values.
                 */
                pmd = pmd_read_atomic(pmdp);
                barrier();
                if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
                        goto again;

                return hmm_vma_handle_pmd(walk, addr, end, pfns, pmd);
        }

        /*
         * We have handled all the valid cases above, i.e. either none,
         * migration, huge or transparent huge. At this point it is either a
         * valid pmd entry pointing to a pte directory or a bad pmd that will
         * not recover.
         */
        if (pmd_bad(pmd)) {
                hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0, &fault,
                                     &write_fault);
                if (fault || write_fault)
                        return -EFAULT;
                return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
        }

        ptep = pte_offset_map(pmdp, addr);
        for (; addr < end; addr += PAGE_SIZE, ptep++, pfns++) {
                int r;

                r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, pfns);
                if (r) {
                        /* hmm_vma_handle_pte() did pte_unmap() */
                        hmm_vma_walk->last = addr;
                        return r;
                }
        }
        if (hmm_vma_walk->pgmap) {
                /*
                 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
                 * so that we can leverage the get_dev_pagemap() optimization
                 * which will not re-take a reference on a pgmap if we already
                 * have one.
                 */
                put_dev_pagemap(hmm_vma_walk->pgmap);
                hmm_vma_walk->pgmap = NULL;
        }
        pte_unmap(ptep - 1);

        hmm_vma_walk->last = addr;
        return 0;
}
#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
        if (!pud_present(pud))
                return 0;
        return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
                                range->flags[HMM_PFN_WRITE] :
                                range->flags[HMM_PFN_VALID];
}
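
/*
 * pud_entry callback: handle a huge devmap pud in place, otherwise ask the
 * walker to descend into the pmd/pte tables below it.
 */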
static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
                struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long addr = start;
        pud_t pud;
        int ret = 0;
        spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

        if (!ptl)
                return 0;

        /* Normally we don't want to split the huge page */
        walk->action = ACTION_CONTINUE;

        pud = READ_ONCE(*pudp);
        if (pud_none(pud)) {
                spin_unlock(ptl);
                return hmm_vma_walk_hole(start, end, -1, walk);
        }

        if (pud_huge(pud) && pud_devmap(pud)) {
                unsigned long i, npages, pfn;
                uint64_t *pfns, cpu_flags;
                bool fault, write_fault;

                if (!pud_present(pud)) {
                        spin_unlock(ptl);
                        return hmm_vma_walk_hole(start, end, -1, walk);
                }

                i = (addr - range->start) >> PAGE_SHIFT;
                npages = (end - addr) >> PAGE_SHIFT;
                pfns = &range->pfns[i];

                cpu_flags = pud_to_hmm_pfn_flags(range, pud);
                hmm_range_need_fault(hmm_vma_walk, pfns, npages,
                                     cpu_flags, &fault, &write_fault);
                if (fault || write_fault) {
                        spin_unlock(ptl);
                        return hmm_vma_fault(addr, end, fault, write_fault,
                                             walk);
                }

                pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
                for (i = 0; i < npages; ++i, ++pfn) {
                        hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
                                              hmm_vma_walk->pgmap);
                        if (unlikely(!hmm_vma_walk->pgmap)) {
                                ret = -EBUSY;
                                goto out_unlock;
                        }
                        pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
                                  cpu_flags;
                }
                if (hmm_vma_walk->pgmap) {
                        put_dev_pagemap(hmm_vma_walk->pgmap);
                        hmm_vma_walk->pgmap = NULL;
                }
                hmm_vma_walk->last = end;
                goto out_unlock;
        }

        /* Ask for the PUD to be split */
        walk->action = ACTION_SUBTREE;

out_unlock:
        spin_unlock(ptl);
        return ret;
}
#else
#define hmm_vma_walk_pud	NULL
#endif
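
/* hugetlb_entry callback: snapshot or fault one huge page sized mapping. */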
#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
                                      unsigned long start, unsigned long end,
                                      struct mm_walk *walk)
{
        unsigned long addr = start, i, pfn;
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;
        uint64_t orig_pfn, cpu_flags;
        bool fault, write_fault;
        spinlock_t *ptl;
        pte_t entry;

        ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
        entry = huge_ptep_get(pte);

        i = (start - range->start) >> PAGE_SHIFT;
        orig_pfn = range->pfns[i];
        range->pfns[i] = range->values[HMM_PFN_NONE];
        cpu_flags = pte_to_hmm_pfn_flags(range, entry);
        fault = write_fault = false;
        hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
                           &fault, &write_fault);
        if (fault || write_fault) {
                spin_unlock(ptl);
                return hmm_vma_fault(addr, end, fault, write_fault, walk);
        }

        pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
        for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
                range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
                                 cpu_flags;
        hmm_vma_walk->last = end;
        spin_unlock(ptl);
        return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */
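
/*
 * test_walk callback: decide whether a vma should be walked at all, and
 * pre-fill the result for vmas that HMM cannot handle.
 */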
static int hmm_vma_walk_test(unsigned long start, unsigned long end,
                             struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;

        /*
         * Skip vma ranges that don't have struct page backing them or that
         * map I/O devices directly.
         *
         * If the vma does not allow read access, then assume that it does not
         * allow write access either. HMM does not support architectures that
         * allow write without read.
         */
        if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) ||
            !(vma->vm_flags & VM_READ)) {
                bool fault, write_fault;

                /*
                 * Check to see if a fault is requested for any page in the
                 * range.
                 */
                hmm_range_need_fault(hmm_vma_walk, range->pfns +
                                        ((start - range->start) >> PAGE_SHIFT),
                                        (end - start) >> PAGE_SHIFT,
                                        0, &fault, &write_fault);
                if (fault || write_fault)
                        return -EFAULT;

                hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
                hmm_vma_walk->last = end;

                /* Skip this vma and continue processing the next vma. */
                return 1;
        }

        return 0;
}
static const struct mm_walk_ops hmm_walk_ops = {
        .pud_entry	= hmm_vma_walk_pud,
        .pmd_entry	= hmm_vma_walk_pmd,
        .pte_hole	= hmm_vma_walk_hole,
        .hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
        .test_walk	= hmm_vma_walk_test,
};
/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range:	range being faulted
 * @flags:	HMM_FAULT_* flags
 *
 * Return: the number of valid pages in range->pfns[] (from range start
 * address), which may be zero. On error one of the following status codes
 * can be returned:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid vma
 *		(e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	Invalid (i.e., either no valid vma or it is illegal to access
 *		that range) number of valid pages in range->pfns[] (from
 *		range start address).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and the caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, unsigned int flags)
{
        struct hmm_vma_walk hmm_vma_walk = {
                .range = range,
                .last = range->start,
                .flags = flags,
        };
        struct mm_struct *mm = range->notifier->mm;
        int ret;

        lockdep_assert_held(&mm->mmap_sem);

        do {
                /* If the range is no longer valid, force retry. */
                if (mmu_interval_check_retry(range->notifier,
                                             range->notifier_seq))
                        return -EBUSY;
                ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
                                      &hmm_walk_ops, &hmm_vma_walk);
        } while (ret == -EBUSY);

        if (ret)
                return ret;
        return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
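
/*
 * A rough sketch of the calling pattern hmm_range_fault() expects (loosely
 * based on Documentation/vm/hmm.rst; the driver_* and interval_sub names are
 * illustrative only, not part of any real driver):
 *
 *	range.notifier = &interval_sub;
 *	range.start = start;
 *	range.end = end;
 *	range.pfns = pfns;
 *	range.flags = driver_hmm_flags;
 *	range.values = driver_hmm_values;
 *	range.pfn_shift = driver_hmm_pfn_shift;
 *
 * again:
 *	range.notifier_seq = mmu_interval_read_begin(&interval_sub);
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_fault(&range, 0);
 *	up_read(&mm->mmap_sem);
 *	if (ret == -EBUSY)
 *		goto again;
 *	else if (ret < 0)
 *		return ret;
 *
 *	driver_lock_page_tables();
 *	if (mmu_interval_read_retry(&interval_sub, range.notifier_seq)) {
 *		driver_unlock_page_tables();
 *		goto again;
 *	}
 *	... program the device page table from range.pfns[] ...
 *	driver_unlock_page_tables();
 */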