/*
 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/jump_label.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)
#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

static inline struct hmm *mm_get_hmm(struct mm_struct *mm)
{
	struct hmm *hmm = READ_ONCE(mm->hmm);

	if (hmm && kref_get_unless_zero(&hmm->kref))
		return hmm;

	return NULL;
}
/*
 * hmm_get_or_create - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 * Returns: returns an HMM object, either by referencing the existing
 *          (per-process) object, or by creating a new one.
 *
 * This is not intended to be used directly by device drivers. If mm already
 * has an HMM struct then it gets a reference on it and returns it. Otherwise
 * it allocates an HMM struct, initializes it, associates it with the mm and
 * returns it.
 */
static struct hmm *hmm_get_or_create(struct mm_struct *mm)
{
	struct hmm *hmm = mm_get_hmm(mm);
	bool cleanup = false;

	if (hmm)
		return hmm;

	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return NULL;
	init_waitqueue_head(&hmm->wq);
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	hmm->mmu_notifier.ops = NULL;
	INIT_LIST_HEAD(&hmm->ranges);
	mutex_init(&hmm->lock);
	kref_init(&hmm->kref);
	hmm->notifiers = 0;
	hmm->dead = false;
	hmm->mm = mm;

	spin_lock(&mm->page_table_lock);
	if (!mm->hmm)
		mm->hmm = hmm;
	else
		cleanup = true;
	spin_unlock(&mm->page_table_lock);

	if (cleanup)
		goto error;

	/*
	 * We should only get here if we hold the mmap_sem in write mode,
	 * i.e. on registration of the first mirror through
	 * hmm_mirror_register().
	 */
	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm))
		goto error_mm;

	return hmm;

error_mm:
	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);
error:
	kfree(hmm);
	return NULL;
}
static void hmm_free(struct kref *kref)
{
	struct hmm *hmm = container_of(kref, struct hmm, kref);
	struct mm_struct *mm = hmm->mm;

	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);

	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);

	kfree(hmm);
}

static inline void hmm_put(struct hmm *hmm)
{
	kref_put(&hmm->kref, hmm_free);
}
void hmm_mm_destroy(struct mm_struct *mm)
{
	struct hmm *hmm;

	spin_lock(&mm->page_table_lock);
	hmm = mm_get_hmm(mm);
	mm->hmm = NULL;
	if (hmm) {
		hmm->mm = NULL;
		hmm->dead = true;
		spin_unlock(&mm->page_table_lock);
		hmm_put(hmm);
		return;
	}

	spin_unlock(&mm->page_table_lock);
}
static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm *hmm = mm_get_hmm(mm);
	struct hmm_mirror *mirror;
	struct hmm_range *range;

	/* Report this HMM as dying. */
	hmm->dead = true;

	/* Wake-up everyone waiting on any range. */
	mutex_lock(&hmm->lock);
	list_for_each_entry(range, &hmm->ranges, list) {
		range->valid = false;
	}
	wake_up_all(&hmm->wq);
	mutex_unlock(&hmm->lock);

	down_write(&hmm->mirrors_sem);
	mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
					  list);
	while (mirror) {
		list_del_init(&mirror->list);
		if (mirror->ops->release) {
			/*
			 * Drop mirrors_sem so the callback can wait on any
			 * pending work that might itself trigger a
			 * mmu_notifier callback and thus would deadlock with
			 * us.
			 */
			up_write(&hmm->mirrors_sem);
			mirror->ops->release(mirror);
			down_write(&hmm->mirrors_sem);
		}
		mirror = list_first_entry_or_null(&hmm->mirrors,
						  struct hmm_mirror, list);
	}
	up_write(&hmm->mirrors_sem);

	hmm_put(hmm);
}
static int hmm_invalidate_range_start(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = mm_get_hmm(nrange->mm);
	struct hmm_mirror *mirror;
	struct hmm_update update;
	struct hmm_range *range;
	int ret = 0;

	VM_BUG_ON(!hmm);

	update.start = nrange->start;
	update.end = nrange->end;
	update.event = HMM_UPDATE_INVALIDATE;
	update.blockable = nrange->blockable;

	if (nrange->blockable)
		mutex_lock(&hmm->lock);
	else if (!mutex_trylock(&hmm->lock)) {
		ret = -EAGAIN;
		goto out;
	}
	hmm->notifiers++;
	list_for_each_entry(range, &hmm->ranges, list) {
		if (update.end < range->start || update.start >= range->end)
			continue;

		range->valid = false;
	}
	mutex_unlock(&hmm->lock);

	if (nrange->blockable)
		down_read(&hmm->mirrors_sem);
	else if (!down_read_trylock(&hmm->mirrors_sem)) {
		ret = -EAGAIN;
		goto out;
	}
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		ret = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
		if (!update.blockable && ret == -EAGAIN) {
			up_read(&hmm->mirrors_sem);
			goto out;
		}
		ret = 0;
	}
	up_read(&hmm->mirrors_sem);

out:
	hmm_put(hmm);
	return ret;
}
static void hmm_invalidate_range_end(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = mm_get_hmm(nrange->mm);

	VM_BUG_ON(!hmm);

	mutex_lock(&hmm->lock);
	hmm->notifiers--;
	if (!hmm->notifiers) {
		struct hmm_range *range;

		list_for_each_entry(range, &hmm->ranges, list) {
			if (range->valid)
				continue;
			range->valid = true;
		}
		wake_up_all(&hmm->wq);
	}
	mutex_unlock(&hmm->lock);

	hmm_put(hmm);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
};
/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * THE mm->mmap_sem MUST BE HELD IN WRITE MODE !
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

	mirror->hmm = hmm_get_or_create(mm);
	if (!mirror->hmm)
		return -ENOMEM;

	down_write(&mirror->hmm->mirrors_sem);
	list_add(&mirror->list, &mirror->hmm->mirrors);
	up_write(&mirror->hmm->mirrors_sem);

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
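
/*
 * Example (illustrative sketch only; the mydrv_* names are hypothetical and
 * not part of this file): a driver embeds a struct hmm_mirror, provides a
 * sync_cpu_device_pagetables() callback to invalidate its device page
 * tables, and registers with mmap_sem held in write mode:
 *
 *	static const struct hmm_mirror_ops mydrv_mirror_ops = {
 *		.sync_cpu_device_pagetables = mydrv_sync,
 *		.release = mydrv_release,
 *	};
 *
 *	down_write(&mm->mmap_sem);
 *	mirror->ops = &mydrv_mirror_ops;
 *	ret = hmm_mirror_register(mirror, mm);
 *	up_write(&mm->mmap_sem);
 */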
/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	struct hmm *hmm = READ_ONCE(mirror->hmm);

	if (hmm == NULL)
		return;

	down_write(&hmm->mirrors_sem);
	list_del_init(&mirror->list);
	/* To protect us against double unregister ... */
	mirror->hmm = NULL;
	up_write(&hmm->mirrors_sem);

	hmm_put(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);
struct hmm_vma_walk {
	struct hmm_range	*range;
	struct dev_pagemap	*pgmap;
	unsigned long		last;
	bool			fault;
	bool			block;
};
static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
	flags |= write_fault ? FAULT_FLAG_WRITE : 0;
	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY)
		return -EAGAIN;
	if (ret & VM_FAULT_ERROR) {
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	return -EBUSY;
}
static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}
/*
 * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not ?
 * @write_fault: write fault ?
 * @walk: mm_walk structure
 * Returns: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns
 * true, or whenever there is no page directory covering the virtual address
 * range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i, page_size;

	hmm_vma_walk->last = addr;
	page_size = hmm_range_page_size(range);
	i = (addr - range->start) >> range->page_shift;

	for (; addr < end; addr += page_size, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EBUSY)
				return ret;
		}
	}

	return (fault || write_fault) ? -EBUSY : 0;
}
static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (!hmm_vma_walk->fault)
		return;

	/*
	 * We consider not only the individual per-page request but also the
	 * default flags requested for the range. The API can be used in two
	 * fashions. In the first one the HMM user coalesces multiple page
	 * faults into one request and sets flags per pfn for those faults.
	 * In the second one the HMM user wants to pre-fault a range with
	 * specific flags. For the latter it would be a waste to have the
	 * user pre-fill the pfn array with a default flags value.
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory ? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault ? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}
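
/*
 * Example (illustrative sketch, not taken verbatim from any driver): to
 * pre-fault a whole range for read access without pre-filling the pfn
 * array, a caller can rely on the default flags path above:
 *
 *	range.default_flags = range.flags[HMM_PFN_VALID];
 *	range.pfn_flags_mask = 0;
 *
 * Alternatively, to coalesce individual faults into one request, set
 * default_flags to 0, pfn_flags_mask to ~0ULL, and fill range.pfns[] with
 * the per-page request flags before calling hmm_range_fault().
 */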
static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (!hmm_vma_walk->fault) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i)
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
}
static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}
static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}
static int hmm_vma_handle_pmd(struct mm_walk *walk,
			      unsigned long addr,
			      unsigned long end,
			      uint64_t *pfns,
			      pmd_t pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + pte_index(addr);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		if (pmd_devmap(pmd)) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
		}
		pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;
	}
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	hmm_vma_walk->last = end;
	return 0;
#else
	/* If THP is not enabled then we should never reach this code ! */
	return -EINVAL;
#endif
}
static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}
static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	fault = write_fault = false;

	if (pte_none(pte)) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
				   &fault, &write_fault);
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry, ignore migration, use
		 * device and report anything else as error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_pfn_from_pfn(range, swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(vma->vm_mm,
						     pmdp, addr);
				return -EBUSY;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);

	if (fault || write_fault)
		goto fault;

	if (pte_devmap(pte)) {
		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
						      hmm_vma_walk->pgmap);
		if (unlikely(!hmm_vma_walk->pgmap))
			return -EBUSY;
	} else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
		*pfn = range->values[HMM_PFN_SPECIAL];
		return -EFAULT;
	}

	*pfn = hmm_pfn_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}
static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, walk);

	if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
		return hmm_pfns_bad(start, end, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		bool fault, write_fault;
		unsigned long npages;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(vma->vm_mm, pmdp);
			return -EBUSY;
		}
		return 0;
	} else if (!pmd_present(pmd))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * the mmu_notifier callback.
		 *
		 * So just read the pmd value, check again that it is a
		 * transparent huge or device mapping one and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		i = (addr - range->start) >> PAGE_SHIFT;
		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point either it is a
	 * valid pmd entry pointing to a pte directory or it is a bad pmd
	 * that will not recover.
	 */
	if (pmd_bad(pmd))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() did unmap pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	if (hmm_vma_walk->pgmap) {
		/*
		 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
		 * so that we can leverage the get_dev_pagemap() optimization
		 * which will not re-take a reference on a pgmap if we already
		 * have one.
		 */
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}
static int hmm_vma_walk_pud(pud_t *pudp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start, next;
	pmd_t *pmdp;
	pud_t pud;
	int ret;

again:
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return hmm_vma_walk_hole(start, end, walk);

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		uint64_t *pfns, cpu_flags;
		bool fault, write_fault;

		if (!pud_present(pud))
			return hmm_vma_walk_hole(start, end, walk);

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     cpu_flags, &fault, &write_fault);
		if (fault || write_fault)
			return hmm_vma_walk_hole_(addr, end, fault,
						  write_fault, walk);

#ifdef CONFIG_HUGETLB_PAGE
		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
			pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;
		}
		if (hmm_vma_walk->pgmap) {
			put_dev_pagemap(hmm_vma_walk->pgmap);
			hmm_vma_walk->pgmap = NULL;
		}
		hmm_vma_walk->last = end;
		return 0;
#else
		return -EINVAL;
#endif
	}

	split_huge_pud(walk->vma, pudp, addr);
	if (pud_none(*pudp))
		goto again;

	pmdp = pmd_offset(pudp, addr);
	do {
		next = pmd_addr_end(addr, end);
		ret = hmm_vma_walk_pmd(pmdp, addr, next, walk);
		if (ret)
			return ret;
	} while (pmdp++, addr = next, addr != end);

	return 0;
}
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned long addr = start, i, pfn, mask, size, pfn_inc;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	uint64_t orig_pfn, cpu_flags;
	bool fault, write_fault;
	spinlock_t *ptl;
	pte_t entry;
	int ret = 0;

	size = 1UL << huge_page_shift(h);
	mask = size - 1;
	if (range->page_shift != PAGE_SHIFT) {
		/* Make sure we are looking at a full page. */
		if (start & mask)
			return -EINVAL;
		if (end < (start + size))
			return -EINVAL;
		pfn_inc = size >> PAGE_SHIFT;
	} else {
		pfn_inc = 1;
		size = PAGE_SIZE;
	}

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> range->page_shift;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	fault = write_fault = false;
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);
	if (fault || write_fault) {
		ret = -ENOENT;
		goto unlock;
	}

	pfn = pte_pfn(entry) + ((start & mask) >> range->page_shift);
	for (; addr < end; addr += size, i++, pfn += pfn_inc)
		range->pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;
	hmm_vma_walk->last = end;

unlock:
	spin_unlock(ptl);

	if (ret == -ENOENT)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	return ret;
#else /* CONFIG_HUGETLB_PAGE */
	return -EINVAL;
#endif
}
static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}
/*
 * hmm_range_register() - start tracking change to CPU page table over a range
 * @range: range
 * @mm: the mm struct for the range of virtual address
 * @start: start virtual address (inclusive)
 * @end: end virtual address (exclusive)
 * @page_shift: expected page shift for the range
 * Returns 0 on success, -EFAULT if the address space is no longer valid
 *
 * Track updates to the CPU page table, see include/linux/hmm.h
 */
int hmm_range_register(struct hmm_range *range,
		       struct mm_struct *mm,
		       unsigned long start,
		       unsigned long end,
		       unsigned page_shift)
{
	unsigned long mask = ((1UL << page_shift) - 1UL);

	range->valid = false;
	range->hmm = NULL;

	if ((start & mask) || (end & mask))
		return -EINVAL;
	if (start >= end)
		return -EINVAL;

	range->page_shift = page_shift;
	range->start = start;
	range->end = end;

	range->hmm = hmm_get_or_create(mm);
	if (!range->hmm)
		return -EFAULT;

	/* Check if hmm_mm_destroy() was called. */
	if (range->hmm->mm == NULL || range->hmm->dead) {
		hmm_put(range->hmm);
		range->hmm = NULL;
		return -EFAULT;
	}

	/* Initialize range to track CPU page table update */
	mutex_lock(&range->hmm->lock);

	list_add_rcu(&range->list, &range->hmm->ranges);

	/*
	 * If there are any concurrent notifiers we have to wait for them for
	 * the range to be valid (see hmm_range_wait_until_valid()).
	 */
	if (!range->hmm->notifiers)
		range->valid = true;
	mutex_unlock(&range->hmm->lock);

	return 0;
}
EXPORT_SYMBOL(hmm_range_register);
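
/*
 * Example (illustrative sketch): a typical caller registers the range, then
 * waits for it to become valid before snapshotting or faulting, retrying as
 * needed. HMM_RANGE_DEFAULT_TIMEOUT is defined in include/linux/hmm.h:
 *
 *	range.pfns = pfns;
 *	ret = hmm_range_register(&range, mm, start, end, PAGE_SHIFT);
 *	if (ret)
 *		return ret;
 *	if (!hmm_range_wait_until_valid(&range, HMM_RANGE_DEFAULT_TIMEOUT))
 *		goto retry;
 */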
/*
 * hmm_range_unregister() - stop tracking change to CPU page table over a range
 * @range: range
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * hmm_range_register(). See include/linux/hmm.h for how to use it.
 */
void hmm_range_unregister(struct hmm_range *range)
{
	/* Sanity check this really should not happen. */
	if (range->hmm == NULL || range->end <= range->start)
		return;

	mutex_lock(&range->hmm->lock);
	list_del_rcu(&range->list);
	mutex_unlock(&range->hmm->lock);

	/* Drop reference taken by hmm_range_register() */
	range->valid = false;
	hmm_put(range->hmm);
	range->hmm = NULL;
}
EXPORT_SYMBOL(hmm_range_unregister);
/*
 * hmm_range_snapshot() - snapshot CPU page table for a range
 * @range: range
 * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
 *          permission (for instance asking for write and range is read only),
 *          -EAGAIN if you need to retry, -EFAULT invalid (ie either no valid
 *          vma or it is illegal to access that range), number of valid pages
 *          in range->pfns[] (from range start address).
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by range struct. See include/linux/hmm.h for example
 * of use.
 */
long hmm_range_snapshot(struct hmm_range *range)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;

	/* Check if hmm_mm_destroy() was called. */
	if (hmm->mm == NULL || hmm->dead)
		return -EFAULT;

	do {
		/* If range is no longer valid force retry. */
		if (!range->valid)
			return -EAGAIN;

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			struct hstate *h = hstate_vma(vma);

			if (huge_page_shift(h) != range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If vma does not allow read access, then assume that
			 * it does not allow write access, either. HMM does
			 * not support architectures that allow write without
			 * read.
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = false;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.pud_entry = hmm_vma_walk_pud;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		walk_page_range(start, end, &mm_walk);
		start = end;
	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_snapshot);
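
/*
 * Example (illustrative sketch): snapshot under mmap_sem and retry if the
 * snapshot was invalidated concurrently:
 *
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_snapshot(&range);
 *	up_read(&mm->mmap_sem);
 *	if (ret == -EAGAIN)
 *		goto again;
 *
 * Before using range.pfns[], the caller is expected to take whatever driver
 * lock serializes its device page table updates and re-check
 * hmm_range_valid(&range).
 */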
/*
 * hmm_range_fault() - try to fault some address in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and does not drop
 *         mmap_sem)
 * Returns: number of valid pages in range->pfns[] (from range start
 *          address). This may be zero. If the return value is negative,
 *          then one of the following values may be returned:
 *
 *           -EINVAL: invalid arguments or mm or virtual address is in an
 *                    invalid vma (for instance device file vma).
 *           -ENOMEM: Out of memory.
 *           -EPERM:  Invalid permission (for instance asking for write and
 *                    range is read only).
 *           -EAGAIN: If you need to retry and mmap_sem was dropped. This can
 *                    only happen if block argument is false.
 *           -EBUSY:  If the range is being invalidated and you should wait
 *                    for invalidation to finish.
 *           -EFAULT: Invalid (ie either no valid vma or it is illegal to
 *                    access that range), number of valid pages in
 *                    range->pfns[] (from range start address).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, bool block)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;
	int ret;

	/* Check if hmm_mm_destroy() was called. */
	if (hmm->mm == NULL || hmm->dead)
		return -EFAULT;

	do {
		/* If range is no longer valid force retry. */
		if (!range->valid) {
			up_read(&hmm->mm->mmap_sem);
			return -EAGAIN;
		}

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			if (huge_page_shift(hstate_vma(vma)) !=
			    range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If vma does not allow read access, then assume that
			 * it does not allow write access, either. HMM does
			 * not support architectures that allow write without
			 * read.
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = true;
		hmm_vma_walk.block = block;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.pud_entry = hmm_vma_walk_pud;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		do {
			ret = walk_page_range(start, end, &mm_walk);
			start = hmm_vma_walk.last;

			/* Keep trying while the range is valid. */
		} while (ret == -EBUSY && range->valid);

		if (ret) {
			unsigned long i;

			i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
			hmm_pfns_clear(range, &range->pfns[i],
				       hmm_vma_walk.last, range->end);
			return ret;
		}
		start = end;
	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
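
/*
 * Example (illustrative sketch): fault a whole range for write access using
 * the default flags path, so the pfn array does not need to be pre-filled:
 *
 *	range.default_flags = range.flags[HMM_PFN_VALID] |
 *			      range.flags[HMM_PFN_WRITE];
 *	range.pfn_flags_mask = 0;
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_fault(&range, true);
 *	up_read(&mm->mmap_sem);
 */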
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
				       unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (!page)
		return NULL;
	lock_page(page);
	return page;
}
EXPORT_SYMBOL(hmm_vma_alloc_locked_page);
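
/*
 * Usage note (not a requirement of this function): this helper is intended
 * for a driver migrating device memory back to system memory, typically
 * from its hmm_devmem_ops fault() callback. The driver allocates and locks
 * the destination page here before copying data out of device memory.
 */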
static void hmm_devmem_ref_release(struct percpu_ref *ref)
{
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	complete(&devmem->completion);
}

static void hmm_devmem_ref_exit(void *data)
{
	struct percpu_ref *ref = data;
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	wait_for_completion(&devmem->completion);
	percpu_ref_exit(ref);
}

static void hmm_devmem_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill(ref);
}

static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma,
				   unsigned long addr,
				   const struct page *page,
				   unsigned int flags,
				   pmd_t *pmdp)
{
	struct hmm_devmem *devmem = page->pgmap->data;

	return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
}

static void hmm_devmem_free(struct page *page, void *data)
{
	struct hmm_devmem *devmem = data;

	page->mapping = NULL;

	devmem->ops->free(devmem, page);
}
/*
 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
 *
 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
 * @device: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * Returns: pointer to new hmm_devmem struct, ERR_PTR otherwise
 *
 * This function first finds an empty range of physical address big enough to
 * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
 * in turn allocates struct pages. It does not do anything beyond that; all
 * events affecting the memory will go through the various callbacks provided
 * by hmm_devmem_ops struct.
 *
 * Device driver should call this function during device initialization and
 * is then responsible for memory management. HMM only provides helpers.
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
				  struct device *device,
				  unsigned long size)
{
	struct hmm_devmem *devmem;
	resource_size_t addr;
	void *result;
	int ret;

	dev_pagemap_get_ops();

	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = NULL;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
	if (ret)
		return ERR_PTR(ret);

	size = ALIGN(size, PA_SECTION_SIZE);
	addr = min((unsigned long)iomem_resource.end,
		   (1UL << MAX_PHYSMEM_BITS) - 1);
	addr = addr - size + 1UL;

	/*
	 * FIXME add a new helper to quickly walk resource tree and find free
	 * range
	 *
	 * FIXME what about ioport_resource resource ?
	 */
	for (; addr > size && addr >= iomem_resource.start; addr -= size) {
		ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
		if (ret != REGION_DISJOINT)
			continue;

		devmem->resource = devm_request_mem_region(device, addr, size,
							   dev_name(device));
		if (!devmem->resource)
			return ERR_PTR(-ENOMEM);
		break;
	}
	if (!devmem->resource)
		return ERR_PTR(-ERANGE);

	devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);
	devmem->page_fault = hmm_devmem_fault;

	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.altmap_valid = false;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;
	devmem->pagemap.kill = hmm_devmem_ref_kill;

	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
	if (IS_ERR(result))
		return result;
	return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add);
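
/*
 * Example (illustrative sketch; the mydrv_* callbacks are hypothetical):
 * carve out 1GB of device private memory at probe time:
 *
 *	static const struct hmm_devmem_ops mydrv_devmem_ops = {
 *		.free = mydrv_devmem_free,
 *		.fault = mydrv_devmem_fault,
 *	};
 *
 *	devmem = hmm_devmem_add(&mydrv_devmem_ops, &pdev->dev, SZ_1G);
 *	if (IS_ERR(devmem))
 *		return PTR_ERR(devmem);
 */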
struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
					   struct device *device,
					   struct resource *res)
{
	struct hmm_devmem *devmem;
	void *result;
	int ret;

	if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
		return ERR_PTR(-EINVAL);

	dev_pagemap_get_ops();

	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = res;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
				       &devmem->ref);
	if (ret)
		return ERR_PTR(ret);

	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);
	devmem->page_fault = hmm_devmem_fault;

	devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.altmap_valid = false;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;
	devmem->pagemap.kill = hmm_devmem_ref_kill;

	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
	if (IS_ERR(result))
		return result;
	return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);
/*
 * A device driver that wants to handle multiple devices' memory through a
 * single fake device can use hmm_device to do so. This is purely a helper
 * and it is not needed to make use of any HMM functionality.
 */
#define HMM_DEVICE_MAX 256

static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
static DEFINE_SPINLOCK(hmm_device_lock);
static struct class *hmm_device_class;
static dev_t hmm_device_devt;

static void hmm_device_release(struct device *device)
{
	struct hmm_device *hmm_device;

	hmm_device = container_of(device, struct hmm_device, device);
	spin_lock(&hmm_device_lock);
	clear_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	kfree(hmm_device);
}
struct hmm_device *hmm_device_new(void *drvdata)
{
	struct hmm_device *hmm_device;

	hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
	if (!hmm_device)
		return ERR_PTR(-ENOMEM);

	spin_lock(&hmm_device_lock);
	hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
	if (hmm_device->minor >= HMM_DEVICE_MAX) {
		spin_unlock(&hmm_device_lock);
		kfree(hmm_device);
		return ERR_PTR(-EBUSY);
	}
	set_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
	hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
					hmm_device->minor);
	hmm_device->device.release = hmm_device_release;
	dev_set_drvdata(&hmm_device->device, drvdata);
	hmm_device->device.class = hmm_device_class;
	device_initialize(&hmm_device->device);

	return hmm_device;
}
EXPORT_SYMBOL(hmm_device_new);

void hmm_device_put(struct hmm_device *hmm_device)
{
	put_device(&hmm_device->device);
}
EXPORT_SYMBOL(hmm_device_put);
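
/*
 * Example (illustrative sketch; mydrv is a hypothetical driver-private
 * pointer): a driver managing several physical devices behind one fake
 * device could do:
 *
 *	hmm_device = hmm_device_new(mydrv);
 *	if (IS_ERR(hmm_device))
 *		return PTR_ERR(hmm_device);
 *	...
 *	hmm_device_put(hmm_device);	// when done
 */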
static int __init hmm_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&hmm_device_devt, 0,
				  HMM_DEVICE_MAX,
				  "hmm_device");
	if (ret)
		return ret;

	hmm_device_class = class_create(THIS_MODULE, "hmm_device");
	if (IS_ERR(hmm_device_class)) {
		unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
		return PTR_ERR(hmm_device_class);
	}
	return 0;
}

device_initcall(hmm_init);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */