/*
 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/jump_label.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)

#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

/*
 * struct hmm - HMM per mm struct
 *
 * @mm: mm struct this HMM struct is bound to
 * @lock: lock protecting ranges list
 * @sequence: we track updates to the CPU page table with a sequence number
 * @ranges: list of ranges being snapshotted
 * @mirrors: list of mirrors for this mm
 * @mmu_notifier: mmu notifier to track updates to CPU page table
 * @mirrors_sem: read/write semaphore protecting the mirrors list
 */
struct hmm {
	struct mm_struct	*mm;
	spinlock_t		lock;
	atomic_t		sequence;
	struct list_head	ranges;
	struct list_head	mirrors;
	struct mmu_notifier	mmu_notifier;
	struct rw_semaphore	mirrors_sem;
};

/*
 * hmm_register - register HMM against an mm (HMM internal)
 * @mm: mm struct to attach to
 *
 * This is not intended to be used directly by device drivers. It allocates an
 * HMM struct if mm does not have one, and initializes it.
 */
static struct hmm *hmm_register(struct mm_struct *mm)
{
	struct hmm *hmm = READ_ONCE(mm->hmm);
	bool cleanup = false;

	/*
	 * The hmm struct can only be freed once the mm_struct goes away,
	 * hence we should always have pre-allocated a new hmm struct above.
	 */
	if (hmm)
		return hmm;

	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return NULL;
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	atomic_set(&hmm->sequence, 0);
	hmm->mmu_notifier.ops = NULL;
	INIT_LIST_HEAD(&hmm->ranges);
	spin_lock_init(&hmm->lock);
	hmm->mm = mm;

	spin_lock(&mm->page_table_lock);
	if (!mm->hmm)
		mm->hmm = hmm;
	else
		cleanup = true;
	spin_unlock(&mm->page_table_lock);
	if (cleanup)
		goto error;

	/*
	 * We should only get here if we hold the mmap_sem in write mode,
	 * i.e. on registration of the first mirror through
	 * hmm_mirror_register().
	 */
	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm))
		goto error_mm;
	return hmm;

error_mm:
	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);
error:
	kfree(hmm);
	return NULL;
}

void hmm_mm_destroy(struct mm_struct *mm)
{
	kfree(mm->hmm);
}

static int hmm_invalidate_range(struct hmm *hmm,
				const struct hmm_update *update)
{
	struct hmm_mirror *mirror;
	struct hmm_range *range;

	spin_lock(&hmm->lock);
	list_for_each_entry(range, &hmm->ranges, list) {
		unsigned long addr, idx, npages;

		if (update->end < range->start || update->start >= range->end)
			continue;

		range->valid = false;
		addr = max(update->start, range->start);
		idx = (addr - range->start) >> PAGE_SHIFT;
		npages = (min(range->end, update->end) - addr) >> PAGE_SHIFT;
		memset(&range->pfns[idx], 0, sizeof(*range->pfns) * npages);
	}
	spin_unlock(&hmm->lock);

	down_read(&hmm->mirrors_sem);
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		int ret;

		ret = mirror->ops->sync_cpu_device_pagetables(mirror, update);
		if (!update->blockable && ret == -EAGAIN) {
			up_read(&hmm->mirrors_sem);
			return -EAGAIN;
		}
	}
	up_read(&hmm->mirrors_sem);

	return 0;
}

static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm_mirror *mirror;
	struct hmm *hmm = mm->hmm;

	down_write(&hmm->mirrors_sem);
	mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
					  list);
	while (mirror) {
		list_del_init(&mirror->list);
		if (mirror->ops->release) {
			/*
			 * Drop mirrors_sem so the callback can wait on any
			 * pending work that might itself trigger a
			 * mmu_notifier callback and thus would deadlock with
			 * us.
			 */
			up_write(&hmm->mirrors_sem);
			mirror->ops->release(mirror);
			down_write(&hmm->mirrors_sem);
		}
		mirror = list_first_entry_or_null(&hmm->mirrors,
						  struct hmm_mirror, list);
	}
	up_write(&hmm->mirrors_sem);
}

static int hmm_invalidate_range_start(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end,
				      bool blockable)
{
	struct hmm *hmm = mm->hmm;

	VM_BUG_ON(!hmm);
	atomic_inc(&hmm->sequence);
	return 0;
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct hmm_update update;
	struct hmm *hmm = mm->hmm;

	VM_BUG_ON(!hmm);
	update.start = start;
	update.end = end;
	update.event = HMM_UPDATE_INVALIDATE;
	update.blockable = true;
	hmm_invalidate_range(hmm, &update);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
};

/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * THE mm->mmap_sem MUST BE HELD IN WRITE MODE !
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

again:
	mirror->hmm = hmm_register(mm);
	if (!mirror->hmm)
		return -ENOMEM;

	down_write(&mirror->hmm->mirrors_sem);
	if (mirror->hmm->mm == NULL) {
		/*
		 * A racing hmm_mirror_unregister() is about to destroy the hmm
		 * struct. Try again to allocate a new one.
		 */
		up_write(&mirror->hmm->mirrors_sem);
		mirror->hmm = NULL;
		goto again;
	}
	list_add(&mirror->list, &mirror->hmm->mirrors);
	up_write(&mirror->hmm->mirrors_sem);

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
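
/*
 * Usage sketch (illustrative only, not part of this file): how a driver
 * might register a mirror. The my_* names are hypothetical; the only
 * callback a mirror must provide is sync_cpu_device_pagetables().
 *
 *	static int my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *			const struct hmm_update *update)
 *	{
 *		// Invalidate the device page table for the virtual address
 *		// range [update->start, update->end) and return 0. If
 *		// update->blockable is false and invalidation would sleep,
 *		// return -EAGAIN instead.
 *		return 0;
 *	}
 *
 *	static const struct hmm_mirror_ops my_mirror_ops = {
 *		.sync_cpu_device_pagetables = my_sync_cpu_device_pagetables,
 *	};
 *
 *	down_write(&mm->mmap_sem);
 *	my_dev->mirror.ops = &my_mirror_ops;
 *	ret = hmm_mirror_register(&my_dev->mirror, mm);
 *	up_write(&mm->mmap_sem);
 */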

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	bool should_unregister = false;
	struct mm_struct *mm;
	struct hmm *hmm;

	if (mirror->hmm == NULL)
		return;

	hmm = mirror->hmm;
	down_write(&hmm->mirrors_sem);
	list_del_init(&mirror->list);
	should_unregister = list_empty(&hmm->mirrors);
	mirror->hmm = NULL;
	mm = hmm->mm;
	hmm->mm = NULL;
	up_write(&hmm->mirrors_sem);

	if (!should_unregister || mm == NULL)
		return;

	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);

	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);

	kfree(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);

struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
	bool			fault;
	bool			block;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	int ret;

	flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
	flags |= write_fault ? FAULT_FLAG_WRITE : 0;
	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY)
		return -EBUSY;
	if (ret & VM_FAULT_ERROR) {
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	return -EAGAIN;
}

static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}

/*
 * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not ?
 * @write_fault: write fault ?
 * @walk: mm_walk structure
 * Returns: 0 on success, -EAGAIN after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	hmm_vma_walk->last = addr;
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EAGAIN)
				return ret;
		}
	}

	return (fault || write_fault) ? -EAGAIN : 0;
}

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	*fault = *write_fault = false;
	if (!hmm_vma_walk->fault)
		return;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory ? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault ? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}

static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (!hmm_vma_walk->fault) {
		*fault = *write_fault = false;
		return;
	}

	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*fault) || (*write_fault))
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pmd(struct mm_walk *walk,
			      unsigned long addr,
			      unsigned long end,
			      uint64_t *pfns,
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + pte_index(addr);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;
	hmm_vma_walk->last = end;
	return 0;
}

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);

	if (pte_none(pte)) {
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry, ignore migration, use
		 * device and report anything else as error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_pfn_from_pfn(range, swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(vma->vm_mm,
						     pmdp, addr);
				return -EAGAIN;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	if (fault || write_fault)
		goto fault;

	*pfn = hmm_pfn_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, walk);

	if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
		return hmm_pfns_bad(start, end, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		bool fault, write_fault;
		unsigned long npages;
		uint64_t *pfns;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(vma->vm_mm, pmdp);
			return -EAGAIN;
		}
		return 0;
	} else if (!pmd_present(pmd))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * the mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping one and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		i = (addr - range->start) >> PAGE_SHIFT;
		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point either it is a
	 * valid pmd entry pointing to a pte directory or it is a bad pmd that
	 * will not recover.
	 */
	if (pmd_bad(pmd))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() did unmap pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}

static void hmm_pfns_special(struct hmm_range *range)
{
	unsigned long addr = range->start, i = 0;

	for (; addr < range->end; addr += PAGE_SIZE, i++)
		range->pfns[i] = range->values[HMM_PFN_SPECIAL];
}

/*
 * hmm_vma_get_pfns() - snapshot CPU page table for a range of virtual addresses
 * @range: range being snapshotted
 * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
 *          vma permission, 0 success
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by the range struct. See hmm_vma_range_done() for
 * further information.
 *
 * The range struct is initialized here. It tracks the CPU page table, but only
 * if the function returns success (0), in which case the caller must then call
 * hmm_vma_range_done() to stop CPU page table update tracking on this range.
 *
 * NOT CALLING hmm_vma_range_done() IF FUNCTION RETURNS 0 WILL LEAD TO SERIOUS
 * MEMORY CORRUPTION ! YOU HAVE BEEN WARNED !
 */
int hmm_vma_get_pfns(struct hmm_range *range)
{
	struct vm_area_struct *vma = range->vma;
	struct hmm_vma_walk hmm_vma_walk;
	struct mm_walk mm_walk;
	struct hmm *hmm;

	/* Sanity check, this really should not happen ! */
	if (range->start < vma->vm_start || range->start >= vma->vm_end)
		return -EINVAL;
	if (range->end < vma->vm_start || range->end > vma->vm_end)
		return -EINVAL;

	hmm = hmm_register(vma->vm_mm);
	if (!hmm)
		return -ENOMEM;
	/* Caller must have registered a mirror, via hmm_mirror_register() ! */
	if (!hmm->mmu_notifier.ops)
		return -EINVAL;

	/* FIXME support hugetlb fs */
	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
			vma_is_dax(vma)) {
		hmm_pfns_special(range);
		return -EINVAL;
	}

	if (!(vma->vm_flags & VM_READ)) {
		/*
		 * If the vma does not allow read access, then assume that it
		 * does not allow write access either. Architectures that
		 * allow write without read access are not supported by HMM,
		 * because operations such as atomic access would not work.
		 */
		hmm_pfns_clear(range, range->pfns, range->start, range->end);
		return -EPERM;
	}

	/* Initialize range to track CPU page table update */
	spin_lock(&hmm->lock);
	range->valid = true;
	list_add_rcu(&range->list, &hmm->ranges);
	spin_unlock(&hmm->lock);

	hmm_vma_walk.fault = false;
	hmm_vma_walk.range = range;
	mm_walk.private = &hmm_vma_walk;

	mm_walk.vma = vma;
	mm_walk.mm = vma->vm_mm;
	mm_walk.pte_entry = NULL;
	mm_walk.test_walk = NULL;
	mm_walk.hugetlb_entry = NULL;
	mm_walk.pmd_entry = hmm_vma_walk_pmd;
	mm_walk.pte_hole = hmm_vma_walk_hole;

	walk_page_range(range->start, range->end, &mm_walk);
	return 0;
}
EXPORT_SYMBOL(hmm_vma_get_pfns);
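
/*
 * Usage sketch (illustrative only, not part of this file): driver-side setup
 * of a struct hmm_range before snapshotting. The my_* names, the flag bit
 * layout and the pfn_shift value are all hypothetical; the flags/values
 * tables define how this particular driver encodes HMM pfn entries.
 *
 *	static const uint64_t my_range_flags[HMM_PFN_FLAG_MAX] = {
 *		[HMM_PFN_VALID]		 = 1UL << 0,
 *		[HMM_PFN_WRITE]		 = 1UL << 1,
 *		[HMM_PFN_DEVICE_PRIVATE] = 1UL << 2,
 *	};
 *	static const uint64_t my_range_values[HMM_PFN_VALUE_MAX] = {
 *		[HMM_PFN_NONE]		 = 0,
 *		[HMM_PFN_ERROR]		 = 1UL << 63,
 *		[HMM_PFN_SPECIAL]	 = 1UL << 62,
 *	};
 *
 *	struct hmm_range range = {
 *		.vma	   = vma,
 *		.start	   = start,
 *		.end	   = end,
 *		.pfns	   = pfns,	// (end - start) >> PAGE_SHIFT entries
 *		.flags	   = my_range_flags,
 *		.values	   = my_range_values,
 *		.pfn_shift = 3,		// pfn is stored above the flag bits
 *	};
 *
 *	down_read(&mm->mmap_sem);
 *	if (!hmm_vma_get_pfns(&range)) {
 *		// ... consume range.pfns ...
 *		hmm_vma_range_done(&range);
 *	}
 *	up_read(&mm->mmap_sem);
 *
 * See the hmm_vma_range_done() comment below for the full retry pattern.
 */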

/*
 * hmm_vma_range_done() - stop tracking changes to CPU page table over a range
 * @range: range being tracked
 * Returns: false if range data has been invalidated, true otherwise
 *
 * The range struct is used to track updates to the CPU page table after a call
 * to either hmm_vma_get_pfns() or hmm_vma_fault(). Once the device driver is
 * done using the data, or wants to lock updates to the data it got from those
 * functions, it must call the hmm_vma_range_done() function, which will then
 * stop tracking CPU page table updates.
 *
 * Note that the device driver must still implement general CPU page table
 * update tracking either by using hmm_mirror (see hmm_mirror_register()) or
 * by using the mmu_notifier API directly.
 *
 * CPU page table update tracking done through hmm_range is only temporary and
 * to be used while trying to duplicate CPU page table contents for a range of
 * virtual addresses.
 *
 * There are two ways to use this :
 * again:
 *   hmm_vma_get_pfns(range); or hmm_vma_fault(...);
 *   trans = device_build_page_table_update_transaction(pfns);
 *   device_page_table_lock();
 *   if (!hmm_vma_range_done(range)) {
 *     device_page_table_unlock();
 *     goto again;
 *   }
 *   device_commit_transaction(trans);
 *   device_page_table_unlock();
 *
 * Or:
 *   hmm_vma_get_pfns(range); or hmm_vma_fault(...);
 *   device_page_table_lock();
 *   hmm_vma_range_done(range);
 *   device_update_page_table(range->pfns);
 *   device_page_table_unlock();
 */
bool hmm_vma_range_done(struct hmm_range *range)
{
	unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;
	struct hmm *hmm;

	if (range->end <= range->start) {
		BUG();
		return false;
	}

	hmm = hmm_register(range->vma->vm_mm);
	if (!hmm) {
		memset(range->pfns, 0, sizeof(*range->pfns) * npages);
		return false;
	}

	spin_lock(&hmm->lock);
	list_del_rcu(&range->list);
	spin_unlock(&hmm->lock);

	return range->valid;
}
EXPORT_SYMBOL(hmm_vma_range_done);

/*
 * hmm_vma_fault() - try to fault some address in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and does not drop
 *         mmap_sem)
 * Returns: 0 on success, error otherwise (-EAGAIN means mmap_sem has been
 *          dropped)
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 *
 * Expected use pattern:
 * retry:
 *   down_read(&mm->mmap_sem);
 *   // Find vma and address device wants to fault, initialize hmm_pfn_t
 *   // array accordingly
 *   ret = hmm_vma_fault(range, write, block);
 *   switch (ret) {
 *   case -EAGAIN:
 *     hmm_vma_range_done(range);
 *     // You might want to rate limit or yield to play nicely, you may
 *     // also commit any valid pfn in the array assuming that you are
 *     // getting true from hmm_vma_range_done()
 *     goto retry;
 *   case 0:
 *     break;
 *   case -ENOMEM:
 *   case -EINVAL:
 *   case -EPERM:
 *   default:
 *     // Handle error !
 *     up_read(&mm->mmap_sem);
 *     return;
 *   }
 *   // Take device driver lock that serializes device page table update
 *   driver_lock_device_page_table_update();
 *   hmm_vma_range_done(range);
 *   // Commit pfns we got from hmm_vma_fault()
 *   driver_unlock_device_page_table_update();
 *   up_read(&mm->mmap_sem);
 *
 * YOU MUST CALL hmm_vma_range_done() AFTER THIS FUNCTION RETURNS SUCCESS (0)
 * BEFORE FREEING THE range struct OR YOU WILL HAVE SERIOUS MEMORY CORRUPTION !
 *
 * YOU HAVE BEEN WARNED !
 */
int hmm_vma_fault(struct hmm_range *range, bool block)
{
	struct vm_area_struct *vma = range->vma;
	unsigned long start = range->start;
	struct hmm_vma_walk hmm_vma_walk;
	struct mm_walk mm_walk;
	struct hmm *hmm;
	int ret;

	/* Sanity check, this really should not happen ! */
	if (range->start < vma->vm_start || range->start >= vma->vm_end)
		return -EINVAL;
	if (range->end < vma->vm_start || range->end > vma->vm_end)
		return -EINVAL;

	hmm = hmm_register(vma->vm_mm);
	if (!hmm) {
		hmm_pfns_clear(range, range->pfns, range->start, range->end);
		return -ENOMEM;
	}
	/* Caller must have registered a mirror using hmm_mirror_register() */
	if (!hmm->mmu_notifier.ops)
		return -EINVAL;

	/* FIXME support hugetlb fs */
	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
			vma_is_dax(vma)) {
		hmm_pfns_special(range);
		return -EINVAL;
	}

	if (!(vma->vm_flags & VM_READ)) {
		/*
		 * If the vma does not allow read access, then assume that it
		 * does not allow write access either. Architectures that
		 * allow write without read access are not supported by HMM,
		 * because operations such as atomic access would not work.
		 */
		hmm_pfns_clear(range, range->pfns, range->start, range->end);
		return -EPERM;
	}

	/* Initialize range to track CPU page table update */
	spin_lock(&hmm->lock);
	range->valid = true;
	list_add_rcu(&range->list, &hmm->ranges);
	spin_unlock(&hmm->lock);

	hmm_vma_walk.fault = true;
	hmm_vma_walk.block = block;
	hmm_vma_walk.range = range;
	mm_walk.private = &hmm_vma_walk;
	hmm_vma_walk.last = range->start;

	mm_walk.vma = vma;
	mm_walk.mm = vma->vm_mm;
	mm_walk.pte_entry = NULL;
	mm_walk.test_walk = NULL;
	mm_walk.hugetlb_entry = NULL;
	mm_walk.pmd_entry = hmm_vma_walk_pmd;
	mm_walk.pte_hole = hmm_vma_walk_hole;

	do {
		ret = walk_page_range(start, range->end, &mm_walk);
		start = hmm_vma_walk.last;
	} while (ret == -EAGAIN);

	if (ret) {
		unsigned long i;

		i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
		hmm_pfns_clear(range, &range->pfns[i], hmm_vma_walk.last,
			       range->end);
		hmm_vma_range_done(range);
	}
	return ret;
}
EXPORT_SYMBOL(hmm_vma_fault);
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */


#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
				       unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (!page)
		return NULL;
	lock_page(page);
	return page;
}
EXPORT_SYMBOL(hmm_vma_alloc_locked_page);

static void hmm_devmem_ref_release(struct percpu_ref *ref)
{
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	complete(&devmem->completion);
}

static void hmm_devmem_ref_exit(void *data)
{
	struct percpu_ref *ref = data;
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	percpu_ref_exit(ref);
	devm_remove_action(devmem->device, &hmm_devmem_ref_exit, data);
}

static void hmm_devmem_ref_kill(void *data)
{
	struct percpu_ref *ref = data;
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	percpu_ref_kill(ref);
	wait_for_completion(&devmem->completion);
	devm_remove_action(devmem->device, &hmm_devmem_ref_kill, data);
}

static int hmm_devmem_fault(struct vm_area_struct *vma,
			    unsigned long addr,
			    const struct page *page,
			    unsigned int flags,
			    pmd_t *pmdp)
{
	struct hmm_devmem *devmem = page->pgmap->data;

	return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
}

static void hmm_devmem_free(struct page *page, void *data)
{
	struct hmm_devmem *devmem = data;

	page->mapping = NULL;

	devmem->ops->free(devmem, page);
}

static DEFINE_MUTEX(hmm_devmem_lock);
static RADIX_TREE(hmm_devmem_radix, GFP_KERNEL);

static void hmm_devmem_radix_release(struct resource *resource)
{
	resource_size_t key;

	mutex_lock(&hmm_devmem_lock);
	for (key = resource->start;
	     key <= resource->end;
	     key += PA_SECTION_SIZE)
		radix_tree_delete(&hmm_devmem_radix, key >> PA_SECTION_SHIFT);
	mutex_unlock(&hmm_devmem_lock);
}

static void hmm_devmem_release(struct device *dev, void *data)
{
	struct hmm_devmem *devmem = data;
	struct resource *resource = devmem->resource;
	unsigned long start_pfn, npages;
	struct zone *zone;
	struct page *page;

	if (percpu_ref_tryget_live(&devmem->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(&devmem->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT;
	npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT;

	page = pfn_to_page(start_pfn);
	zone = page_zone(page);

	mem_hotplug_begin();
	if (resource->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY)
		__remove_pages(zone, start_pfn, npages, NULL);
	else
		arch_remove_memory(start_pfn << PAGE_SHIFT,
				   npages << PAGE_SHIFT, NULL);
	mem_hotplug_done();

	hmm_devmem_radix_release(resource);
}

static int hmm_devmem_pages_create(struct hmm_devmem *devmem)
{
	resource_size_t key, align_start, align_size, align_end;
	struct device *device = devmem->device;
	int ret, nid, is_ram;

	align_start = devmem->resource->start & ~(PA_SECTION_SIZE - 1);
	align_size = ALIGN(devmem->resource->start +
			   resource_size(devmem->resource),
			   PA_SECTION_SIZE) - align_start;

	is_ram = region_intersects(align_start, align_size,
				   IORESOURCE_SYSTEM_RAM,
				   IORES_DESC_NONE);
	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, devmem->resource);
		return -ENXIO;
	}
	if (is_ram == REGION_INTERSECTS)
		return -ENXIO;

	if (devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY)
		devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
	else
		devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;

	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_fault = hmm_devmem_fault;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.dev = devmem->device;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;

	mutex_lock(&hmm_devmem_lock);
	align_end = align_start + align_size - 1;
	for (key = align_start; key <= align_end; key += PA_SECTION_SIZE) {
		struct hmm_devmem *dup;

		dup = radix_tree_lookup(&hmm_devmem_radix,
					key >> PA_SECTION_SHIFT);
		if (dup) {
			dev_err(device, "%s: collides with mapping for %s\n",
				__func__, dev_name(dup->device));
			mutex_unlock(&hmm_devmem_lock);
			ret = -EBUSY;
			goto error;
		}
		ret = radix_tree_insert(&hmm_devmem_radix,
					key >> PA_SECTION_SHIFT,
					devmem);
		if (ret) {
			dev_err(device, "%s: failed: %d\n", __func__, ret);
			mutex_unlock(&hmm_devmem_lock);
			goto error_radix;
		}
	}
	mutex_unlock(&hmm_devmem_lock);

	nid = dev_to_node(device);
	if (nid < 0)
		nid = numa_mem_id();

	mem_hotplug_begin();
	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory. Moreover
	 * the device memory is inaccessible, thus we do not want to create a
	 * linear mapping for the memory like arch_add_memory() would do.
	 *
	 * For device public memory, which is accessible by the CPU, we do
	 * want the linear mapping and thus use arch_add_memory().
	 */
	if (devmem->pagemap.type == MEMORY_DEVICE_PUBLIC)
		ret = arch_add_memory(nid, align_start, align_size, NULL,
				false);
	else
		ret = add_pages(nid, align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, NULL, false);
	if (ret) {
		mem_hotplug_done();
		goto error_add_memory;
	}
	move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, NULL);
	mem_hotplug_done();

	/*
	 * Initialization of the pages has been deferred until now in order
	 * to allow us to do the work while not holding the hotplug lock.
	 */
	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, &devmem->pagemap);

	return 0;

error_add_memory:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
error_radix:
	hmm_devmem_radix_release(devmem->resource);
error:
	return ret;
}

static int hmm_devmem_match(struct device *dev, void *data, void *match_data)
{
	struct hmm_devmem *devmem = data;

	return devmem->resource == match_data;
}

static void hmm_devmem_pages_remove(struct hmm_devmem *devmem)
{
	devres_release(devmem->device, &hmm_devmem_release,
		       &hmm_devmem_match, devmem->resource);
}

/*
 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
 *
 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
 * @device: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * Returns: pointer to new hmm_devmem struct, ERR_PTR otherwise
 *
 * This function first finds an empty range of physical address big enough to
 * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
 * in turn allocates struct pages. It does not do anything beyond that; all
 * events affecting the memory will go through the various callbacks provided
 * by the hmm_devmem_ops struct.
 *
 * The device driver should call this function during device initialization
 * and is then responsible for memory management. HMM only provides helpers.
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
				  struct device *device,
				  unsigned long size)
{
	struct hmm_devmem *devmem;
	resource_size_t addr;
	int ret;

	dev_pagemap_get_ops();

	devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
				   GFP_KERNEL, dev_to_node(device));
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = NULL;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		goto error_percpu_ref;

	ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
	if (ret)
		goto error_devm_add_action;

	size = ALIGN(size, PA_SECTION_SIZE);
	addr = min((unsigned long)iomem_resource.end,
		   (1UL << MAX_PHYSMEM_BITS) - 1);
	addr = addr - size + 1UL;

	/*
	 * FIXME add a new helper to quickly walk resource tree and find free
	 * range
	 *
	 * FIXME what about ioport_resource resource ?
	 */
	for (; addr > size && addr >= iomem_resource.start; addr -= size) {
		ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
		if (ret != REGION_DISJOINT)
			continue;

		devmem->resource = devm_request_mem_region(device, addr, size,
							   dev_name(device));
		if (!devmem->resource) {
			ret = -ENOMEM;
			goto error_no_resource;
		}
		break;
	}
	if (!devmem->resource) {
		ret = -ERANGE;
		goto error_no_resource;
	}

	devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);

	ret = hmm_devmem_pages_create(devmem);
	if (ret)
		goto error_pages;

	devres_add(device, devmem);

	ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
	if (ret) {
		hmm_devmem_remove(devmem);
		return ERR_PTR(ret);
	}

	return devmem;

error_pages:
	devm_release_mem_region(device, devmem->resource->start,
				resource_size(devmem->resource));
error_no_resource:
error_devm_add_action:
	hmm_devmem_ref_kill(&devmem->ref);
	hmm_devmem_ref_exit(&devmem->ref);
error_percpu_ref:
	devres_free(devmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(hmm_devmem_add);
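
/*
 * Usage sketch (illustrative only, not part of this file): a minimal
 * hmm_devmem_ops and a hotplug call at driver probe time. The my_* names
 * and the SZ_64M size are hypothetical.
 *
 *	static void my_devmem_free(struct hmm_devmem *devmem,
 *				   struct page *page)
 *	{
 *		// Return the backing device page to the driver's allocator.
 *	}
 *
 *	static int my_devmem_fault(struct hmm_devmem *devmem,
 *				   struct vm_area_struct *vma,
 *				   unsigned long addr, const struct page *page,
 *				   unsigned int flags, pmd_t *pmdp)
 *	{
 *		// CPU touched device memory: migrate the page back to
 *		// system memory here, returning a VM_FAULT_* code on error.
 *		return 0;
 *	}
 *
 *	static const struct hmm_devmem_ops my_devmem_ops = {
 *		.free	= my_devmem_free,
 *		.fault	= my_devmem_fault,
 *	};
 *
 *	devmem = hmm_devmem_add(&my_devmem_ops, &pdev->dev, SZ_64M);
 *	if (IS_ERR(devmem))
 *		return PTR_ERR(devmem);
 *	// Device pages span devmem->pfn_first .. devmem->pfn_last - 1.
 */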

struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
					   struct device *device,
					   struct resource *res)
{
	struct hmm_devmem *devmem;
	int ret;

	if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
		return ERR_PTR(-EINVAL);

	dev_pagemap_get_ops();

	devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
				   GFP_KERNEL, dev_to_node(device));
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = res;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		goto error_percpu_ref;

	ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
	if (ret)
		goto error_devm_add_action;

	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);

	ret = hmm_devmem_pages_create(devmem);
	if (ret)
		goto error_devm_add_action;

	devres_add(device, devmem);

	ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
	if (ret) {
		hmm_devmem_remove(devmem);
		return ERR_PTR(ret);
	}

	return devmem;

error_devm_add_action:
	hmm_devmem_ref_kill(&devmem->ref);
	hmm_devmem_ref_exit(&devmem->ref);
error_percpu_ref:
	devres_free(devmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(hmm_devmem_add_resource);

/*
 * hmm_devmem_remove() - remove device memory (kill and free ZONE_DEVICE)
 *
 * @devmem: hmm_devmem struct used to track and manage the ZONE_DEVICE memory
 *
 * This will hot-unplug memory that was hotplugged by hmm_devmem_add on behalf
 * of the device driver. It will free struct page and remove the resource that
 * reserved the physical address range for this device memory.
 */
void hmm_devmem_remove(struct hmm_devmem *devmem)
{
	resource_size_t start, size;
	struct device *device;
	bool cdm = false;

	if (!devmem)
		return;

	device = devmem->device;
	start = devmem->resource->start;
	size = resource_size(devmem->resource);

	cdm = devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY;
	hmm_devmem_ref_kill(&devmem->ref);
	hmm_devmem_ref_exit(&devmem->ref);
	hmm_devmem_pages_remove(devmem);

	if (!cdm)
		devm_release_mem_region(device, start, size);
}
EXPORT_SYMBOL(hmm_devmem_remove);

/*
 * A device driver that wants to handle multiple devices' memory through a
 * single fake device can use hmm_device to do so. This is purely a helper
 * and it is not needed to make use of any HMM functionality.
 */
#define HMM_DEVICE_MAX 256

static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
static DEFINE_SPINLOCK(hmm_device_lock);
static struct class *hmm_device_class;
static dev_t hmm_device_devt;

static void hmm_device_release(struct device *device)
{
	struct hmm_device *hmm_device;

	hmm_device = container_of(device, struct hmm_device, device);
	spin_lock(&hmm_device_lock);
	clear_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	kfree(hmm_device);
}

struct hmm_device *hmm_device_new(void *drvdata)
{
	struct hmm_device *hmm_device;

	hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
	if (!hmm_device)
		return ERR_PTR(-ENOMEM);

	spin_lock(&hmm_device_lock);
	hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
	if (hmm_device->minor >= HMM_DEVICE_MAX) {
		spin_unlock(&hmm_device_lock);
		kfree(hmm_device);
		return ERR_PTR(-EBUSY);
	}
	set_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
	hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
					hmm_device->minor);
	hmm_device->device.release = hmm_device_release;
	dev_set_drvdata(&hmm_device->device, drvdata);
	hmm_device->device.class = hmm_device_class;
	device_initialize(&hmm_device->device);

	return hmm_device;
}
EXPORT_SYMBOL(hmm_device_new);

void hmm_device_put(struct hmm_device *hmm_device)
{
	put_device(&hmm_device->device);
}
EXPORT_SYMBOL(hmm_device_put);
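
/*
 * Usage sketch (illustrative only, not part of this file): typical lifetime
 * of a fake hmm_device. my_drvdata is hypothetical, and whether the embedded
 * struct device is used as a parent for ZONE_DEVICE hotplug is up to the
 * driver.
 *
 *	struct hmm_device *hdev;
 *
 *	hdev = hmm_device_new(my_drvdata);
 *	if (IS_ERR(hdev))
 *		return PTR_ERR(hdev);
 *	// ... e.g. pass &hdev->device to hmm_devmem_add() ...
 *	hmm_device_put(hdev);	// drop the reference when done
 */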

static int __init hmm_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&hmm_device_devt, 0,
				  HMM_DEVICE_MAX,
				  "hmm_device");
	if (ret)
		return ret;

	hmm_device_class = class_create(THIS_MODULE, "hmm_device");
	if (IS_ERR(hmm_device_class)) {
		unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
		return PTR_ERR(hmm_device_class);
	}
	return 0;
}

device_initcall(hmm_init);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */