/*
 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/jump_label.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)

#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

/*
 * struct hmm - HMM per mm struct
 *
 * @mm: mm struct this HMM struct is bound to
 * @lock: lock protecting ranges list
 * @sequence: we track updates to the CPU page table with a sequence number
 * @ranges: list of ranges being snapshotted
 * @mirrors: list of mirrors for this mm
 * @mmu_notifier: mmu notifier to track updates to CPU page table
 * @mirrors_sem: read/write semaphore protecting the mirrors list
 */
struct hmm {
        struct mm_struct        *mm;
        spinlock_t              lock;
        atomic_t                sequence;
        struct list_head        ranges;
        struct list_head        mirrors;
        struct mmu_notifier     mmu_notifier;
        struct rw_semaphore     mirrors_sem;
};

/*
 * hmm_register - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 *
 * This is not intended to be used directly by device drivers. It allocates an
 * HMM struct if mm does not have one, and initializes it.
 */
static struct hmm *hmm_register(struct mm_struct *mm)
{
        struct hmm *hmm = READ_ONCE(mm->hmm);
        bool cleanup = false;

        /*
         * The hmm struct can only be freed once the mm_struct goes away,
         * hence we should always have pre-allocated a new hmm struct above.
         */
        if (hmm)
                return hmm;

        hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
        if (!hmm)
                return NULL;
        INIT_LIST_HEAD(&hmm->mirrors);
        init_rwsem(&hmm->mirrors_sem);
        atomic_set(&hmm->sequence, 0);
        hmm->mmu_notifier.ops = NULL;
        INIT_LIST_HEAD(&hmm->ranges);
        spin_lock_init(&hmm->lock);
        hmm->mm = mm;

        spin_lock(&mm->page_table_lock);
        if (!mm->hmm)
                mm->hmm = hmm;
        else
                cleanup = true;
        spin_unlock(&mm->page_table_lock);

        if (cleanup)
                goto error;

        /*
         * We should only get here if we hold the mmap_sem in write mode, i.e.
         * on registration of the first mirror through hmm_mirror_register().
         */
        hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
        if (__mmu_notifier_register(&hmm->mmu_notifier, mm))
                goto error_mm;

        return mm->hmm;

error_mm:
        spin_lock(&mm->page_table_lock);
        if (mm->hmm == hmm)
                mm->hmm = NULL;
        spin_unlock(&mm->page_table_lock);
error:
        kfree(hmm);
        return NULL;
}

void hmm_mm_destroy(struct mm_struct *mm)
{
        kfree(mm->hmm);
}

static void hmm_invalidate_range(struct hmm *hmm,
                                 enum hmm_update_type action,
                                 unsigned long start,
                                 unsigned long end)
{
        struct hmm_mirror *mirror;
        struct hmm_range *range;

        spin_lock(&hmm->lock);
        list_for_each_entry(range, &hmm->ranges, list) {
                unsigned long addr, idx, npages;

                if (end < range->start || start >= range->end)
                        continue;

                range->valid = false;
                addr = max(start, range->start);
                idx = (addr - range->start) >> PAGE_SHIFT;
                npages = (min(range->end, end) - addr) >> PAGE_SHIFT;
                memset(&range->pfns[idx], 0, sizeof(*range->pfns) * npages);
        }
        spin_unlock(&hmm->lock);

        down_read(&hmm->mirrors_sem);
        list_for_each_entry(mirror, &hmm->mirrors, list)
                mirror->ops->sync_cpu_device_pagetables(mirror, action,
                                                        start, end);
        up_read(&hmm->mirrors_sem);
}

static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
        struct hmm_mirror *mirror;
        struct hmm *hmm = mm->hmm;

        down_write(&hmm->mirrors_sem);
        mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
                                          list);
        while (mirror) {
                list_del_init(&mirror->list);
                if (mirror->ops->release) {
                        /*
                         * Drop mirrors_sem so the callback can wait on any
                         * pending work that might itself trigger an
                         * mmu_notifier callback and thus would deadlock with
                         * us.
                         */
                        up_write(&hmm->mirrors_sem);
                        mirror->ops->release(mirror);
                        down_write(&hmm->mirrors_sem);
                }
                mirror = list_first_entry_or_null(&hmm->mirrors,
                                                  struct hmm_mirror, list);
        }
        up_write(&hmm->mirrors_sem);
}

static int hmm_invalidate_range_start(struct mmu_notifier *mn,
                                      struct mm_struct *mm,
                                      unsigned long start,
                                      unsigned long end,
                                      bool blockable)
{
        struct hmm *hmm = mm->hmm;

        VM_BUG_ON(!hmm);

        atomic_inc(&hmm->sequence);

        return 0;
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
                                     struct mm_struct *mm,
                                     unsigned long start,
                                     unsigned long end)
{
        struct hmm *hmm = mm->hmm;

        VM_BUG_ON(!hmm);

        hmm_invalidate_range(mm->hmm, HMM_UPDATE_INVALIDATE, start, end);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
        .release                = hmm_release,
        .invalidate_range_start = hmm_invalidate_range_start,
        .invalidate_range_end   = hmm_invalidate_range_end,
};

/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * THE mm->mmap_sem MUST BE HELD IN WRITE MODE !
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
        /* Sanity check */
        if (!mm || !mirror || !mirror->ops)
                return -EINVAL;

again:
        mirror->hmm = hmm_register(mm);
        if (!mirror->hmm)
                return -ENOMEM;

        down_write(&mirror->hmm->mirrors_sem);
        if (mirror->hmm->mm == NULL) {
                /*
                 * A racing hmm_mirror_unregister() is about to destroy the hmm
                 * struct. Try again to allocate a new one.
                 */
                up_write(&mirror->hmm->mirrors_sem);
                mirror->hmm = NULL;
                goto again;
        } else {
                list_add(&mirror->list, &mirror->hmm->mirrors);
                up_write(&mirror->hmm->mirrors_sem);
        }

        return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
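
/*
 * Example: a minimal registration sketch (not taken from any driver; every
 * my_* name below is hypothetical). It shows a driver embedding a struct
 * hmm_mirror in its per process state, providing the mandatory
 * sync_cpu_device_pagetables() callback, and registering while holding
 * mmap_sem in write mode as required:
 *
 *   static void my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *                                             enum hmm_update_type update,
 *                                             unsigned long start,
 *                                             unsigned long end)
 *   {
 *       struct my_process *p;
 *
 *       p = container_of(mirror, struct my_process, mirror);
 *       my_invalidate_device_range(p, start, end);
 *   }
 *
 *   static const struct hmm_mirror_ops my_mirror_ops = {
 *       .sync_cpu_device_pagetables = &my_sync_cpu_device_pagetables,
 *   };
 *
 *   static int my_bind_process(struct my_process *p, struct mm_struct *mm)
 *   {
 *       int ret;
 *
 *       p->mirror.ops = &my_mirror_ops;
 *       down_write(&mm->mmap_sem);
 *       ret = hmm_mirror_register(&p->mirror, mm);
 *       up_write(&mm->mmap_sem);
 *       return ret;
 *   }
 */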

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
        bool should_unregister = false;
        struct mm_struct *mm;
        struct hmm *hmm;

        if (mirror->hmm == NULL)
                return;

        hmm = mirror->hmm;
        down_write(&hmm->mirrors_sem);
        list_del_init(&mirror->list);
        should_unregister = list_empty(&hmm->mirrors);
        mirror->hmm = NULL;
        mm = hmm->mm;
        hmm->mm = NULL;
        up_write(&hmm->mirrors_sem);

        if (!should_unregister || mm == NULL)
                return;

        mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);

        spin_lock(&mm->page_table_lock);
        if (mm->hmm == hmm)
                mm->hmm = NULL;
        spin_unlock(&mm->page_table_lock);

        kfree(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);

struct hmm_vma_walk {
        struct hmm_range        *range;
        unsigned long           last;
        bool                    fault;
        bool                    block;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
                            bool write_fault, uint64_t *pfn)
{
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE;
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;
        int ret;

        flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
        flags |= write_fault ? FAULT_FLAG_WRITE : 0;
        ret = handle_mm_fault(vma, addr, flags);
        if (ret & VM_FAULT_RETRY)
                return -EBUSY;
        if (ret & VM_FAULT_ERROR) {
                *pfn = range->values[HMM_PFN_ERROR];
                return -EFAULT;
        }

        return -EAGAIN;
}

static int hmm_pfns_bad(unsigned long addr,
                        unsigned long end,
                        struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        uint64_t *pfns = range->pfns;
        unsigned long i;

        i = (addr - range->start) >> PAGE_SHIFT;
        for (; addr < end; addr += PAGE_SIZE, i++)
                pfns[i] = range->values[HMM_PFN_ERROR];

        return 0;
}

/*
 * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not ?
 * @write_fault: write fault ?
 * @walk: mm_walk structure
 * Returns: 0 on success, -EAGAIN after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
                              bool fault, bool write_fault,
                              struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        uint64_t *pfns = range->pfns;
        unsigned long i;

        hmm_vma_walk->last = addr;
        i = (addr - range->start) >> PAGE_SHIFT;
        for (; addr < end; addr += PAGE_SIZE, i++) {
                pfns[i] = range->values[HMM_PFN_NONE];
                if (fault || write_fault) {
                        int ret;

                        ret = hmm_vma_do_fault(walk, addr, write_fault,
                                               &pfns[i]);
                        if (ret != -EAGAIN)
                                return ret;
                }
        }

        return (fault || write_fault) ? -EAGAIN : 0;
}

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
                                      uint64_t pfns, uint64_t cpu_flags,
                                      bool *fault, bool *write_fault)
{
        struct hmm_range *range = hmm_vma_walk->range;

        *fault = *write_fault = false;
        if (!hmm_vma_walk->fault)
                return;

        /* We aren't asked to do anything ... */
        if (!(pfns & range->flags[HMM_PFN_VALID]))
                return;
        /* If this is device memory then only fault if explicitly requested */
        if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
                /* Do we fault on device memory ? */
                if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
                        *write_fault = pfns & range->flags[HMM_PFN_WRITE];
                        *fault = true;
                }
                return;
        }

        /* If CPU page table is not valid then we need to fault */
        *fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
        /* Need to write fault ? */
        if ((pfns & range->flags[HMM_PFN_WRITE]) &&
            !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
                *write_fault = true;
                *fault = true;
        }
}

static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
                                 const uint64_t *pfns, unsigned long npages,
                                 uint64_t cpu_flags, bool *fault,
                                 bool *write_fault)
{
        unsigned long i;

        if (!hmm_vma_walk->fault) {
                *fault = *write_fault = false;
                return;
        }

        for (i = 0; i < npages; ++i) {
                hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
                                   fault, write_fault);
                if ((*fault) || (*write_fault))
                        return;
        }
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
                             struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        bool fault, write_fault;
        unsigned long i, npages;
        uint64_t *pfns;

        i = (addr - range->start) >> PAGE_SHIFT;
        npages = (end - addr) >> PAGE_SHIFT;
        pfns = &range->pfns[i];
        hmm_range_need_fault(hmm_vma_walk, pfns, npages,
                             0, &fault, &write_fault);
        return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
        if (pmd_protnone(pmd))
                return 0;
        return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
                                range->flags[HMM_PFN_WRITE] :
                                range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pmd(struct mm_walk *walk,
                              unsigned long addr,
                              unsigned long end,
                              uint64_t *pfns,
                              pmd_t pmd)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long pfn, npages, i;
        bool fault, write_fault;
        uint64_t cpu_flags;

        npages = (end - addr) >> PAGE_SHIFT;
        cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
        hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
                             &fault, &write_fault);

        if (pmd_protnone(pmd) || fault || write_fault)
                return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

        pfn = pmd_pfn(pmd) + pte_index(addr);
        for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
                pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;
        hmm_vma_walk->last = end;
        return 0;
}

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
        if (pte_none(pte) || !pte_present(pte))
                return 0;
        return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
                                range->flags[HMM_PFN_WRITE] :
                                range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
                              unsigned long end, pmd_t *pmdp, pte_t *ptep,
                              uint64_t *pfn)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;
        bool fault, write_fault;
        uint64_t cpu_flags;
        pte_t pte = *ptep;
        uint64_t orig_pfn = *pfn;

        *pfn = range->values[HMM_PFN_NONE];
        cpu_flags = pte_to_hmm_pfn_flags(range, pte);
        hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
                           &fault, &write_fault);

        if (pte_none(pte)) {
                if (fault || write_fault)
                        goto fault;
                return 0;
        }

        if (!pte_present(pte)) {
                swp_entry_t entry = pte_to_swp_entry(pte);

                if (!non_swap_entry(entry)) {
                        if (fault || write_fault)
                                goto fault;
                        return 0;
                }

                /*
                 * This is a special swap entry: ignore migration, use the
                 * device entry and report anything else as an error.
                 */
                if (is_device_private_entry(entry)) {
                        cpu_flags = range->flags[HMM_PFN_VALID] |
                                range->flags[HMM_PFN_DEVICE_PRIVATE];
                        cpu_flags |= is_write_device_private_entry(entry) ?
                                range->flags[HMM_PFN_WRITE] : 0;
                        hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
                                           &fault, &write_fault);
                        if (fault || write_fault)
                                goto fault;
                        *pfn = hmm_pfn_from_pfn(range, swp_offset(entry));
                        *pfn |= cpu_flags;
                        return 0;
                }

                if (is_migration_entry(entry)) {
                        if (fault || write_fault) {
                                pte_unmap(ptep);
                                hmm_vma_walk->last = addr;
                                migration_entry_wait(vma->vm_mm,
                                                     pmdp, addr);
                                return -EAGAIN;
                        }
                        return 0;
                }

                /* Report error for everything else */
                *pfn = range->values[HMM_PFN_ERROR];
                return -EFAULT;
        }

        if (fault || write_fault)
                goto fault;

        *pfn = hmm_pfn_from_pfn(range, pte_pfn(pte)) | cpu_flags;
        return 0;

fault:
        pte_unmap(ptep);
        /* Fault any virtual address we were asked to fault */
        return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
                            unsigned long start,
                            unsigned long end,
                            struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;
        uint64_t *pfns = range->pfns;
        unsigned long addr = start, i;
        pte_t *ptep;
        pmd_t pmd;

again:
        pmd = READ_ONCE(*pmdp);
        if (pmd_none(pmd))
                return hmm_vma_walk_hole(start, end, walk);

        if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
                return hmm_pfns_bad(start, end, walk);

        if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
                bool fault, write_fault;
                unsigned long npages;
                uint64_t *pfns;

                i = (addr - range->start) >> PAGE_SHIFT;
                npages = (end - addr) >> PAGE_SHIFT;
                pfns = &range->pfns[i];

                hmm_range_need_fault(hmm_vma_walk, pfns, npages,
                                     0, &fault, &write_fault);
                if (fault || write_fault) {
                        hmm_vma_walk->last = addr;
                        pmd_migration_entry_wait(vma->vm_mm, pmdp);
                        return -EAGAIN;
                }
                return 0;
        } else if (!pmd_present(pmd))
                return hmm_pfns_bad(start, end, walk);

        if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
                /*
                 * No need to take pmd_lock here: even if some other thread
                 * is splitting the huge pmd we will get that event through
                 * the mmu_notifier callback.
                 *
                 * So just read the pmd value, check again that it is a
                 * transparent huge or device mapping, and compute the
                 * corresponding pfn values.
                 */
                pmd = pmd_read_atomic(pmdp);
                barrier();
                if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
                        goto again;

                i = (addr - range->start) >> PAGE_SHIFT;
                return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
        }

        /*
         * We have handled all the valid cases above, i.e. either none,
         * migration, huge or transparent huge. At this point either it is a
         * valid pmd entry pointing to a pte directory or it is a bad pmd that
         * will not recover.
         */
        if (pmd_bad(pmd))
                return hmm_pfns_bad(start, end, walk);

        ptep = pte_offset_map(pmdp, addr);
        i = (addr - range->start) >> PAGE_SHIFT;
        for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
                int r;

                r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
                if (r) {
                        /* hmm_vma_handle_pte() did unmap pte directory */
                        hmm_vma_walk->last = addr;
                        return r;
                }
        }
        pte_unmap(ptep - 1);

        hmm_vma_walk->last = addr;
        return 0;
}

static void hmm_pfns_clear(struct hmm_range *range,
                           uint64_t *pfns,
                           unsigned long addr,
                           unsigned long end)
{
        for (; addr < end; addr += PAGE_SIZE, pfns++)
                *pfns = range->values[HMM_PFN_NONE];
}

static void hmm_pfns_special(struct hmm_range *range)
{
        unsigned long addr = range->start, i = 0;

        for (; addr < range->end; addr += PAGE_SIZE, i++)
                range->pfns[i] = range->values[HMM_PFN_SPECIAL];
}

/*
 * hmm_vma_get_pfns() - snapshot CPU page table for a range of virtual addresses
 * @range: range being snapshotted
 * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
 *          vma permission, 0 success
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by the range struct. See hmm_vma_range_done() for
 * further information.
 *
 * The range struct is initialized here. It tracks the CPU page table, but only
 * if the function returns success (0), in which case the caller must then call
 * hmm_vma_range_done() to stop CPU page table update tracking on this range.
 *
 * NOT CALLING hmm_vma_range_done() IF FUNCTION RETURNS 0 WILL LEAD TO SERIOUS
 * MEMORY CORRUPTION ! YOU HAVE BEEN WARNED !
 */
int hmm_vma_get_pfns(struct hmm_range *range)
{
        struct vm_area_struct *vma = range->vma;
        struct hmm_vma_walk hmm_vma_walk;
        struct mm_walk mm_walk;
        struct hmm *hmm;

        /* Sanity check, this really should not happen! */
        if (range->start < vma->vm_start || range->start >= vma->vm_end)
                return -EINVAL;
        if (range->end < vma->vm_start || range->end > vma->vm_end)
                return -EINVAL;

        hmm = hmm_register(vma->vm_mm);
        if (!hmm)
                return -ENOMEM;
        /* Caller must have registered a mirror, via hmm_mirror_register() ! */
        if (!hmm->mmu_notifier.ops)
                return -EINVAL;

        /* FIXME support hugetlb fs */
        if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
                        vma_is_dax(vma)) {
                hmm_pfns_special(range);
                return -EINVAL;
        }

        if (!(vma->vm_flags & VM_READ)) {
                /*
                 * If the vma does not allow read access, then assume that it
                 * does not allow write access, either. Architectures that
                 * allow write without read access are not supported by HMM,
                 * because operations such as atomic access would not work.
                 */
                hmm_pfns_clear(range, range->pfns, range->start, range->end);
                return -EPERM;
        }

        /* Initialize range to track CPU page table update */
        spin_lock(&hmm->lock);
        range->valid = true;
        list_add_rcu(&range->list, &hmm->ranges);
        spin_unlock(&hmm->lock);

        hmm_vma_walk.fault = false;
        hmm_vma_walk.range = range;
        mm_walk.private = &hmm_vma_walk;

        mm_walk.vma = vma;
        mm_walk.mm = vma->vm_mm;
        mm_walk.pte_entry = NULL;
        mm_walk.test_walk = NULL;
        mm_walk.hugetlb_entry = NULL;
        mm_walk.pmd_entry = hmm_vma_walk_pmd;
        mm_walk.pte_hole = hmm_vma_walk_hole;

        walk_page_range(range->start, range->end, &mm_walk);
        return 0;
}
EXPORT_SYMBOL(hmm_vma_get_pfns);
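
/*
 * Example: a minimal snapshot sketch (not taken from any driver; the my_*
 * encoding below is hypothetical). range->flags and range->values define how
 * HMM encodes pfn flags and special values in the driver's own pfn format,
 * indexed by the HMM_PFN_* enums used throughout this file:
 *
 *   static const uint64_t my_flags[HMM_PFN_FLAG_MAX] = {
 *       [HMM_PFN_VALID]          = 1UL << 0,
 *       [HMM_PFN_WRITE]          = 1UL << 1,
 *       [HMM_PFN_DEVICE_PRIVATE] = 1UL << 2,
 *   };
 *   static const uint64_t my_values[HMM_PFN_VALUE_MAX] = {
 *       [HMM_PFN_NONE]    = 0,
 *       [HMM_PFN_ERROR]   = 1UL << 3,
 *       [HMM_PFN_SPECIAL] = 1UL << 4,
 *   };
 *
 *   range.vma = vma;
 *   range.start = start;
 *   range.end = end;
 *   range.pfns = pfns;
 *   range.flags = my_flags;
 *   range.values = my_values;
 *   range.pfn_shift = 5;
 *
 *   down_read(&mm->mmap_sem);
 *   ret = hmm_vma_get_pfns(&range);
 *   if (!ret) {
 *       (use range.pfns, then stop tracking the range)
 *       hmm_vma_range_done(&range);
 *   }
 *   up_read(&mm->mmap_sem);
 */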

/*
 * hmm_vma_range_done() - stop tracking changes to CPU page table over a range
 * @range: range being tracked
 * Returns: false if range data has been invalidated, true otherwise
 *
 * The range struct is used to track updates to the CPU page table after a call
 * to either hmm_vma_get_pfns() or hmm_vma_fault(). Once the device driver is
 * done using the data, or wants to lock updates to the data it got from those
 * functions, it must call the hmm_vma_range_done() function, which will then
 * stop tracking CPU page table updates.
 *
 * Note that the device driver must still implement general CPU page table
 * update tracking either by using hmm_mirror (see hmm_mirror_register()) or by
 * using the mmu_notifier API directly.
 *
 * CPU page table update tracking done through hmm_range is only temporary and
 * to be used while trying to duplicate CPU page table contents for a range of
 * virtual addresses.
 *
 * There are two ways to use this :
 * again:
 *   hmm_vma_get_pfns(range); or hmm_vma_fault(...);
 *   trans = device_build_page_table_update_transaction(pfns);
 *   device_page_table_lock();
 *   if (!hmm_vma_range_done(range)) {
 *     device_page_table_unlock();
 *     goto again;
 *   }
 *   device_commit_transaction(trans);
 *   device_page_table_unlock();
 *
 * Or:
 *   hmm_vma_get_pfns(range); or hmm_vma_fault(...);
 *   device_page_table_lock();
 *   hmm_vma_range_done(range);
 *   device_update_page_table(range->pfns);
 *   device_page_table_unlock();
 */
bool hmm_vma_range_done(struct hmm_range *range)
{
        unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;
        struct hmm *hmm;

        if (range->end <= range->start) {
                BUG();
                return false;
        }

        hmm = hmm_register(range->vma->vm_mm);
        if (!hmm) {
                memset(range->pfns, 0, sizeof(*range->pfns) * npages);
                return false;
        }

        spin_lock(&hmm->lock);
        list_del_rcu(&range->list);
        spin_unlock(&hmm->lock);

        return range->valid;
}
EXPORT_SYMBOL(hmm_vma_range_done);

/*
 * hmm_vma_fault() - try to fault some address in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and does not drop
 *         mmap_sem)
 * Returns: 0 on success, error otherwise (-EAGAIN means mmap_sem has been
 *          dropped)
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 *
 * Expected use pattern:
 * retry:
 *   down_read(&mm->mmap_sem);
 *   // Find vma and address device wants to fault, initialize hmm_pfn_t
 *   // array accordingly
 *   ret = hmm_vma_fault(range, block);
 *   switch (ret) {
 *   case -EAGAIN:
 *     hmm_vma_range_done(range);
 *     // You might want to rate limit or yield to play nicely, you may
 *     // also commit any valid pfn in the array assuming that you are
 *     // getting true from hmm_vma_range_done()
 *     goto retry;
 *   case 0:
 *     break;
 *   default:
 *     // Handle error !
 *     up_read(&mm->mmap_sem);
 *     return;
 *   }
 *   // Take device driver lock that serializes device page table update
 *   driver_lock_device_page_table_update();
 *   hmm_vma_range_done(range);
 *   // Commit pfns we got from hmm_vma_fault()
 *   driver_unlock_device_page_table_update();
 *   up_read(&mm->mmap_sem)
 *
 * YOU MUST CALL hmm_vma_range_done() AFTER THIS FUNCTION RETURNS SUCCESS (0)
 * BEFORE FREEING THE range struct OR YOU WILL HAVE SERIOUS MEMORY CORRUPTION !
 *
 * YOU HAVE BEEN WARNED !
 */
int hmm_vma_fault(struct hmm_range *range, bool block)
{
        struct vm_area_struct *vma = range->vma;
        unsigned long start = range->start;
        struct hmm_vma_walk hmm_vma_walk;
        struct mm_walk mm_walk;
        struct hmm *hmm;
        int ret;

        /* Sanity check, this really should not happen! */
        if (range->start < vma->vm_start || range->start >= vma->vm_end)
                return -EINVAL;
        if (range->end < vma->vm_start || range->end > vma->vm_end)
                return -EINVAL;

        hmm = hmm_register(vma->vm_mm);
        if (!hmm) {
                hmm_pfns_clear(range, range->pfns, range->start, range->end);
                return -ENOMEM;
        }
        /* Caller must have registered a mirror using hmm_mirror_register() */
        if (!hmm->mmu_notifier.ops)
                return -EINVAL;

        /* FIXME support hugetlb fs */
        if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
                        vma_is_dax(vma)) {
                hmm_pfns_special(range);
                return -EINVAL;
        }

        if (!(vma->vm_flags & VM_READ)) {
                /*
                 * If the vma does not allow read access, then assume that it
                 * does not allow write access, either. Architectures that
                 * allow write without read access are not supported by HMM,
                 * because operations such as atomic access would not work.
                 */
                hmm_pfns_clear(range, range->pfns, range->start, range->end);
                return -EPERM;
        }

        /* Initialize range to track CPU page table update */
        spin_lock(&hmm->lock);
        range->valid = true;
        list_add_rcu(&range->list, &hmm->ranges);
        spin_unlock(&hmm->lock);

        hmm_vma_walk.fault = true;
        hmm_vma_walk.block = block;
        hmm_vma_walk.range = range;
        mm_walk.private = &hmm_vma_walk;
        hmm_vma_walk.last = range->start;

        mm_walk.vma = vma;
        mm_walk.mm = vma->vm_mm;
        mm_walk.pte_entry = NULL;
        mm_walk.test_walk = NULL;
        mm_walk.hugetlb_entry = NULL;
        mm_walk.pmd_entry = hmm_vma_walk_pmd;
        mm_walk.pte_hole = hmm_vma_walk_hole;

        do {
                ret = walk_page_range(start, range->end, &mm_walk);
                start = hmm_vma_walk.last;
        } while (ret == -EAGAIN);

        if (ret) {
                unsigned long i;

                i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
                hmm_pfns_clear(range, &range->pfns[i], hmm_vma_walk.last,
                               range->end);
                hmm_vma_range_done(range);
        }
        return ret;
}
EXPORT_SYMBOL(hmm_vma_fault);
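
/*
 * Example: the expected-use pattern documented above, written out as a helper
 * (a sketch only; the my_* names are hypothetical and error handling is
 * trimmed to the essentials):
 *
 *   static int my_fault_and_commit(struct mm_struct *mm,
 *                                  struct hmm_range *range, bool block)
 *   {
 *       int ret;
 *
 *   retry:
 *       down_read(&mm->mmap_sem);
 *       ret = hmm_vma_fault(range, block);
 *       if (ret == -EAGAIN) {
 *           hmm_vma_range_done(range);
 *           goto retry;
 *       }
 *       if (ret) {
 *           up_read(&mm->mmap_sem);
 *           return ret;
 *       }
 *
 *       my_device_page_table_lock();
 *       if (hmm_vma_range_done(range))
 *           my_device_commit_pfns(range->pfns);
 *       my_device_page_table_unlock();
 *       up_read(&mm->mmap_sem);
 *       return 0;
 *   }
 */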
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */


#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
                                       unsigned long addr)
{
        struct page *page;

        page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
        if (!page)
                return NULL;
        lock_page(page);
        return page;
}
EXPORT_SYMBOL(hmm_vma_alloc_locked_page);

static void hmm_devmem_ref_release(struct percpu_ref *ref)
{
        struct hmm_devmem *devmem;

        devmem = container_of(ref, struct hmm_devmem, ref);
        complete(&devmem->completion);
}

static void hmm_devmem_ref_exit(void *data)
{
        struct percpu_ref *ref = data;
        struct hmm_devmem *devmem;

        devmem = container_of(ref, struct hmm_devmem, ref);
        percpu_ref_exit(ref);
        devm_remove_action(devmem->device, &hmm_devmem_ref_exit, data);
}

static void hmm_devmem_ref_kill(void *data)
{
        struct percpu_ref *ref = data;
        struct hmm_devmem *devmem;

        devmem = container_of(ref, struct hmm_devmem, ref);
        percpu_ref_kill(ref);
        wait_for_completion(&devmem->completion);
        devm_remove_action(devmem->device, &hmm_devmem_ref_kill, data);
}

static int hmm_devmem_fault(struct vm_area_struct *vma,
                            unsigned long addr,
                            const struct page *page,
                            unsigned int flags,
                            pmd_t *pmdp)
{
        struct hmm_devmem *devmem = page->pgmap->data;

        return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
}

static void hmm_devmem_free(struct page *page, void *data)
{
        struct hmm_devmem *devmem = data;

        page->mapping = NULL;

        devmem->ops->free(devmem, page);
}

static DEFINE_MUTEX(hmm_devmem_lock);
static RADIX_TREE(hmm_devmem_radix, GFP_KERNEL);

static void hmm_devmem_radix_release(struct resource *resource)
{
        resource_size_t key;

        mutex_lock(&hmm_devmem_lock);
        for (key = resource->start;
             key <= resource->end;
             key += PA_SECTION_SIZE)
                radix_tree_delete(&hmm_devmem_radix, key >> PA_SECTION_SHIFT);
        mutex_unlock(&hmm_devmem_lock);
}

static void hmm_devmem_release(struct device *dev, void *data)
{
        struct hmm_devmem *devmem = data;
        struct resource *resource = devmem->resource;
        unsigned long start_pfn, npages;
        struct zone *zone;
        struct page *page;

        if (percpu_ref_tryget_live(&devmem->ref)) {
                dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
                percpu_ref_put(&devmem->ref);
        }

        /* pages are dead and unused, undo the arch mapping */
        start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT;
        npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT;

        page = pfn_to_page(start_pfn);
        zone = page_zone(page);

        mem_hotplug_begin();
        if (resource->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY)
                __remove_pages(zone, start_pfn, npages, NULL);
        else
                arch_remove_memory(start_pfn << PAGE_SHIFT,
                                   npages << PAGE_SHIFT, NULL);
        mem_hotplug_done();

        hmm_devmem_radix_release(resource);
}

static int hmm_devmem_pages_create(struct hmm_devmem *devmem)
{
        resource_size_t key, align_start, align_size, align_end;
        struct device *device = devmem->device;
        int ret, nid, is_ram;

        align_start = devmem->resource->start & ~(PA_SECTION_SIZE - 1);
        align_size = ALIGN(devmem->resource->start +
                           resource_size(devmem->resource),
                           PA_SECTION_SIZE) - align_start;

        is_ram = region_intersects(align_start, align_size,
                                   IORESOURCE_SYSTEM_RAM,
                                   IORES_DESC_NONE);
        if (is_ram == REGION_MIXED) {
                WARN_ONCE(1, "%s attempted on mixed region %pr\n",
                                __func__, devmem->resource);
                return -ENXIO;
        }
        if (is_ram == REGION_INTERSECTS)
                return -ENXIO;

        if (devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY)
                devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
        else
                devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;

        devmem->pagemap.res = *devmem->resource;
        devmem->pagemap.page_fault = hmm_devmem_fault;
        devmem->pagemap.page_free = hmm_devmem_free;
        devmem->pagemap.dev = devmem->device;
        devmem->pagemap.ref = &devmem->ref;
        devmem->pagemap.data = devmem;

        mutex_lock(&hmm_devmem_lock);
        align_end = align_start + align_size - 1;
        for (key = align_start; key <= align_end; key += PA_SECTION_SIZE) {
                struct hmm_devmem *dup;

                dup = radix_tree_lookup(&hmm_devmem_radix,
                                        key >> PA_SECTION_SHIFT);
                if (dup) {
                        dev_err(device, "%s: collides with mapping for %s\n",
                                __func__, dev_name(dup->device));
                        mutex_unlock(&hmm_devmem_lock);
                        ret = -EBUSY;
                        goto error;
                }
                ret = radix_tree_insert(&hmm_devmem_radix,
                                        key >> PA_SECTION_SHIFT,
                                        devmem);
                if (ret) {
                        dev_err(device, "%s: failed: %d\n", __func__, ret);
                        mutex_unlock(&hmm_devmem_lock);
                        goto error_radix;
                }
        }
        mutex_unlock(&hmm_devmem_lock);

        nid = dev_to_node(device);
        if (nid < 0)
                nid = numa_mem_id();

        mem_hotplug_begin();
        /*
         * For device private memory we call add_pages() as we only need to
         * allocate and initialize struct page for the device memory.
         * Moreover the device memory is inaccessible, thus we do not want to
         * create a linear mapping for the memory like arch_add_memory()
         * would do.
         *
         * For device public memory, which is accessible by the CPU, we do
         * want the linear mapping and thus use arch_add_memory().
         */
        if (devmem->pagemap.type == MEMORY_DEVICE_PUBLIC)
                ret = arch_add_memory(nid, align_start, align_size, NULL,
                                false);
        else
                ret = add_pages(nid, align_start >> PAGE_SHIFT,
                                align_size >> PAGE_SHIFT, NULL, false);
        if (ret) {
                mem_hotplug_done();
                goto error_add_memory;
        }
        move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
                                align_start >> PAGE_SHIFT,
                                align_size >> PAGE_SHIFT, NULL);
        mem_hotplug_done();

        /*
         * Initialization of the pages has been deferred until now in order
         * to allow us to do the work while not holding the hotplug lock.
         */
        memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
                                align_start >> PAGE_SHIFT,
                                align_size >> PAGE_SHIFT, &devmem->pagemap);

        return 0;

error_add_memory:
        untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
error_radix:
        hmm_devmem_radix_release(devmem->resource);
error:
        return ret;
}

static int hmm_devmem_match(struct device *dev, void *data, void *match_data)
{
        struct hmm_devmem *devmem = data;

        return devmem->resource == match_data;
}

static void hmm_devmem_pages_remove(struct hmm_devmem *devmem)
{
        devres_release(devmem->device, &hmm_devmem_release,
                       &hmm_devmem_match, devmem->resource);
}

/*
 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
 *
 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
 * @device: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * Returns: pointer to new hmm_devmem struct, ERR_PTR otherwise
 *
 * This function first finds an empty range of physical address big enough to
 * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
 * in turn allocates struct pages. It does not do anything beyond that; all
 * events affecting the memory will go through the various callbacks provided
 * by the hmm_devmem_ops struct.
 *
 * The device driver should call this function during device initialization
 * and is then responsible for memory management. HMM only provides helpers.
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
                                  struct device *device,
                                  unsigned long size)
{
        struct hmm_devmem *devmem;
        resource_size_t addr;
        int ret;

        dev_pagemap_get_ops();

        devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
                                   GFP_KERNEL, dev_to_node(device));
        if (!devmem)
                return ERR_PTR(-ENOMEM);

        init_completion(&devmem->completion);
        devmem->pfn_first = -1UL;
        devmem->pfn_last = -1UL;
        devmem->resource = NULL;
        devmem->device = device;
        devmem->ops = ops;

        ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
                              0, GFP_KERNEL);
        if (ret)
                goto error_percpu_ref;

        ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
        if (ret)
                goto error_devm_add_action;

        size = ALIGN(size, PA_SECTION_SIZE);
        addr = min((unsigned long)iomem_resource.end,
                   (1UL << MAX_PHYSMEM_BITS) - 1);
        addr = addr - size + 1UL;

        /*
         * FIXME add a new helper to quickly walk resource tree and find free
         * range
         *
         * FIXME what about ioport_resource resource ?
         */
        for (; addr > size && addr >= iomem_resource.start; addr -= size) {
                ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
                if (ret != REGION_DISJOINT)
                        continue;

                devmem->resource = devm_request_mem_region(device, addr, size,
                                                           dev_name(device));
                if (!devmem->resource) {
                        ret = -ENOMEM;
                        goto error_no_resource;
                }
                break;
        }
        if (!devmem->resource) {
                ret = -ERANGE;
                goto error_no_resource;
        }

        devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
        devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
        devmem->pfn_last = devmem->pfn_first +
                           (resource_size(devmem->resource) >> PAGE_SHIFT);

        ret = hmm_devmem_pages_create(devmem);
        if (ret)
                goto error_pages;

        devres_add(device, devmem);

        ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
        if (ret) {
                hmm_devmem_remove(devmem);
                return ERR_PTR(ret);
        }

        return devmem;

error_pages:
        devm_release_mem_region(device, devmem->resource->start,
                                resource_size(devmem->resource));
error_no_resource:
error_devm_add_action:
        hmm_devmem_ref_kill(&devmem->ref);
        hmm_devmem_ref_exit(&devmem->ref);
error_percpu_ref:
        devres_free(devmem);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(hmm_devmem_add);
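
/*
 * Example: hotplugging device memory at probe time (a sketch only; the my_*
 * callbacks and MY_DEVMEM_SIZE are hypothetical). The two callbacks are the
 * ones wired to the dev_pagemap in hmm_devmem_pages_create() above:
 *
 *   static int my_devmem_fault(struct hmm_devmem *devmem,
 *                              struct vm_area_struct *vma,
 *                              unsigned long addr,
 *                              const struct page *page,
 *                              unsigned int flags,
 *                              pmd_t *pmdp)
 *   {
 *       (migrate the device page back to system memory, return a VM_FAULT_*
 *        code)
 *   }
 *
 *   static void my_devmem_free(struct hmm_devmem *devmem, struct page *page)
 *   {
 *       (put the backing device page back on the driver's free list)
 *   }
 *
 *   static const struct hmm_devmem_ops my_devmem_ops = {
 *       .fault = &my_devmem_fault,
 *       .free  = &my_devmem_free,
 *   };
 *
 *   devmem = hmm_devmem_add(&my_devmem_ops, &pdev->dev, MY_DEVMEM_SIZE);
 *   if (IS_ERR(devmem))
 *       return PTR_ERR(devmem);
 */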

struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
                                           struct device *device,
                                           struct resource *res)
{
        struct hmm_devmem *devmem;
        int ret;

        if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
                return ERR_PTR(-EINVAL);

        dev_pagemap_get_ops();

        devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
                                   GFP_KERNEL, dev_to_node(device));
        if (!devmem)
                return ERR_PTR(-ENOMEM);

        init_completion(&devmem->completion);
        devmem->pfn_first = -1UL;
        devmem->pfn_last = -1UL;
        devmem->resource = res;
        devmem->device = device;
        devmem->ops = ops;

        ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
                              0, GFP_KERNEL);
        if (ret)
                goto error_percpu_ref;

        ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
        if (ret)
                goto error_devm_add_action;

        devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
        devmem->pfn_last = devmem->pfn_first +
                           (resource_size(devmem->resource) >> PAGE_SHIFT);

        ret = hmm_devmem_pages_create(devmem);
        if (ret)
                goto error_devm_add_action;

        devres_add(device, devmem);

        ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
        if (ret) {
                hmm_devmem_remove(devmem);
                return ERR_PTR(ret);
        }

        return devmem;

error_devm_add_action:
        hmm_devmem_ref_kill(&devmem->ref);
        hmm_devmem_ref_exit(&devmem->ref);
error_percpu_ref:
        devres_free(devmem);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(hmm_devmem_add_resource);

/*
 * hmm_devmem_remove() - remove device memory (kill and free ZONE_DEVICE)
 *
 * @devmem: hmm_devmem struct used to track and manage the ZONE_DEVICE memory
 *
 * This will hot-unplug memory that was hotplugged by hmm_devmem_add() on
 * behalf of the device driver. It will free struct page and remove the
 * resource that reserved the physical address range for this device memory.
 */
void hmm_devmem_remove(struct hmm_devmem *devmem)
{
        resource_size_t start, size;
        struct device *device;
        bool cdm = false;

        if (!devmem)
                return;

        device = devmem->device;
        start = devmem->resource->start;
        size = resource_size(devmem->resource);

        cdm = devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY;
        hmm_devmem_ref_kill(&devmem->ref);
        hmm_devmem_ref_exit(&devmem->ref);
        hmm_devmem_pages_remove(devmem);

        if (!cdm)
                devm_release_mem_region(device, start, size);
}
EXPORT_SYMBOL(hmm_devmem_remove);

/*
 * A device driver that wants to handle multiple devices' memory through a
 * single fake device can use hmm_device to do so. This is purely a helper and
 * it is not required in order to use any HMM functionality.
 */
#define HMM_DEVICE_MAX 256

static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
static DEFINE_SPINLOCK(hmm_device_lock);
static struct class *hmm_device_class;
static dev_t hmm_device_devt;

static void hmm_device_release(struct device *device)
{
        struct hmm_device *hmm_device;

        hmm_device = container_of(device, struct hmm_device, device);
        spin_lock(&hmm_device_lock);
        clear_bit(hmm_device->minor, hmm_device_mask);
        spin_unlock(&hmm_device_lock);

        kfree(hmm_device);
}

struct hmm_device *hmm_device_new(void *drvdata)
{
        struct hmm_device *hmm_device;

        hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
        if (!hmm_device)
                return ERR_PTR(-ENOMEM);

        spin_lock(&hmm_device_lock);
        hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
        if (hmm_device->minor >= HMM_DEVICE_MAX) {
                spin_unlock(&hmm_device_lock);
                kfree(hmm_device);
                return ERR_PTR(-EBUSY);
        }
        set_bit(hmm_device->minor, hmm_device_mask);
        spin_unlock(&hmm_device_lock);

        dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
        hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
                                        hmm_device->minor);
        hmm_device->device.release = hmm_device_release;
        dev_set_drvdata(&hmm_device->device, drvdata);
        hmm_device->device.class = hmm_device_class;
        device_initialize(&hmm_device->device);

        return hmm_device;
}
EXPORT_SYMBOL(hmm_device_new);

void hmm_device_put(struct hmm_device *hmm_device)
{
        put_device(&hmm_device->device);
}
EXPORT_SYMBOL(hmm_device_put);
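
/*
 * Example: using the fake device helper (a sketch only; my_driver_data and
 * my_devmem_ops are hypothetical). A driver managing memory for several
 * physical devices can hang all of its hmm_devmem on one hmm_device:
 *
 *   struct hmm_device *hmm_device;
 *   struct hmm_devmem *devmem;
 *
 *   hmm_device = hmm_device_new(my_driver_data);
 *   if (IS_ERR(hmm_device))
 *       return PTR_ERR(hmm_device);
 *   devmem = hmm_devmem_add(&my_devmem_ops, &hmm_device->device, size);
 *   (... use the device memory, then drop the reference when done ...)
 *   hmm_device_put(hmm_device);
 */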

static int __init hmm_init(void)
{
        int ret;

        ret = alloc_chrdev_region(&hmm_device_devt, 0,
                                  HMM_DEVICE_MAX,
                                  "hmm_device");
        if (ret)
                return ret;

        hmm_device_class = class_create(THIS_MODULE, "hmm_device");
        if (IS_ERR(hmm_device_class)) {
                unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
                return PTR_ERR(hmm_device_class);
        }
        return 0;
}

device_initcall(hmm_init);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */