// SPDX-License-Identifier: GPL-2.0
/*
 * This is a module to test the HMM (Heterogeneous Memory Management)
 * mirror and zone device private memory migration APIs of the kernel.
 * Userspace programs can register with the driver to mirror their own address
 * space and can use the device to read/write any valid virtual address.
 */
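/*
 * Example of the intended userspace usage (illustrative only; the device node
 * name below is an assumption, it depends on how the test harness creates the
 * character device nodes):
 *
 *	int fd = open("/dev/hmm_dmirror0", O_RDWR);
 *	struct hmm_dmirror_cmd cmd = {
 *		.addr	= (__u64)(uintptr_t)buf,	// page-aligned address to mirror
 *		.ptr	= (__u64)(uintptr_t)data,	// userspace bounce buffer
 *		.npages	= 1,
 *	};
 *	ioctl(fd, HMM_DMIRROR_READ, &cmd);	// read one page through the mirror
 */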
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/memremap.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/hmm.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/sched/mm.h>
#include <linux/platform_device.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>

#include "test_hmm_uapi.h"
#define DMIRROR_NDEVICES		4
#define DMIRROR_RANGE_FAULT_TIMEOUT	1000
#define DEVMEM_CHUNK_SIZE		(256 * 1024 * 1024U)
#define DEVMEM_CHUNKS_RESERVE		16
static unsigned long spm_addr_dev0;
module_param(spm_addr_dev0, ulong, 0644);
MODULE_PARM_DESC(spm_addr_dev0,
		"Specify start address for SPM (special purpose memory) used for device 0. Setting this selects the coherent device type; make sure spm_addr_dev1 is set too. Minimum SPM size should be DEVMEM_CHUNK_SIZE.");

static unsigned long spm_addr_dev1;
module_param(spm_addr_dev1, ulong, 0644);
MODULE_PARM_DESC(spm_addr_dev1,
		"Specify start address for SPM (special purpose memory) used for device 1. Setting this selects the coherent device type; make sure spm_addr_dev0 is set too. Minimum SPM size should be DEVMEM_CHUNK_SIZE.");
static const struct dev_pagemap_ops dmirror_devmem_ops;
static const struct mmu_interval_notifier_ops dmirror_min_ops;
static dev_t dmirror_dev;

struct dmirror_device;
struct dmirror_bounce {
	void			*ptr;
	unsigned long		size;
	unsigned long		addr;
	unsigned long		cpages;
};

#define DPT_XA_TAG_ATOMIC 1UL
#define DPT_XA_TAG_WRITE 3UL
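/*
 * The two tag values above are applied to the pointers stored in the
 * dmirror->pt XArray (via xa_tag_pointer()): DPT_XA_TAG_WRITE marks a page
 * the device may write through the mirror, DPT_XA_TAG_ATOMIC marks a page
 * mapped for exclusive (atomic) device access.
 */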
/*
 * Data structure to track address ranges and register for mmu interval
 * notifier updates.
 */
struct dmirror_interval {
	struct mmu_interval_notifier	notifier;
	struct dmirror			*dmirror;
};
/*
 * Data attached to the open device file.
 * Note that it might be shared after a fork().
 */
struct dmirror {
	struct dmirror_device		*mdevice;
	struct xarray			pt;
	struct mmu_interval_notifier	notifier;
	struct mutex			mutex;
};
/*
 * ZONE_DEVICE pages for migration and simulating device memory.
 */
struct dmirror_chunk {
	struct dev_pagemap	pagemap;
	struct dmirror_device	*mdevice;
};
struct dmirror_device {
	struct cdev		cdevice;
	struct hmm_devmem	*devmem;
	unsigned int		zone_device_type;

	unsigned int		devmem_capacity;
	unsigned int		devmem_count;
	struct dmirror_chunk	**devmem_chunks;
	struct mutex		devmem_lock;	/* protects the above */

	unsigned long		calloc;
	unsigned long		cfree;
	struct page		*free_pages;
	spinlock_t		lock;		/* protects the above */
};

static struct dmirror_device dmirror_devices[DMIRROR_NDEVICES];
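/*
 * Device minors 0 and 1 always use MEMORY_DEVICE_PRIVATE; minors 2 and 3 use
 * MEMORY_DEVICE_COHERENT backed by the spm_addr_dev0/spm_addr_dev1 ranges when
 * both module parameters are set (see dmirror_allocate_chunk() and
 * hmm_dmirror_init() below).
 */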
static int dmirror_bounce_init(struct dmirror_bounce *bounce,
			       unsigned long addr,
			       unsigned long size)
{
	bounce->addr = addr;
	bounce->size = size;
	bounce->cpages = 0;
	bounce->ptr = vmalloc(size);
	if (!bounce->ptr)
		return -ENOMEM;
	return 0;
}

static void dmirror_bounce_fini(struct dmirror_bounce *bounce)
{
	vfree(bounce->ptr);
}
static int dmirror_fops_open(struct inode *inode, struct file *filp)
{
	struct cdev *cdev = inode->i_cdev;
	struct dmirror *dmirror;
	int ret;

	/* Mirror this process address space */
	dmirror = kzalloc(sizeof(*dmirror), GFP_KERNEL);
	if (dmirror == NULL)
		return -ENOMEM;

	dmirror->mdevice = container_of(cdev, struct dmirror_device, cdevice);
	mutex_init(&dmirror->mutex);
	xa_init(&dmirror->pt);

	ret = mmu_interval_notifier_insert(&dmirror->notifier, current->mm,
				0, ULONG_MAX & PAGE_MASK, &dmirror_min_ops);
	if (ret) {
		kfree(dmirror);
		return ret;
	}

	filp->private_data = dmirror;
	return 0;
}
static int dmirror_fops_release(struct inode *inode, struct file *filp)
{
	struct dmirror *dmirror = filp->private_data;

	mmu_interval_notifier_remove(&dmirror->notifier);
	xa_destroy(&dmirror->pt);
	kfree(dmirror);
	return 0;
}
static struct dmirror_device *dmirror_page_to_device(struct page *page)
{
	return container_of(page->pgmap, struct dmirror_chunk,
			    pagemap)->mdevice;
}
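/*
 * Fill the device "page table" (the dmirror->pt XArray) from the pfns that
 * hmm_range_fault() returned, tagging the entries the device may write.
 */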
static int dmirror_do_fault(struct dmirror *dmirror, struct hmm_range *range)
{
	unsigned long *pfns = range->hmm_pfns;
	unsigned long pfn;

	for (pfn = (range->start >> PAGE_SHIFT);
	     pfn < (range->end >> PAGE_SHIFT);
	     pfn++, pfns++) {
		struct page *page;
		void *entry;

		/*
		 * Since we asked for hmm_range_fault() to populate pages,
		 * it shouldn't return an error entry on success.
		 */
		WARN_ON(*pfns & HMM_PFN_ERROR);
		WARN_ON(!(*pfns & HMM_PFN_VALID));

		page = hmm_pfn_to_page(*pfns);

		entry = page;
		if (*pfns & HMM_PFN_WRITE)
			entry = xa_tag_pointer(entry, DPT_XA_TAG_WRITE);
		else if (WARN_ON(range->default_flags & HMM_PFN_WRITE))
			return -EFAULT;
		entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
		if (xa_is_err(entry))
			return xa_err(entry);
	}

	return 0;
}
static void dmirror_do_update(struct dmirror *dmirror, unsigned long start,
			      unsigned long end)
{
	unsigned long pfn;
	void *entry;

	/*
	 * The XArray doesn't hold references to pages since it relies on
	 * the mmu notifier to clear page pointers when they become stale.
	 * Therefore, it is OK to just clear the entry.
	 */
	xa_for_each_range(&dmirror->pt, pfn, entry, start >> PAGE_SHIFT,
			  end >> PAGE_SHIFT)
		xa_erase(&dmirror->pt, pfn);
}
static bool dmirror_interval_invalidate(struct mmu_interval_notifier *mni,
				const struct mmu_notifier_range *range,
				unsigned long cur_seq)
{
	struct dmirror *dmirror = container_of(mni, struct dmirror, notifier);

	/*
	 * Ignore invalidation callbacks for device private pages since
	 * the invalidation is handled as part of the migration process.
	 */
	if (range->event == MMU_NOTIFY_MIGRATE &&
	    range->owner == dmirror->mdevice)
		return true;

	if (mmu_notifier_range_blockable(range))
		mutex_lock(&dmirror->mutex);
	else if (!mutex_trylock(&dmirror->mutex))
		return false;

	mmu_interval_set_seq(mni, cur_seq);
	dmirror_do_update(dmirror, range->start, range->end);

	mutex_unlock(&dmirror->mutex);
	return true;
}

static const struct mmu_interval_notifier_ops dmirror_min_ops = {
	.invalidate = dmirror_interval_invalidate,
};
static int dmirror_range_fault(struct dmirror *dmirror,
			       struct hmm_range *range)
{
	struct mm_struct *mm = dmirror->notifier.mm;
	unsigned long timeout =
		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
	int ret;

	while (true) {
		if (time_after(jiffies, timeout)) {
			ret = -EBUSY;
			goto out;
		}

		range->notifier_seq = mmu_interval_read_begin(range->notifier);
		mmap_read_lock(mm);
		ret = hmm_range_fault(range);
		mmap_read_unlock(mm);
		if (ret) {
			if (ret == -EBUSY)
				continue;
			goto out;
		}

		mutex_lock(&dmirror->mutex);
		if (mmu_interval_read_retry(range->notifier,
					    range->notifier_seq)) {
			mutex_unlock(&dmirror->mutex);
			continue;
		}
		break;
	}

	ret = dmirror_do_fault(dmirror, range);

	mutex_unlock(&dmirror->mutex);
out:
	return ret;
}
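/*
 * Mirror a range of the process address space in batches of ARRAY_SIZE(pfns)
 * (64) pages so that the pfn array can stay on the stack.
 */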
static int dmirror_fault(struct dmirror *dmirror, unsigned long start,
			 unsigned long end, bool write)
{
	struct mm_struct *mm = dmirror->notifier.mm;
	unsigned long addr;
	unsigned long pfns[64];
	struct hmm_range range = {
		.notifier = &dmirror->notifier,
		.hmm_pfns = pfns,
		.pfn_flags_mask = 0,
		.default_flags =
			HMM_PFN_REQ_FAULT | (write ? HMM_PFN_REQ_WRITE : 0),
		.dev_private_owner = dmirror->mdevice,
	};
	int ret = 0;

	/* Since the mm is for the mirrored process, get a reference first. */
	if (!mmget_not_zero(mm))
		return 0;

	for (addr = start; addr < end; addr = range.end) {
		range.start = addr;
		range.end = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end);

		ret = dmirror_range_fault(dmirror, &range);
		if (ret)
			break;
	}

	mmput(mm);
	return ret;
}
static int dmirror_do_read(struct dmirror *dmirror, unsigned long start,
			   unsigned long end, struct dmirror_bounce *bounce)
{
	unsigned long pfn;
	void *ptr;

	ptr = bounce->ptr + ((start - bounce->addr) & PAGE_MASK);

	for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
		void *entry;
		struct page *page;
		void *tmp;

		entry = xa_load(&dmirror->pt, pfn);
		page = xa_untag_pointer(entry);
		if (!page)
			return -ENOENT;

		tmp = kmap(page);
		memcpy(ptr, tmp, PAGE_SIZE);
		kunmap(page);

		ptr += PAGE_SIZE;
		bounce->cpages++;
	}

	return 0;
}
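/*
 * dmirror_do_read() returns -ENOENT for pages that are not yet mirrored, so
 * the loop below faults the remainder of the range into the mirror and
 * retries until the whole range has been copied or a real error occurs.
 */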
static int dmirror_read(struct dmirror *dmirror, struct hmm_dmirror_cmd *cmd)
{
	struct dmirror_bounce bounce;
	unsigned long start, end;
	unsigned long size = cmd->npages << PAGE_SHIFT;
	int ret;

	start = cmd->addr;
	end = start + size;
	if (end < start)
		return -EINVAL;

	ret = dmirror_bounce_init(&bounce, start, size);
	if (ret)
		return ret;

	while (1) {
		mutex_lock(&dmirror->mutex);
		ret = dmirror_do_read(dmirror, start, end, &bounce);
		mutex_unlock(&dmirror->mutex);
		if (ret != -ENOENT)
			break;

		start = cmd->addr + (bounce.cpages << PAGE_SHIFT);
		ret = dmirror_fault(dmirror, start, end, false);
		if (ret)
			break;
	}

	if (ret == 0) {
		if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr,
				 bounce.size))
			ret = -EFAULT;
	}
	cmd->cpages = bounce.cpages;
	dmirror_bounce_fini(&bounce);
	return ret;
}
static int dmirror_do_write(struct dmirror *dmirror, unsigned long start,
			    unsigned long end, struct dmirror_bounce *bounce)
{
	unsigned long pfn;
	void *ptr;

	ptr = bounce->ptr + ((start - bounce->addr) & PAGE_MASK);

	for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
		void *entry;
		struct page *page;
		void *tmp;

		entry = xa_load(&dmirror->pt, pfn);
		page = xa_untag_pointer(entry);
		if (!page || xa_pointer_tag(entry) != DPT_XA_TAG_WRITE)
			return -ENOENT;

		tmp = kmap(page);
		memcpy(tmp, ptr, PAGE_SIZE);
		kunmap(page);

		ptr += PAGE_SIZE;
		bounce->cpages++;
	}

	return 0;
}
static int dmirror_write(struct dmirror *dmirror, struct hmm_dmirror_cmd *cmd)
{
	struct dmirror_bounce bounce;
	unsigned long start, end;
	unsigned long size = cmd->npages << PAGE_SHIFT;
	int ret;

	start = cmd->addr;
	end = start + size;
	if (end < start)
		return -EINVAL;

	ret = dmirror_bounce_init(&bounce, start, size);
	if (ret)
		return ret;
	if (copy_from_user(bounce.ptr, u64_to_user_ptr(cmd->ptr),
			   bounce.size)) {
		ret = -EFAULT;
		goto fini;
	}

	while (1) {
		mutex_lock(&dmirror->mutex);
		ret = dmirror_do_write(dmirror, start, end, &bounce);
		mutex_unlock(&dmirror->mutex);
		if (ret != -ENOENT)
			break;

		start = cmd->addr + (bounce.cpages << PAGE_SHIFT);
		ret = dmirror_fault(dmirror, start, end, true);
		if (ret)
			break;
	}

fini:
	cmd->cpages = bounce.cpages;
	dmirror_bounce_fini(&bounce);
	return ret;
}
static int dmirror_allocate_chunk(struct dmirror_device *mdevice,
				  struct page **ppage)
{
	struct dmirror_chunk *devmem;
	struct resource *res = NULL;
	unsigned long pfn;
	unsigned long pfn_first;
	unsigned long pfn_last;
	void *ptr;
	int ret = -ENOMEM;

	devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ret;

	switch (mdevice->zone_device_type) {
	case HMM_DMIRROR_MEMORY_DEVICE_PRIVATE:
		res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
					      "hmm_dmirror");
		if (IS_ERR_OR_NULL(res))
			goto err_devmem;
		devmem->pagemap.range.start = res->start;
		devmem->pagemap.range.end = res->end;
		devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
		break;
	case HMM_DMIRROR_MEMORY_DEVICE_COHERENT:
		devmem->pagemap.range.start = (MINOR(mdevice->cdevice.dev) - 2) ?
							spm_addr_dev1 :
							spm_addr_dev0;
		devmem->pagemap.range.end = devmem->pagemap.range.start +
					    DEVMEM_CHUNK_SIZE - 1;
		devmem->pagemap.type = MEMORY_DEVICE_COHERENT;
		break;
	default:
		ret = -EINVAL;
		goto err_devmem;
	}

	devmem->pagemap.nr_range = 1;
	devmem->pagemap.ops = &dmirror_devmem_ops;
	devmem->pagemap.owner = mdevice;

	mutex_lock(&mdevice->devmem_lock);

	if (mdevice->devmem_count == mdevice->devmem_capacity) {
		struct dmirror_chunk **new_chunks;
		unsigned int new_capacity;

		new_capacity = mdevice->devmem_capacity +
				DEVMEM_CHUNKS_RESERVE;
		new_chunks = krealloc(mdevice->devmem_chunks,
				sizeof(new_chunks[0]) * new_capacity,
				GFP_KERNEL);
		if (!new_chunks)
			goto err_release;
		mdevice->devmem_capacity = new_capacity;
		mdevice->devmem_chunks = new_chunks;
	}

	ptr = memremap_pages(&devmem->pagemap, numa_node_id());
	if (IS_ERR_OR_NULL(ptr)) {
		if (ptr)
			ret = PTR_ERR(ptr);
		else
			ret = -EFAULT;
		goto err_release;
	}

	devmem->mdevice = mdevice;
	pfn_first = devmem->pagemap.range.start >> PAGE_SHIFT;
	pfn_last = pfn_first + (range_len(&devmem->pagemap.range) >> PAGE_SHIFT);
	mdevice->devmem_chunks[mdevice->devmem_count++] = devmem;

	mutex_unlock(&mdevice->devmem_lock);

	pr_info("added new %u MB chunk (total %u chunks, %u MB) PFNs [0x%lx 0x%lx)\n",
		DEVMEM_CHUNK_SIZE / (1024 * 1024),
		mdevice->devmem_count,
		mdevice->devmem_count * (DEVMEM_CHUNK_SIZE / (1024 * 1024)),
		pfn_first, pfn_last);

	spin_lock(&mdevice->lock);
	for (pfn = pfn_first; pfn < pfn_last; pfn++) {
		struct page *page = pfn_to_page(pfn);

		page->zone_device_data = mdevice->free_pages;
		mdevice->free_pages = page;
	}
	if (ppage) {
		*ppage = mdevice->free_pages;
		mdevice->free_pages = (*ppage)->zone_device_data;
		mdevice->calloc++;
	}
	spin_unlock(&mdevice->lock);

	return 0;

err_release:
	mutex_unlock(&mdevice->devmem_lock);
	if (res && devmem->pagemap.type == MEMORY_DEVICE_PRIVATE)
		release_mem_region(devmem->pagemap.range.start,
				   range_len(&devmem->pagemap.range));
err_devmem:
	kfree(devmem);

	return ret;
}
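/*
 * Hand out one ZONE_DEVICE page from the per-device free list, growing the
 * list with a new chunk when it is empty. Each device page carries a real
 * system page (rpage) that stores the simulated device memory contents.
 */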
static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice)
{
	struct page *dpage = NULL;
	struct page *rpage;

	/*
	 * This is a fake device so we alloc real system memory to store
	 * our device memory.
	 */
	rpage = alloc_page(GFP_HIGHUSER);
	if (!rpage)
		return NULL;

	spin_lock(&mdevice->lock);

	if (mdevice->free_pages) {
		dpage = mdevice->free_pages;
		mdevice->free_pages = dpage->zone_device_data;
		mdevice->calloc++;
		spin_unlock(&mdevice->lock);
	} else {
		spin_unlock(&mdevice->lock);
		if (dmirror_allocate_chunk(mdevice, &dpage))
			goto error;
	}

	dpage->zone_device_data = rpage;
	lock_page(dpage);
	return dpage;

error:
	__free_page(rpage);
	return NULL;
}
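/*
 * Allocate-and-copy step of a migration to "device" memory: for every page
 * selected by migrate_vma_setup(), allocate a device private page and copy
 * (or zero) the source data into its backing rpage.
 */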
static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
					   struct dmirror *dmirror)
{
	struct dmirror_device *mdevice = dmirror->mdevice;
	const unsigned long *src = args->src;
	unsigned long *dst = args->dst;
	unsigned long addr;

	for (addr = args->start; addr < args->end; addr += PAGE_SIZE,
						   src++, dst++) {
		struct page *spage;
		struct page *dpage;
		struct page *rpage;

		if (!(*src & MIGRATE_PFN_MIGRATE))
			continue;

		/*
		 * Note that spage might be NULL which is OK since it is an
		 * unallocated pte_none() or read-only zero page.
		 */
		spage = migrate_pfn_to_page(*src);

		dpage = dmirror_devmem_alloc_page(mdevice);
		if (!dpage)
			continue;

		rpage = dpage->zone_device_data;
		if (spage)
			copy_highpage(rpage, spage);
		else
			clear_highpage(rpage);

		/*
		 * Normally, a device would use the page->zone_device_data to
		 * point to the mirror but here we use it to hold the page for
		 * the simulated device memory and that page holds the pointer
		 * to the mirror.
		 */
		rpage->zone_device_data = dmirror;

		*dst = migrate_pfn(page_to_pfn(dpage));
		if ((*src & MIGRATE_PFN_WRITE) ||
		    (!spage && args->vma->vm_flags & VM_WRITE))
			*dst |= MIGRATE_PFN_WRITE;
	}
}
static int dmirror_check_atomic(struct dmirror *dmirror, unsigned long start,
				unsigned long end)
{
	unsigned long pfn;

	for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
		void *entry;

		entry = xa_load(&dmirror->pt, pfn);
		if (xa_pointer_tag(entry) == DPT_XA_TAG_ATOMIC)
			return -EPERM;
	}

	return 0;
}
static int dmirror_atomic_map(unsigned long start, unsigned long end,
			      struct page **pages, struct dmirror *dmirror)
{
	unsigned long pfn, mapped = 0;
	int i;

	/* Map the migrated pages into the device's page tables. */
	mutex_lock(&dmirror->mutex);

	for (i = 0, pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++, i++) {
		void *entry;

		if (!pages[i])
			continue;

		entry = pages[i];
		entry = xa_tag_pointer(entry, DPT_XA_TAG_ATOMIC);
		entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
		if (xa_is_err(entry)) {
			mutex_unlock(&dmirror->mutex);
			return xa_err(entry);
		}

		mapped++;
	}

	mutex_unlock(&dmirror->mutex);
	return mapped;
}
static int dmirror_migrate_finalize_and_map(struct migrate_vma *args,
					    struct dmirror *dmirror)
{
	unsigned long start = args->start;
	unsigned long end = args->end;
	const unsigned long *src = args->src;
	const unsigned long *dst = args->dst;
	unsigned long pfn;

	/* Map the migrated pages into the device's page tables. */
	mutex_lock(&dmirror->mutex);

	for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++,
								src++, dst++) {
		struct page *dpage;
		void *entry;

		if (!(*src & MIGRATE_PFN_MIGRATE))
			continue;

		dpage = migrate_pfn_to_page(*dst);
		if (!dpage)
			continue;

		/*
		 * Store the page that holds the data so the page table
		 * doesn't have to deal with ZONE_DEVICE private pages.
		 */
		entry = dpage->zone_device_data;
		if (*dst & MIGRATE_PFN_WRITE)
			entry = xa_tag_pointer(entry, DPT_XA_TAG_WRITE);
		entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
		if (xa_is_err(entry)) {
			mutex_unlock(&dmirror->mutex);
			return xa_err(entry);
		}
	}

	mutex_unlock(&dmirror->mutex);
	return 0;
}
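/*
 * HMM_DMIRROR_EXCLUSIVE: mark a range for exclusive device access with
 * make_device_exclusive_range(), map it into the device page table tagged
 * DPT_XA_TAG_ATOMIC, and bounce the data back to userspace for verification.
 */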
static int dmirror_exclusive(struct dmirror *dmirror,
			     struct hmm_dmirror_cmd *cmd)
{
	unsigned long start, end, addr;
	unsigned long size = cmd->npages << PAGE_SHIFT;
	struct mm_struct *mm = dmirror->notifier.mm;
	struct page *pages[64];
	struct dmirror_bounce bounce;
	unsigned long next;
	int ret;

	start = cmd->addr;
	end = start + size;
	if (end < start)
		return -EINVAL;

	/* Since the mm is for the mirrored process, get a reference first. */
	if (!mmget_not_zero(mm))
		return -EINVAL;

	mmap_read_lock(mm);
	for (addr = start; addr < end; addr = next) {
		unsigned long mapped = 0;
		int i;

		if (end < addr + (ARRAY_SIZE(pages) << PAGE_SHIFT))
			next = end;
		else
			next = addr + (ARRAY_SIZE(pages) << PAGE_SHIFT);

		ret = make_device_exclusive_range(mm, addr, next, pages, NULL);
		/*
		 * Do dmirror_atomic_map() iff all pages are marked for
		 * exclusive access to avoid accessing uninitialized
		 * fields of pages.
		 */
		if (ret == (next - addr) >> PAGE_SHIFT)
			mapped = dmirror_atomic_map(addr, next, pages, dmirror);
		for (i = 0; i < ret; i++) {
			if (pages[i]) {
				unlock_page(pages[i]);
				put_page(pages[i]);
			}
		}

		if (addr + (mapped << PAGE_SHIFT) < next) {
			mmap_read_unlock(mm);
			mmput(mm);
			return -EBUSY;
		}
	}
	mmap_read_unlock(mm);
	mmput(mm);

	/* Return the migrated data for verification. */
	ret = dmirror_bounce_init(&bounce, start, size);
	if (ret)
		return ret;
	mutex_lock(&dmirror->mutex);
	ret = dmirror_do_read(dmirror, start, end, &bounce);
	mutex_unlock(&dmirror->mutex);
	if (ret == 0) {
		if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr,
				 bounce.size))
			ret = -EFAULT;
	}

	cmd->cpages = bounce.cpages;
	dmirror_bounce_fini(&bounce);
	return ret;
}
static int dmirror_migrate(struct dmirror *dmirror,
			   struct hmm_dmirror_cmd *cmd)
{
	unsigned long start, end, addr;
	unsigned long size = cmd->npages << PAGE_SHIFT;
	struct mm_struct *mm = dmirror->notifier.mm;
	struct vm_area_struct *vma;
	unsigned long src_pfns[64];
	unsigned long dst_pfns[64];
	struct dmirror_bounce bounce;
	struct migrate_vma args;
	unsigned long next;
	int ret;

	start = cmd->addr;
	end = start + size;
	if (end < start)
		return -EINVAL;

	/* Since the mm is for the mirrored process, get a reference first. */
	if (!mmget_not_zero(mm))
		return -EINVAL;

	mmap_read_lock(mm);
	for (addr = start; addr < end; addr = next) {
		vma = vma_lookup(mm, addr);
		if (!vma || !(vma->vm_flags & VM_READ)) {
			ret = -EINVAL;
			goto out;
		}
		next = min(end, addr + (ARRAY_SIZE(src_pfns) << PAGE_SHIFT));
		if (next > vma->vm_end)
			next = vma->vm_end;

		args.vma = vma;
		args.src = src_pfns;
		args.dst = dst_pfns;
		args.start = addr;
		args.end = next;
		args.pgmap_owner = dmirror->mdevice;
		args.flags = MIGRATE_VMA_SELECT_SYSTEM;
		ret = migrate_vma_setup(&args);
		if (ret)
			goto out;

		dmirror_migrate_alloc_and_copy(&args, dmirror);
		migrate_vma_pages(&args);
		dmirror_migrate_finalize_and_map(&args, dmirror);
		migrate_vma_finalize(&args);
	}
	mmap_read_unlock(mm);
	mmput(mm);

	/* Return the migrated data for verification. */
	ret = dmirror_bounce_init(&bounce, start, size);
	if (ret)
		return ret;
	mutex_lock(&dmirror->mutex);
	ret = dmirror_do_read(dmirror, start, end, &bounce);
	mutex_unlock(&dmirror->mutex);
	if (ret == 0) {
		if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr,
				 bounce.size))
			ret = -EFAULT;
	}
	cmd->cpages = bounce.cpages;
	dmirror_bounce_fini(&bounce);
	return ret;

out:
	mmap_read_unlock(mm);
	mmput(mm);
	return ret;
}
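/*
 * Encode one snapshot byte per page for HMM_DMIRROR_SNAPSHOT: where the page
 * currently lives (system memory, zero page, local or remote device private
 * memory) and how it is mapped (read/write, PMD/PUD sized).
 */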
static void dmirror_mkentry(struct dmirror *dmirror, struct hmm_range *range,
			    unsigned char *perm, unsigned long entry)
{
	struct page *page;

	if (entry & HMM_PFN_ERROR) {
		*perm = HMM_DMIRROR_PROT_ERROR;
		return;
	}
	if (!(entry & HMM_PFN_VALID)) {
		*perm = HMM_DMIRROR_PROT_NONE;
		return;
	}

	page = hmm_pfn_to_page(entry);
	if (is_device_private_page(page)) {
		/* Is the page migrated to this device or some other? */
		if (dmirror->mdevice == dmirror_page_to_device(page))
			*perm = HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL;
		else
			*perm = HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE;
	} else if (is_zero_pfn(page_to_pfn(page)))
		*perm = HMM_DMIRROR_PROT_ZERO;
	else
		*perm = HMM_DMIRROR_PROT_NONE;
	if (entry & HMM_PFN_WRITE)
		*perm |= HMM_DMIRROR_PROT_WRITE;
	else
		*perm |= HMM_DMIRROR_PROT_READ;
	if (hmm_pfn_to_map_order(entry) + PAGE_SHIFT == PMD_SHIFT)
		*perm |= HMM_DMIRROR_PROT_PMD;
	else if (hmm_pfn_to_map_order(entry) + PAGE_SHIFT == PUD_SHIFT)
		*perm |= HMM_DMIRROR_PROT_PUD;
}
static bool dmirror_snapshot_invalidate(struct mmu_interval_notifier *mni,
				const struct mmu_notifier_range *range,
				unsigned long cur_seq)
{
	struct dmirror_interval *dmi =
		container_of(mni, struct dmirror_interval, notifier);
	struct dmirror *dmirror = dmi->dmirror;

	if (mmu_notifier_range_blockable(range))
		mutex_lock(&dmirror->mutex);
	else if (!mutex_trylock(&dmirror->mutex))
		return false;

	/*
	 * Snapshots only need to set the sequence number since any
	 * invalidation in the interval invalidates the whole snapshot.
	 */
	mmu_interval_set_seq(mni, cur_seq);

	mutex_unlock(&dmirror->mutex);
	return true;
}

static const struct mmu_interval_notifier_ops dmirror_mrn_ops = {
	.invalidate = dmirror_snapshot_invalidate,
};
static int dmirror_range_snapshot(struct dmirror *dmirror,
				  struct hmm_range *range,
				  unsigned char *perm)
{
	struct mm_struct *mm = dmirror->notifier.mm;
	struct dmirror_interval notifier;
	unsigned long timeout =
		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
	unsigned long i;
	unsigned long n;
	int ret = 0;

	notifier.dmirror = dmirror;
	range->notifier = &notifier.notifier;

	ret = mmu_interval_notifier_insert(range->notifier, mm,
			range->start, range->end - range->start,
			&dmirror_mrn_ops);
	if (ret)
		return ret;

	while (true) {
		if (time_after(jiffies, timeout)) {
			ret = -EBUSY;
			goto out;
		}

		range->notifier_seq = mmu_interval_read_begin(range->notifier);

		mmap_read_lock(mm);
		ret = hmm_range_fault(range);
		mmap_read_unlock(mm);
		if (ret) {
			if (ret == -EBUSY)
				continue;
			goto out;
		}

		mutex_lock(&dmirror->mutex);
		if (mmu_interval_read_retry(range->notifier,
					    range->notifier_seq)) {
			mutex_unlock(&dmirror->mutex);
			continue;
		}
		break;
	}

	n = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0; i < n; i++)
		dmirror_mkentry(dmirror, range, perm + i, range->hmm_pfns[i]);

	mutex_unlock(&dmirror->mutex);
out:
	mmu_interval_notifier_remove(range->notifier);
	return ret;
}
static int dmirror_snapshot(struct dmirror *dmirror,
			    struct hmm_dmirror_cmd *cmd)
{
	struct mm_struct *mm = dmirror->notifier.mm;
	unsigned long start, end;
	unsigned long size = cmd->npages << PAGE_SHIFT;
	unsigned long addr;
	unsigned long next;
	unsigned long pfns[64];
	unsigned char perm[64];
	char __user *uptr;
	struct hmm_range range = {
		.hmm_pfns = pfns,
		.dev_private_owner = dmirror->mdevice,
	};
	int ret = 0;

	start = cmd->addr;
	end = start + size;
	if (end < start)
		return -EINVAL;

	/* Since the mm is for the mirrored process, get a reference first. */
	if (!mmget_not_zero(mm))
		return -EINVAL;

	/*
	 * Register a temporary notifier to detect invalidations even if it
	 * overlaps with other mmu_interval_notifiers.
	 */
	uptr = u64_to_user_ptr(cmd->ptr);
	for (addr = start; addr < end; addr = next) {
		unsigned long n;

		next = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end);
		range.start = addr;
		range.end = next;

		ret = dmirror_range_snapshot(dmirror, &range, perm);
		if (ret)
			break;

		n = (range.end - range.start) >> PAGE_SHIFT;
		if (copy_to_user(uptr, perm, n)) {
			ret = -EFAULT;
			break;
		}

		cmd->cpages += n;
		uptr += n;
	}
	mmput(mm);

	return ret;
}
static long dmirror_fops_unlocked_ioctl(struct file *filp,
					unsigned int command,
					unsigned long arg)
{
	void __user *uarg = (void __user *)arg;
	struct hmm_dmirror_cmd cmd;
	struct dmirror *dmirror;
	int ret;

	dmirror = filp->private_data;
	if (!dmirror)
		return -EINVAL;

	if (copy_from_user(&cmd, uarg, sizeof(cmd)))
		return -EFAULT;

	if (cmd.addr & ~PAGE_MASK)
		return -EINVAL;
	if (cmd.addr >= (cmd.addr + (cmd.npages << PAGE_SHIFT)))
		return -EINVAL;

	cmd.cpages = 0;

	switch (command) {
	case HMM_DMIRROR_READ:
		ret = dmirror_read(dmirror, &cmd);
		break;

	case HMM_DMIRROR_WRITE:
		ret = dmirror_write(dmirror, &cmd);
		break;

	case HMM_DMIRROR_MIGRATE:
		ret = dmirror_migrate(dmirror, &cmd);
		break;

	case HMM_DMIRROR_EXCLUSIVE:
		ret = dmirror_exclusive(dmirror, &cmd);
		break;

	case HMM_DMIRROR_CHECK_EXCLUSIVE:
		ret = dmirror_check_atomic(dmirror, cmd.addr,
					cmd.addr + (cmd.npages << PAGE_SHIFT));
		break;

	case HMM_DMIRROR_SNAPSHOT:
		ret = dmirror_snapshot(dmirror, &cmd);
		break;

	default:
		return -EINVAL;
	}
	if (ret)
		return ret;

	if (copy_to_user(uarg, &cmd, sizeof(cmd)))
		return -EFAULT;

	return 0;
}
static int dmirror_fops_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long addr;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
		struct page *page;
		int ret;

		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			return -ENOMEM;

		ret = vm_insert_page(vma, addr, page);
		if (ret) {
			__free_page(page);
			return ret;
		}
		put_page(page);
	}

	return 0;
}
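/*
 * mmap() simply populates the VMA with freshly zeroed system pages via
 * vm_insert_page(); the HMM paths themselves are exercised through the
 * ioctl interface above.
 */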
static const struct file_operations dmirror_fops = {
	.open		= dmirror_fops_open,
	.release	= dmirror_fops_release,
	.mmap		= dmirror_fops_mmap,
	.unlocked_ioctl = dmirror_fops_unlocked_ioctl,
	.llseek		= default_llseek,
	.owner		= THIS_MODULE,
};
static void dmirror_devmem_free(struct page *page)
{
	struct page *rpage = page->zone_device_data;
	struct dmirror_device *mdevice;

	if (rpage)
		__free_page(rpage);

	mdevice = dmirror_page_to_device(page);

	spin_lock(&mdevice->lock);
	mdevice->cfree++;
	page->zone_device_data = mdevice->free_pages;
	mdevice->free_pages = page;
	spin_unlock(&mdevice->lock);
}
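/*
 * migrate_to_ram() path: when the CPU faults on a device private PTE, copy
 * the data back from the simulated device memory (the rpage backing each
 * device private page) into freshly allocated system pages.
 */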
static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
						      struct dmirror *dmirror)
{
	const unsigned long *src = args->src;
	unsigned long *dst = args->dst;
	unsigned long start = args->start;
	unsigned long end = args->end;
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE,
				       src++, dst++) {
		struct page *dpage, *spage;

		spage = migrate_pfn_to_page(*src);
		if (!spage || !(*src & MIGRATE_PFN_MIGRATE))
			continue;
		spage = spage->zone_device_data;

		dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr);
		if (!dpage)
			continue;

		xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
		copy_highpage(dpage, spage);
		*dst = migrate_pfn(page_to_pfn(dpage));
		if (*src & MIGRATE_PFN_WRITE)
			*dst |= MIGRATE_PFN_WRITE;
	}
	return 0;
}
static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
{
	struct migrate_vma args;
	unsigned long src_pfns;
	unsigned long dst_pfns;
	struct page *rpage;
	struct dmirror *dmirror;
	vm_fault_t ret;

	/*
	 * Normally, a device would use the page->zone_device_data to point to
	 * the mirror but here we use it to hold the page for the simulated
	 * device memory and that page holds the pointer to the mirror.
	 */
	rpage = vmf->page->zone_device_data;
	dmirror = rpage->zone_device_data;

	/* FIXME demonstrate how we can adjust migrate range */
	args.vma = vmf->vma;
	args.start = vmf->address;
	args.end = args.start + PAGE_SIZE;
	args.src = &src_pfns;
	args.dst = &dst_pfns;
	args.pgmap_owner = dmirror->mdevice;
	args.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;

	if (migrate_vma_setup(&args))
		return VM_FAULT_SIGBUS;

	ret = dmirror_devmem_fault_alloc_and_copy(&args, dmirror);
	if (ret)
		return ret;
	migrate_vma_pages(&args);
	/*
	 * No device finalize step is needed since
	 * dmirror_devmem_fault_alloc_and_copy() will have already
	 * invalidated the device page table.
	 */
	migrate_vma_finalize(&args);
	return 0;
}
static const struct dev_pagemap_ops dmirror_devmem_ops = {
	.page_free	= dmirror_devmem_free,
	.migrate_to_ram	= dmirror_devmem_fault,
};
static int dmirror_device_init(struct dmirror_device *mdevice, int id)
{
	dev_t dev;
	int ret;

	dev = MKDEV(MAJOR(dmirror_dev), id);
	mutex_init(&mdevice->devmem_lock);
	spin_lock_init(&mdevice->lock);

	cdev_init(&mdevice->cdevice, &dmirror_fops);
	mdevice->cdevice.owner = THIS_MODULE;
	ret = cdev_add(&mdevice->cdevice, dev, 1);
	if (ret)
		return ret;

	/* Build a list of free ZONE_DEVICE struct pages */
	return dmirror_allocate_chunk(mdevice, NULL);
}
static void dmirror_device_remove(struct dmirror_device *mdevice)
{
	unsigned int i;

	if (mdevice->devmem_chunks) {
		for (i = 0; i < mdevice->devmem_count; i++) {
			struct dmirror_chunk *devmem =
				mdevice->devmem_chunks[i];

			memunmap_pages(&devmem->pagemap);
			if (devmem->pagemap.type == MEMORY_DEVICE_PRIVATE)
				release_mem_region(devmem->pagemap.range.start,
						   range_len(&devmem->pagemap.range));
			kfree(devmem);
		}
		kfree(mdevice->devmem_chunks);
	}

	cdev_del(&mdevice->cdevice);
}
static int __init hmm_dmirror_init(void)
{
	int ret;
	int id = 0;
	int ndevices = 0;

	ret = alloc_chrdev_region(&dmirror_dev, 0, DMIRROR_NDEVICES,
				  "HMM_DMIRROR");
	if (ret)
		goto err_unreg;

	memset(dmirror_devices, 0, DMIRROR_NDEVICES * sizeof(dmirror_devices[0]));
	dmirror_devices[ndevices++].zone_device_type =
				HMM_DMIRROR_MEMORY_DEVICE_PRIVATE;
	dmirror_devices[ndevices++].zone_device_type =
				HMM_DMIRROR_MEMORY_DEVICE_PRIVATE;
	if (spm_addr_dev0 && spm_addr_dev1) {
		dmirror_devices[ndevices++].zone_device_type =
					HMM_DMIRROR_MEMORY_DEVICE_COHERENT;
		dmirror_devices[ndevices++].zone_device_type =
					HMM_DMIRROR_MEMORY_DEVICE_COHERENT;
	}
	for (id = 0; id < ndevices; id++) {
		ret = dmirror_device_init(dmirror_devices + id, id);
		if (ret)
			goto err_chrdev;
	}

	pr_info("HMM test module loaded. This is only for testing HMM.\n");
	return 0;

err_chrdev:
	while (--id >= 0)
		dmirror_device_remove(dmirror_devices + id);
	unregister_chrdev_region(dmirror_dev, DMIRROR_NDEVICES);
err_unreg:
	return ret;
}
static void __exit hmm_dmirror_exit(void)
{
	int id;

	/*
	 * Only tear down devices that were actually set up in
	 * hmm_dmirror_init(); this assumes the uapi header reserves the
	 * zone_device_type value 0 for "uninitialized".
	 */
	for (id = 0; id < DMIRROR_NDEVICES; id++)
		if (dmirror_devices[id].zone_device_type)
			dmirror_device_remove(dmirror_devices + id);
	unregister_chrdev_region(dmirror_dev, DMIRROR_NDEVICES);
}

module_init(hmm_dmirror_init);
module_exit(hmm_dmirror_exit);
MODULE_LICENSE("GPL");