// SPDX-License-Identifier: GPL-2.0
/*
 * HMM stands for Heterogeneous Memory Management, it is a helper layer inside
 * the linux kernel to help device drivers mirror a process address space in
 * the device. This allows the device to use the same address space which
 * makes communication and data exchange a lot easier.
 *
 * This framework's sole purpose is to exercise various code paths inside
 * the kernel to make sure that HMM performs as expected and to flush out any
 * bug.
 */
13 #include "../kselftest_harness.h"
24 #include <sys/types.h>
27 #include <sys/ioctl.h>
31 * This is a private UAPI to the kernel test module so it isn't exported
32 * in the usual include/uapi/... directory.
34 #include <lib/test_hmm_uapi.h>
35 #include <mm/gup_test.h>
47 HMM_PRIVATE_DEVICE_ONE,
48 HMM_PRIVATE_DEVICE_TWO,
49 HMM_COHERENCE_DEVICE_ONE,
50 HMM_COHERENCE_DEVICE_TWO,
53 #define TWOMEG (1 << 21)
54 #define HMM_BUFFER_SIZE (1024 << 12)
55 #define HMM_PATH_MAX 64
58 #define ALIGN(x, a) (((x) + (a - 1)) & (~((a) - 1)))
59 /* Just the flags we need, copied from mm.h: */
60 #define FOLL_WRITE 0x01 /* check pte is writable */
61 #define FOLL_LONGTERM 0x10000 /* mapping lifetime is indefinite */
66 unsigned int page_size;
67 unsigned int page_shift;
75 FIXTURE_VARIANT_ADD(hmm, hmm_device_private)
77 .device_number = HMM_PRIVATE_DEVICE_ONE,
80 FIXTURE_VARIANT_ADD(hmm, hmm_device_coherent)
82 .device_number = HMM_COHERENCE_DEVICE_ONE,
89 unsigned int page_size;
90 unsigned int page_shift;
99 FIXTURE_VARIANT_ADD(hmm2, hmm2_device_private)
101 .device_number0 = HMM_PRIVATE_DEVICE_ONE,
102 .device_number1 = HMM_PRIVATE_DEVICE_TWO,
105 FIXTURE_VARIANT_ADD(hmm2, hmm2_device_coherent)
107 .device_number0 = HMM_COHERENCE_DEVICE_ONE,
108 .device_number1 = HMM_COHERENCE_DEVICE_TWO,
111 static int hmm_open(int unit)
113 char pathname[HMM_PATH_MAX];
116 snprintf(pathname, sizeof(pathname), "/dev/hmm_dmirror%d", unit);
117 fd = open(pathname, O_RDWR, 0);
119 fprintf(stderr, "could not open hmm dmirror driver (%s)\n",
124 static bool hmm_is_coherent_type(int dev_num)
126 return (dev_num >= HMM_COHERENCE_DEVICE_ONE);
131 self->page_size = sysconf(_SC_PAGE_SIZE);
132 self->page_shift = ffs(self->page_size) - 1;
134 self->fd = hmm_open(variant->device_number);
135 if (self->fd < 0 && hmm_is_coherent_type(variant->device_number))
136 SKIP(exit(0), "DEVICE_COHERENT not available");
137 ASSERT_GE(self->fd, 0);
142 self->page_size = sysconf(_SC_PAGE_SIZE);
143 self->page_shift = ffs(self->page_size) - 1;
145 self->fd0 = hmm_open(variant->device_number0);
146 if (self->fd0 < 0 && hmm_is_coherent_type(variant->device_number0))
147 SKIP(exit(0), "DEVICE_COHERENT not available");
148 ASSERT_GE(self->fd0, 0);
149 self->fd1 = hmm_open(variant->device_number1);
150 ASSERT_GE(self->fd1, 0);
153 FIXTURE_TEARDOWN(hmm)
155 int ret = close(self->fd);
161 FIXTURE_TEARDOWN(hmm2)
163 int ret = close(self->fd0);
168 ret = close(self->fd1);
173 static int hmm_dmirror_cmd(int fd,
174 unsigned long request,
175 struct hmm_buffer *buffer,
176 unsigned long npages)
178 struct hmm_dmirror_cmd cmd;
181 /* Simulate a device reading system memory. */
182 cmd.addr = (__u64)buffer->ptr;
183 cmd.ptr = (__u64)buffer->mirror;
187 ret = ioctl(fd, request, &cmd);
194 buffer->cpages = cmd.cpages;
195 buffer->faults = cmd.faults;
200 static void hmm_buffer_free(struct hmm_buffer *buffer)
206 munmap(buffer->ptr, buffer->size);
207 free(buffer->mirror);
212 * Create a temporary file that will be deleted on close.
214 static int hmm_create_file(unsigned long size)
216 char path[HMM_PATH_MAX];
219 strcpy(path, "/tmp");
220 fd = open(path, O_TMPFILE | O_EXCL | O_RDWR, 0600);
225 r = ftruncate(fd, size);
226 } while (r == -1 && errno == EINTR);
/*
 * Return a random unsigned number.
 * Returns ~0U if /dev/urandom cannot be opened or read.
 */
static unsigned int hmm_random(void)
{
	static int fd = -1;
	unsigned int r;

	if (fd < 0) {
		fd = open("/dev/urandom", O_RDONLY);
		if (fd < 0) {
			fprintf(stderr, "%s:%d failed to open /dev/urandom\n",
				__FILE__, __LINE__);
			return ~0U;
		}
	}
	/*
	 * Check the read result: a short or failed read would otherwise
	 * leave 'r' uninitialized, which is undefined behavior.
	 */
	if (read(fd, &r, sizeof(r)) != sizeof(r))
		return ~0U;
	return r;
}
254 static void hmm_nanosleep(unsigned int n)
263 static int hmm_migrate_sys_to_dev(int fd,
264 struct hmm_buffer *buffer,
265 unsigned long npages)
267 return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_DEV, buffer, npages);
270 static int hmm_migrate_dev_to_sys(int fd,
271 struct hmm_buffer *buffer,
272 unsigned long npages)
274 return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_SYS, buffer, npages);
/*
 * Simple NULL test of device open/close.
 * The fixture setup/teardown do all the work.
 */
TEST_F(hmm, open_close)
{
}
285 * Read private anonymous memory.
287 TEST_F(hmm, anon_read)
289 struct hmm_buffer *buffer;
290 unsigned long npages;
297 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
298 ASSERT_NE(npages, 0);
299 size = npages << self->page_shift;
301 buffer = malloc(sizeof(*buffer));
302 ASSERT_NE(buffer, NULL);
306 buffer->mirror = malloc(size);
307 ASSERT_NE(buffer->mirror, NULL);
309 buffer->ptr = mmap(NULL, size,
310 PROT_READ | PROT_WRITE,
311 MAP_PRIVATE | MAP_ANONYMOUS,
313 ASSERT_NE(buffer->ptr, MAP_FAILED);
316 * Initialize buffer in system memory but leave the first two pages
317 * zero (pte_none and pfn_zero).
319 i = 2 * self->page_size / sizeof(*ptr);
320 for (ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
323 /* Set buffer permission to read-only. */
324 ret = mprotect(buffer->ptr, size, PROT_READ);
327 /* Populate the CPU page table with a special zero page. */
328 val = *(int *)(buffer->ptr + self->page_size);
331 /* Simulate a device reading system memory. */
332 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
334 ASSERT_EQ(buffer->cpages, npages);
335 ASSERT_EQ(buffer->faults, 1);
337 /* Check what the device read. */
338 ptr = buffer->mirror;
339 for (i = 0; i < 2 * self->page_size / sizeof(*ptr); ++i)
340 ASSERT_EQ(ptr[i], 0);
341 for (; i < size / sizeof(*ptr); ++i)
342 ASSERT_EQ(ptr[i], i);
344 hmm_buffer_free(buffer);
348 * Read private anonymous memory which has been protected with
349 * mprotect() PROT_NONE.
351 TEST_F(hmm, anon_read_prot)
353 struct hmm_buffer *buffer;
354 unsigned long npages;
360 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
361 ASSERT_NE(npages, 0);
362 size = npages << self->page_shift;
364 buffer = malloc(sizeof(*buffer));
365 ASSERT_NE(buffer, NULL);
369 buffer->mirror = malloc(size);
370 ASSERT_NE(buffer->mirror, NULL);
372 buffer->ptr = mmap(NULL, size,
373 PROT_READ | PROT_WRITE,
374 MAP_PRIVATE | MAP_ANONYMOUS,
376 ASSERT_NE(buffer->ptr, MAP_FAILED);
378 /* Initialize buffer in system memory. */
379 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
382 /* Initialize mirror buffer so we can verify it isn't written. */
383 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
386 /* Protect buffer from reading. */
387 ret = mprotect(buffer->ptr, size, PROT_NONE);
390 /* Simulate a device reading system memory. */
391 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
392 ASSERT_EQ(ret, -EFAULT);
394 /* Allow CPU to read the buffer so we can check it. */
395 ret = mprotect(buffer->ptr, size, PROT_READ);
397 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
398 ASSERT_EQ(ptr[i], i);
400 /* Check what the device read. */
401 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
402 ASSERT_EQ(ptr[i], -i);
404 hmm_buffer_free(buffer);
408 * Write private anonymous memory.
410 TEST_F(hmm, anon_write)
412 struct hmm_buffer *buffer;
413 unsigned long npages;
419 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
420 ASSERT_NE(npages, 0);
421 size = npages << self->page_shift;
423 buffer = malloc(sizeof(*buffer));
424 ASSERT_NE(buffer, NULL);
428 buffer->mirror = malloc(size);
429 ASSERT_NE(buffer->mirror, NULL);
431 buffer->ptr = mmap(NULL, size,
432 PROT_READ | PROT_WRITE,
433 MAP_PRIVATE | MAP_ANONYMOUS,
435 ASSERT_NE(buffer->ptr, MAP_FAILED);
437 /* Initialize data that the device will write to buffer->ptr. */
438 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
441 /* Simulate a device writing system memory. */
442 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
444 ASSERT_EQ(buffer->cpages, npages);
445 ASSERT_EQ(buffer->faults, 1);
447 /* Check what the device wrote. */
448 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
449 ASSERT_EQ(ptr[i], i);
451 hmm_buffer_free(buffer);
455 * Write private anonymous memory which has been protected with
456 * mprotect() PROT_READ.
458 TEST_F(hmm, anon_write_prot)
460 struct hmm_buffer *buffer;
461 unsigned long npages;
467 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
468 ASSERT_NE(npages, 0);
469 size = npages << self->page_shift;
471 buffer = malloc(sizeof(*buffer));
472 ASSERT_NE(buffer, NULL);
476 buffer->mirror = malloc(size);
477 ASSERT_NE(buffer->mirror, NULL);
479 buffer->ptr = mmap(NULL, size,
481 MAP_PRIVATE | MAP_ANONYMOUS,
483 ASSERT_NE(buffer->ptr, MAP_FAILED);
485 /* Simulate a device reading a zero page of memory. */
486 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, 1);
488 ASSERT_EQ(buffer->cpages, 1);
489 ASSERT_EQ(buffer->faults, 1);
491 /* Initialize data that the device will write to buffer->ptr. */
492 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
495 /* Simulate a device writing system memory. */
496 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
497 ASSERT_EQ(ret, -EPERM);
499 /* Check what the device wrote. */
500 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
501 ASSERT_EQ(ptr[i], 0);
503 /* Now allow writing and see that the zero page is replaced. */
504 ret = mprotect(buffer->ptr, size, PROT_WRITE | PROT_READ);
507 /* Simulate a device writing system memory. */
508 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
510 ASSERT_EQ(buffer->cpages, npages);
511 ASSERT_EQ(buffer->faults, 1);
513 /* Check what the device wrote. */
514 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
515 ASSERT_EQ(ptr[i], i);
517 hmm_buffer_free(buffer);
521 * Check that a device writing an anonymous private mapping
522 * will copy-on-write if a child process inherits the mapping.
524 TEST_F(hmm, anon_write_child)
526 struct hmm_buffer *buffer;
527 unsigned long npages;
535 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
536 ASSERT_NE(npages, 0);
537 size = npages << self->page_shift;
539 buffer = malloc(sizeof(*buffer));
540 ASSERT_NE(buffer, NULL);
544 buffer->mirror = malloc(size);
545 ASSERT_NE(buffer->mirror, NULL);
547 buffer->ptr = mmap(NULL, size,
548 PROT_READ | PROT_WRITE,
549 MAP_PRIVATE | MAP_ANONYMOUS,
551 ASSERT_NE(buffer->ptr, MAP_FAILED);
553 /* Initialize buffer->ptr so we can tell if it is written. */
554 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
557 /* Initialize data that the device will write to buffer->ptr. */
558 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
565 waitpid(pid, &ret, 0);
566 ASSERT_EQ(WIFEXITED(ret), 1);
568 /* Check that the parent's buffer did not change. */
569 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
570 ASSERT_EQ(ptr[i], i);
574 /* Check that we see the parent's values. */
575 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
576 ASSERT_EQ(ptr[i], i);
577 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
578 ASSERT_EQ(ptr[i], -i);
580 /* The child process needs its own mirror to its own mm. */
581 child_fd = hmm_open(0);
582 ASSERT_GE(child_fd, 0);
584 /* Simulate a device writing system memory. */
585 ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
587 ASSERT_EQ(buffer->cpages, npages);
588 ASSERT_EQ(buffer->faults, 1);
590 /* Check what the device wrote. */
591 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
592 ASSERT_EQ(ptr[i], -i);
599 * Check that a device writing an anonymous shared mapping
600 * will not copy-on-write if a child process inherits the mapping.
602 TEST_F(hmm, anon_write_child_shared)
604 struct hmm_buffer *buffer;
605 unsigned long npages;
613 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
614 ASSERT_NE(npages, 0);
615 size = npages << self->page_shift;
617 buffer = malloc(sizeof(*buffer));
618 ASSERT_NE(buffer, NULL);
622 buffer->mirror = malloc(size);
623 ASSERT_NE(buffer->mirror, NULL);
625 buffer->ptr = mmap(NULL, size,
626 PROT_READ | PROT_WRITE,
627 MAP_SHARED | MAP_ANONYMOUS,
629 ASSERT_NE(buffer->ptr, MAP_FAILED);
631 /* Initialize buffer->ptr so we can tell if it is written. */
632 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
635 /* Initialize data that the device will write to buffer->ptr. */
636 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
643 waitpid(pid, &ret, 0);
644 ASSERT_EQ(WIFEXITED(ret), 1);
646 /* Check that the parent's buffer did change. */
647 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
648 ASSERT_EQ(ptr[i], -i);
652 /* Check that we see the parent's values. */
653 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
654 ASSERT_EQ(ptr[i], i);
655 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
656 ASSERT_EQ(ptr[i], -i);
658 /* The child process needs its own mirror to its own mm. */
659 child_fd = hmm_open(0);
660 ASSERT_GE(child_fd, 0);
662 /* Simulate a device writing system memory. */
663 ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
665 ASSERT_EQ(buffer->cpages, npages);
666 ASSERT_EQ(buffer->faults, 1);
668 /* Check what the device wrote. */
669 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
670 ASSERT_EQ(ptr[i], -i);
677 * Write private anonymous huge page.
679 TEST_F(hmm, anon_write_huge)
681 struct hmm_buffer *buffer;
682 unsigned long npages;
692 buffer = malloc(sizeof(*buffer));
693 ASSERT_NE(buffer, NULL);
697 buffer->mirror = malloc(size);
698 ASSERT_NE(buffer->mirror, NULL);
700 buffer->ptr = mmap(NULL, size,
701 PROT_READ | PROT_WRITE,
702 MAP_PRIVATE | MAP_ANONYMOUS,
704 ASSERT_NE(buffer->ptr, MAP_FAILED);
707 npages = size >> self->page_shift;
708 map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
709 ret = madvise(map, size, MADV_HUGEPAGE);
711 old_ptr = buffer->ptr;
714 /* Initialize data that the device will write to buffer->ptr. */
715 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
718 /* Simulate a device writing system memory. */
719 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
721 ASSERT_EQ(buffer->cpages, npages);
722 ASSERT_EQ(buffer->faults, 1);
724 /* Check what the device wrote. */
725 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
726 ASSERT_EQ(ptr[i], i);
728 buffer->ptr = old_ptr;
729 hmm_buffer_free(buffer);
/*
 * Read numeric data from raw and tagged kernel status files. Used to read
 * /proc and /sys data (without a tag) and from /proc/meminfo (with a tag).
 * Returns the parsed value, or -1 on any open/read/parse error.
 */
static long file_read_ulong(char *file, const char *tag)
{
	int fd;
	char buf[2048];
	int len;
	char *p, *q;
	long val;

	fd = open(file, O_RDONLY);
	if (fd < 0) {
		/* Error opening the file */
		return -1;
	}

	len = read(fd, buf, sizeof(buf));
	close(fd);
	if (len < 0) {
		/* Error in reading the file */
		return -1;
	}
	if (len == sizeof(buf)) {
		/* Error file is too large */
		return -1;
	}
	buf[len] = '\0';

	/* Search for a tag if provided */
	if (tag) {
		p = strstr(buf, tag);
		if (!p)
			return -1; /* looks like the line we want isn't there */
		p += strlen(tag);
	} else
		p = buf;

	val = strtol(p, &q, 0);
	if (q == p) {
		/* Error parsing the file */
		return -1;
	}

	return val;
}
781 * Write huge TLBFS page.
783 TEST_F(hmm, anon_write_hugetlbfs)
785 struct hmm_buffer *buffer;
786 unsigned long npages;
788 unsigned long default_hsize;
793 default_hsize = file_read_ulong("/proc/meminfo", "Hugepagesize:");
794 if (default_hsize < 0 || default_hsize*1024 < default_hsize)
795 SKIP(return, "Huge page size could not be determined");
796 default_hsize = default_hsize*1024; /* KB to B */
798 size = ALIGN(TWOMEG, default_hsize);
799 npages = size >> self->page_shift;
801 buffer = malloc(sizeof(*buffer));
802 ASSERT_NE(buffer, NULL);
804 buffer->ptr = mmap(NULL, size,
805 PROT_READ | PROT_WRITE,
806 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
808 if (buffer->ptr == MAP_FAILED) {
810 SKIP(return, "Huge page could not be allocated");
815 buffer->mirror = malloc(size);
816 ASSERT_NE(buffer->mirror, NULL);
818 /* Initialize data that the device will write to buffer->ptr. */
819 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
822 /* Simulate a device writing system memory. */
823 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
825 ASSERT_EQ(buffer->cpages, npages);
826 ASSERT_EQ(buffer->faults, 1);
828 /* Check what the device wrote. */
829 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
830 ASSERT_EQ(ptr[i], i);
832 munmap(buffer->ptr, buffer->size);
834 hmm_buffer_free(buffer);
838 * Read mmap'ed file memory.
840 TEST_F(hmm, file_read)
842 struct hmm_buffer *buffer;
843 unsigned long npages;
851 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
852 ASSERT_NE(npages, 0);
853 size = npages << self->page_shift;
855 fd = hmm_create_file(size);
858 buffer = malloc(sizeof(*buffer));
859 ASSERT_NE(buffer, NULL);
863 buffer->mirror = malloc(size);
864 ASSERT_NE(buffer->mirror, NULL);
866 /* Write initial contents of the file. */
867 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
869 len = pwrite(fd, buffer->mirror, size, 0);
870 ASSERT_EQ(len, size);
871 memset(buffer->mirror, 0, size);
873 buffer->ptr = mmap(NULL, size,
877 ASSERT_NE(buffer->ptr, MAP_FAILED);
879 /* Simulate a device reading system memory. */
880 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
882 ASSERT_EQ(buffer->cpages, npages);
883 ASSERT_EQ(buffer->faults, 1);
885 /* Check what the device read. */
886 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
887 ASSERT_EQ(ptr[i], i);
889 hmm_buffer_free(buffer);
893 * Write mmap'ed file memory.
895 TEST_F(hmm, file_write)
897 struct hmm_buffer *buffer;
898 unsigned long npages;
906 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
907 ASSERT_NE(npages, 0);
908 size = npages << self->page_shift;
910 fd = hmm_create_file(size);
913 buffer = malloc(sizeof(*buffer));
914 ASSERT_NE(buffer, NULL);
918 buffer->mirror = malloc(size);
919 ASSERT_NE(buffer->mirror, NULL);
921 buffer->ptr = mmap(NULL, size,
922 PROT_READ | PROT_WRITE,
925 ASSERT_NE(buffer->ptr, MAP_FAILED);
927 /* Initialize data that the device will write to buffer->ptr. */
928 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
931 /* Simulate a device writing system memory. */
932 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
934 ASSERT_EQ(buffer->cpages, npages);
935 ASSERT_EQ(buffer->faults, 1);
937 /* Check what the device wrote. */
938 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
939 ASSERT_EQ(ptr[i], i);
941 /* Check that the device also wrote the file. */
942 len = pread(fd, buffer->mirror, size, 0);
943 ASSERT_EQ(len, size);
944 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
945 ASSERT_EQ(ptr[i], i);
947 hmm_buffer_free(buffer);
951 * Migrate anonymous memory to device private memory.
955 struct hmm_buffer *buffer;
956 unsigned long npages;
962 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
963 ASSERT_NE(npages, 0);
964 size = npages << self->page_shift;
966 buffer = malloc(sizeof(*buffer));
967 ASSERT_NE(buffer, NULL);
971 buffer->mirror = malloc(size);
972 ASSERT_NE(buffer->mirror, NULL);
974 buffer->ptr = mmap(NULL, size,
975 PROT_READ | PROT_WRITE,
976 MAP_PRIVATE | MAP_ANONYMOUS,
978 ASSERT_NE(buffer->ptr, MAP_FAILED);
980 /* Initialize buffer in system memory. */
981 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
984 /* Migrate memory to device. */
985 ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
987 ASSERT_EQ(buffer->cpages, npages);
989 /* Check what the device read. */
990 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
991 ASSERT_EQ(ptr[i], i);
993 hmm_buffer_free(buffer);
997 * Migrate anonymous memory to device private memory and fault some of it back
998 * to system memory, then try migrating the resulting mix of system and device
999 * private memory to the device.
1001 TEST_F(hmm, migrate_fault)
1003 struct hmm_buffer *buffer;
1004 unsigned long npages;
1010 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1011 ASSERT_NE(npages, 0);
1012 size = npages << self->page_shift;
1014 buffer = malloc(sizeof(*buffer));
1015 ASSERT_NE(buffer, NULL);
1018 buffer->size = size;
1019 buffer->mirror = malloc(size);
1020 ASSERT_NE(buffer->mirror, NULL);
1022 buffer->ptr = mmap(NULL, size,
1023 PROT_READ | PROT_WRITE,
1024 MAP_PRIVATE | MAP_ANONYMOUS,
1026 ASSERT_NE(buffer->ptr, MAP_FAILED);
1028 /* Initialize buffer in system memory. */
1029 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1032 /* Migrate memory to device. */
1033 ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
1035 ASSERT_EQ(buffer->cpages, npages);
1037 /* Check what the device read. */
1038 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1039 ASSERT_EQ(ptr[i], i);
1041 /* Fault half the pages back to system memory and check them. */
1042 for (i = 0, ptr = buffer->ptr; i < size / (2 * sizeof(*ptr)); ++i)
1043 ASSERT_EQ(ptr[i], i);
1045 /* Migrate memory to the device again. */
1046 ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
1048 ASSERT_EQ(buffer->cpages, npages);
1050 /* Check what the device read. */
1051 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1052 ASSERT_EQ(ptr[i], i);
1054 hmm_buffer_free(buffer);
1057 TEST_F(hmm, migrate_release)
1059 struct hmm_buffer *buffer;
1060 unsigned long npages;
1066 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1067 ASSERT_NE(npages, 0);
1068 size = npages << self->page_shift;
1070 buffer = malloc(sizeof(*buffer));
1071 ASSERT_NE(buffer, NULL);
1074 buffer->size = size;
1075 buffer->mirror = malloc(size);
1076 ASSERT_NE(buffer->mirror, NULL);
1078 buffer->ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
1079 MAP_PRIVATE | MAP_ANONYMOUS, buffer->fd, 0);
1080 ASSERT_NE(buffer->ptr, MAP_FAILED);
1082 /* Initialize buffer in system memory. */
1083 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1086 /* Migrate memory to device. */
1087 ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
1089 ASSERT_EQ(buffer->cpages, npages);
1091 /* Check what the device read. */
1092 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1093 ASSERT_EQ(ptr[i], i);
1095 /* Release device memory. */
1096 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_RELEASE, buffer, npages);
1099 /* Fault pages back to system memory and check them. */
1100 for (i = 0, ptr = buffer->ptr; i < size / (2 * sizeof(*ptr)); ++i)
1101 ASSERT_EQ(ptr[i], i);
1103 hmm_buffer_free(buffer);
1107 * Migrate anonymous shared memory to device private memory.
1109 TEST_F(hmm, migrate_shared)
1111 struct hmm_buffer *buffer;
1112 unsigned long npages;
1116 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1117 ASSERT_NE(npages, 0);
1118 size = npages << self->page_shift;
1120 buffer = malloc(sizeof(*buffer));
1121 ASSERT_NE(buffer, NULL);
1124 buffer->size = size;
1125 buffer->mirror = malloc(size);
1126 ASSERT_NE(buffer->mirror, NULL);
1128 buffer->ptr = mmap(NULL, size,
1129 PROT_READ | PROT_WRITE,
1130 MAP_SHARED | MAP_ANONYMOUS,
1132 ASSERT_NE(buffer->ptr, MAP_FAILED);
1134 /* Migrate memory to device. */
1135 ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
1136 ASSERT_EQ(ret, -ENOENT);
1138 hmm_buffer_free(buffer);
1142 * Try to migrate various memory types to device private memory.
1144 TEST_F(hmm2, migrate_mixed)
1146 struct hmm_buffer *buffer;
1147 unsigned long npages;
1155 size = npages << self->page_shift;
1157 buffer = malloc(sizeof(*buffer));
1158 ASSERT_NE(buffer, NULL);
1161 buffer->size = size;
1162 buffer->mirror = malloc(size);
1163 ASSERT_NE(buffer->mirror, NULL);
1165 /* Reserve a range of addresses. */
1166 buffer->ptr = mmap(NULL, size,
1168 MAP_PRIVATE | MAP_ANONYMOUS,
1170 ASSERT_NE(buffer->ptr, MAP_FAILED);
1173 /* Migrating a protected area should be an error. */
1174 ret = hmm_migrate_sys_to_dev(self->fd1, buffer, npages);
1175 ASSERT_EQ(ret, -EINVAL);
1177 /* Punch a hole after the first page address. */
1178 ret = munmap(buffer->ptr + self->page_size, self->page_size);
1181 /* We expect an error if the vma doesn't cover the range. */
1182 ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 3);
1183 ASSERT_EQ(ret, -EINVAL);
1185 /* Page 2 will be a read-only zero page. */
1186 ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
1189 ptr = (int *)(buffer->ptr + 2 * self->page_size);
1193 /* Page 3 will be read-only. */
1194 ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
1195 PROT_READ | PROT_WRITE);
1197 ptr = (int *)(buffer->ptr + 3 * self->page_size);
1199 ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
1203 /* Page 4-5 will be read-write. */
1204 ret = mprotect(buffer->ptr + 4 * self->page_size, 2 * self->page_size,
1205 PROT_READ | PROT_WRITE);
1207 ptr = (int *)(buffer->ptr + 4 * self->page_size);
1209 ptr = (int *)(buffer->ptr + 5 * self->page_size);
1212 /* Now try to migrate pages 2-5 to device 1. */
1213 buffer->ptr = p + 2 * self->page_size;
1214 ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 4);
1216 ASSERT_EQ(buffer->cpages, 4);
1218 /* Page 5 won't be migrated to device 0 because it's on device 1. */
1219 buffer->ptr = p + 5 * self->page_size;
1220 ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1);
1221 ASSERT_EQ(ret, -ENOENT);
1225 hmm_buffer_free(buffer);
1229 * Migrate anonymous memory to device memory and back to system memory
1230 * multiple times. In case of private zone configuration, this is done
1231 * through fault pages accessed by CPU. In case of coherent zone configuration,
1232 * the pages from the device should be explicitly migrated back to system memory.
1233 * The reason is Coherent device zone has coherent access by CPU, therefore
1234 * it will not generate any page fault.
1236 TEST_F(hmm, migrate_multiple)
1238 struct hmm_buffer *buffer;
1239 unsigned long npages;
1246 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1247 ASSERT_NE(npages, 0);
1248 size = npages << self->page_shift;
1250 for (c = 0; c < NTIMES; c++) {
1251 buffer = malloc(sizeof(*buffer));
1252 ASSERT_NE(buffer, NULL);
1255 buffer->size = size;
1256 buffer->mirror = malloc(size);
1257 ASSERT_NE(buffer->mirror, NULL);
1259 buffer->ptr = mmap(NULL, size,
1260 PROT_READ | PROT_WRITE,
1261 MAP_PRIVATE | MAP_ANONYMOUS,
1263 ASSERT_NE(buffer->ptr, MAP_FAILED);
1265 /* Initialize buffer in system memory. */
1266 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1269 /* Migrate memory to device. */
1270 ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
1272 ASSERT_EQ(buffer->cpages, npages);
1274 /* Check what the device read. */
1275 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1276 ASSERT_EQ(ptr[i], i);
1278 /* Migrate back to system memory and check them. */
1279 if (hmm_is_coherent_type(variant->device_number)) {
1280 ret = hmm_migrate_dev_to_sys(self->fd, buffer, npages);
1282 ASSERT_EQ(buffer->cpages, npages);
1285 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1286 ASSERT_EQ(ptr[i], i);
1288 hmm_buffer_free(buffer);
1293 * Read anonymous memory multiple times.
1295 TEST_F(hmm, anon_read_multiple)
1297 struct hmm_buffer *buffer;
1298 unsigned long npages;
1305 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1306 ASSERT_NE(npages, 0);
1307 size = npages << self->page_shift;
1309 for (c = 0; c < NTIMES; c++) {
1310 buffer = malloc(sizeof(*buffer));
1311 ASSERT_NE(buffer, NULL);
1314 buffer->size = size;
1315 buffer->mirror = malloc(size);
1316 ASSERT_NE(buffer->mirror, NULL);
1318 buffer->ptr = mmap(NULL, size,
1319 PROT_READ | PROT_WRITE,
1320 MAP_PRIVATE | MAP_ANONYMOUS,
1322 ASSERT_NE(buffer->ptr, MAP_FAILED);
1324 /* Initialize buffer in system memory. */
1325 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1328 /* Simulate a device reading system memory. */
1329 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
1332 ASSERT_EQ(buffer->cpages, npages);
1333 ASSERT_EQ(buffer->faults, 1);
1335 /* Check what the device read. */
1336 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1337 ASSERT_EQ(ptr[i], i + c);
1339 hmm_buffer_free(buffer);
1343 void *unmap_buffer(void *p)
1345 struct hmm_buffer *buffer = p;
1347 /* Delay for a bit and then unmap buffer while it is being read. */
1348 hmm_nanosleep(hmm_random() % 32000);
1349 munmap(buffer->ptr + buffer->size / 2, buffer->size / 2);
1356 * Try reading anonymous memory while it is being unmapped.
1358 TEST_F(hmm, anon_teardown)
1360 unsigned long npages;
1365 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1366 ASSERT_NE(npages, 0);
1367 size = npages << self->page_shift;
1369 for (c = 0; c < NTIMES; ++c) {
1371 struct hmm_buffer *buffer;
1376 buffer = malloc(sizeof(*buffer));
1377 ASSERT_NE(buffer, NULL);
1380 buffer->size = size;
1381 buffer->mirror = malloc(size);
1382 ASSERT_NE(buffer->mirror, NULL);
1384 buffer->ptr = mmap(NULL, size,
1385 PROT_READ | PROT_WRITE,
1386 MAP_PRIVATE | MAP_ANONYMOUS,
1388 ASSERT_NE(buffer->ptr, MAP_FAILED);
1390 /* Initialize buffer in system memory. */
1391 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1394 rc = pthread_create(&thread, NULL, unmap_buffer, buffer);
1397 /* Simulate a device reading system memory. */
1398 rc = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
1401 ASSERT_EQ(buffer->cpages, npages);
1402 ASSERT_EQ(buffer->faults, 1);
1404 /* Check what the device read. */
1405 for (i = 0, ptr = buffer->mirror;
1406 i < size / sizeof(*ptr);
1408 ASSERT_EQ(ptr[i], i + c);
1411 pthread_join(thread, &ret);
1412 hmm_buffer_free(buffer);
1417 * Test memory snapshot without faulting in pages accessed by the device.
1419 TEST_F(hmm, mixedmap)
1421 struct hmm_buffer *buffer;
1422 unsigned long npages;
1428 size = npages << self->page_shift;
1430 buffer = malloc(sizeof(*buffer));
1431 ASSERT_NE(buffer, NULL);
1434 buffer->size = size;
1435 buffer->mirror = malloc(npages);
1436 ASSERT_NE(buffer->mirror, NULL);
1439 /* Reserve a range of addresses. */
1440 buffer->ptr = mmap(NULL, size,
1441 PROT_READ | PROT_WRITE,
1444 ASSERT_NE(buffer->ptr, MAP_FAILED);
1446 /* Simulate a device snapshotting CPU pagetables. */
1447 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1449 ASSERT_EQ(buffer->cpages, npages);
1451 /* Check what the device saw. */
1453 ASSERT_EQ(m[0], HMM_DMIRROR_PROT_READ);
1455 hmm_buffer_free(buffer);
1459 * Test memory snapshot without faulting in pages accessed by the device.
1461 TEST_F(hmm2, snapshot)
1463 struct hmm_buffer *buffer;
1464 unsigned long npages;
1473 size = npages << self->page_shift;
1475 buffer = malloc(sizeof(*buffer));
1476 ASSERT_NE(buffer, NULL);
1479 buffer->size = size;
/* Snapshot output is one HMM_DMIRROR_PROT_* byte per page, hence npages. */
1480 buffer->mirror = malloc(npages);
1481 ASSERT_NE(buffer->mirror, NULL);
1483 /* Reserve a range of addresses. */
1484 buffer->ptr = mmap(NULL, size,
1486 MAP_PRIVATE | MAP_ANONYMOUS,
1488 ASSERT_NE(buffer->ptr, MAP_FAILED);
1491 /* Punch a hole after the first page address. */
1492 ret = munmap(buffer->ptr + self->page_size, self->page_size);
1495 /* Page 2 will be read-only zero page. */
1496 ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
1499 ptr = (int *)(buffer->ptr + 2 * self->page_size);
1503 /* Page 3 will be read-only. */
/* Made writable first so a value can be faulted in, then re-protected
 * read-only below. */
1504 ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
1505 PROT_READ | PROT_WRITE);
1507 ptr = (int *)(buffer->ptr + 3 * self->page_size);
1509 ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
1513 /* Page 4-6 will be read-write. */
1514 ret = mprotect(buffer->ptr + 4 * self->page_size, 3 * self->page_size,
1515 PROT_READ | PROT_WRITE);
1517 ptr = (int *)(buffer->ptr + 4 * self->page_size);
1520 /* Page 5 will be migrated to device 0. */
1521 buffer->ptr = p + 5 * self->page_size;
1522 ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1);
1524 ASSERT_EQ(buffer->cpages, 1);
1526 /* Page 6 will be migrated to device 1. */
1527 buffer->ptr = p + 6 * self->page_size;
1528 ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 1);
1530 ASSERT_EQ(buffer->cpages, 1);
1532 /* Simulate a device snapshotting CPU pagetables. */
1534 ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1536 ASSERT_EQ(buffer->cpages, npages);
1538 /* Check what the device saw. */
/* Unmapped pages report ERROR; the untouched zero page reports ZERO|READ. */
1540 ASSERT_EQ(m[0], HMM_DMIRROR_PROT_ERROR);
1541 ASSERT_EQ(m[1], HMM_DMIRROR_PROT_ERROR);
1542 ASSERT_EQ(m[2], HMM_DMIRROR_PROT_ZERO | HMM_DMIRROR_PROT_READ);
1543 ASSERT_EQ(m[3], HMM_DMIRROR_PROT_READ);
1544 ASSERT_EQ(m[4], HMM_DMIRROR_PROT_WRITE);
/* Device-private pages are visible only to the owning device (page 6,
 * owned by device 1, reads as NONE to device 0); coherent pages are
 * visible to both devices, distinguished as LOCAL vs REMOTE. */
1545 if (!hmm_is_coherent_type(variant->device_number0)) {
1546 ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL |
1547 HMM_DMIRROR_PROT_WRITE);
1548 ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE);
1550 ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL |
1551 HMM_DMIRROR_PROT_WRITE);
1552 ASSERT_EQ(m[6], HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE |
1553 HMM_DMIRROR_PROT_WRITE);
1556 hmm_buffer_free(buffer);
1560 * Test the hmm_range_fault() HMM_PFN_PMD flag for large pages that
1561 * should be mapped by a large page table entry.
1563 TEST_F(hmm, compound)
1565 struct hmm_buffer *buffer;
1566 unsigned long npages;
1568 unsigned long default_hsize;
1574 /* Skip test if we can't allocate a hugetlbfs page. */
1576 default_hsize = file_read_ulong("/proc/meminfo", "Hugepagesize:");
/* NOTE(review): default_hsize is unsigned long, so "< 0" can never be
 * true; only the multiplication-overflow check has any effect. If
 * file_read_ulong() signals failure by returning (unsigned long)-1 or 0,
 * that is what should be tested here -- confirm against its definition. */
1577 if (default_hsize < 0 || default_hsize*1024 < default_hsize)
1578 SKIP(return, "Huge page size could not be determined");
1579 default_hsize = default_hsize*1024; /* KB to B */
1581 size = ALIGN(TWOMEG, default_hsize);
1582 npages = size >> self->page_shift;
1584 buffer = malloc(sizeof(*buffer));
1585 ASSERT_NE(buffer, NULL);
1587 buffer->ptr = mmap(NULL, size,
1588 PROT_READ | PROT_WRITE,
1589 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
/* Hugetlb allocation can legitimately fail (no reserved huge pages). */
1591 if (buffer->ptr == MAP_FAILED) {
1596 buffer->size = size;
/* One snapshot flag byte per page. */
1597 buffer->mirror = malloc(npages);
1598 ASSERT_NE(buffer->mirror, NULL);
1600 /* Initialize the pages the device will snapshot in buffer->ptr. */
1601 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1604 /* Simulate a device snapshotting CPU pagetables. */
1605 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1607 ASSERT_EQ(buffer->cpages, npages);
1609 /* Check what the device saw. */
/* Every page of the huge mapping should carry the PMD (large entry) flag. */
1611 for (i = 0; i < npages; ++i)
1612 ASSERT_EQ(m[i], HMM_DMIRROR_PROT_WRITE |
1613 HMM_DMIRROR_PROT_PMD);
1615 /* Make the region read-only. */
1616 ret = mprotect(buffer->ptr, size, PROT_READ);
1619 /* Simulate a device snapshotting CPU pagetables. */
1620 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1622 ASSERT_EQ(buffer->cpages, npages);
1624 /* Check what the device saw. */
/* Still PMD-mapped, but now read-only. */
1626 for (i = 0; i < npages; ++i)
1627 ASSERT_EQ(m[i], HMM_DMIRROR_PROT_READ |
1628 HMM_DMIRROR_PROT_PMD);
1630 munmap(buffer->ptr, buffer->size);
1632 hmm_buffer_free(buffer);
1636 * Test two devices reading the same memory (double mapped).
1638 TEST_F(hmm2, double_map)
1640 struct hmm_buffer *buffer;
1641 unsigned long npages;
1648 size = npages << self->page_shift;
1650 buffer = malloc(sizeof(*buffer));
1651 ASSERT_NE(buffer, NULL);
1654 buffer->size = size;
/* NOTE(review): mirror is only npages bytes, but the HMM_DMIRROR_READ
 * checks below walk it as size (npages << page_shift) bytes of ints.
 * Looks undersized compared to the other read tests, which use
 * malloc(size) -- verify against the full source. */
1655 buffer->mirror = malloc(npages);
1656 ASSERT_NE(buffer->mirror, NULL);
1658 /* Reserve a range of addresses. */
1659 buffer->ptr = mmap(NULL, size,
1660 PROT_READ | PROT_WRITE,
1661 MAP_PRIVATE | MAP_ANONYMOUS,
1663 ASSERT_NE(buffer->ptr, MAP_FAILED);
1665 /* Initialize buffer in system memory. */
1666 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1669 /* Make region read-only. */
1670 ret = mprotect(buffer->ptr, size, PROT_READ);
1673 /* Simulate device 0 reading system memory. */
1674 ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages);
1676 ASSERT_EQ(buffer->cpages, npages);
1677 ASSERT_EQ(buffer->faults, 1);
1679 /* Check what the device read. */
1680 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1681 ASSERT_EQ(ptr[i], i);
1683 /* Simulate device 1 reading system memory. */
1684 ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_READ, buffer, npages);
1686 ASSERT_EQ(buffer->cpages, npages);
1687 ASSERT_EQ(buffer->faults, 1);
1689 /* Check what the device read. */
1690 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1691 ASSERT_EQ(ptr[i], i);
1693 /* Migrate pages to device 1 and try to read from device 0. */
1694 ret = hmm_migrate_sys_to_dev(self->fd1, buffer, npages);
1696 ASSERT_EQ(buffer->cpages, npages);
1698 ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages);
1700 ASSERT_EQ(buffer->cpages, npages);
1701 ASSERT_EQ(buffer->faults, 1);
1703 /* Check what device 0 read. */
1704 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1705 ASSERT_EQ(ptr[i], i);
1707 hmm_buffer_free(buffer);
1711 * Basic check of exclusive faulting.
1713 TEST_F(hmm, exclusive)
1715 struct hmm_buffer *buffer;
1716 unsigned long npages;
1722 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1723 ASSERT_NE(npages, 0);
1724 size = npages << self->page_shift;
1726 buffer = malloc(sizeof(*buffer));
1727 ASSERT_NE(buffer, NULL);
1730 buffer->size = size;
/* Mirror holds a full copy of the data read back by the device. */
1731 buffer->mirror = malloc(size);
1732 ASSERT_NE(buffer->mirror, NULL);
1734 buffer->ptr = mmap(NULL, size,
1735 PROT_READ | PROT_WRITE,
1736 MAP_PRIVATE | MAP_ANONYMOUS,
1738 ASSERT_NE(buffer->ptr, MAP_FAILED);
1740 /* Initialize buffer in system memory. */
1741 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1744 /* Map memory exclusively for device access. */
1745 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
1747 ASSERT_EQ(buffer->cpages, npages);
1749 /* Check what the device read. */
1750 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1751 ASSERT_EQ(ptr[i], i);
1753 /* Fault pages back to system memory and check them. */
/* CPU writes increment each word; touching the pages must break the
 * device-exclusive mapping. */
1754 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1755 ASSERT_EQ(ptr[i]++, i);
1757 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1758 ASSERT_EQ(ptr[i], i+1);
1760 /* Check atomic access revoked */
1761 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_CHECK_EXCLUSIVE, buffer, npages);
1764 hmm_buffer_free(buffer);
/*
 * Check that mprotect() is honored for exclusively mapped pages: once the
 * range is made read-only, a simulated device write must fail with -EPERM.
 */
1767 TEST_F(hmm, exclusive_mprotect)
1769 struct hmm_buffer *buffer;
1770 unsigned long npages;
1776 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1777 ASSERT_NE(npages, 0);
1778 size = npages << self->page_shift;
1780 buffer = malloc(sizeof(*buffer));
1781 ASSERT_NE(buffer, NULL);
1784 buffer->size = size;
/* Mirror holds a full copy of the data read back by the device. */
1785 buffer->mirror = malloc(size);
1786 ASSERT_NE(buffer->mirror, NULL);
1788 buffer->ptr = mmap(NULL, size,
1789 PROT_READ | PROT_WRITE,
1790 MAP_PRIVATE | MAP_ANONYMOUS,
1792 ASSERT_NE(buffer->ptr, MAP_FAILED);
1794 /* Initialize buffer in system memory. */
1795 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1798 /* Map memory exclusively for device access. */
1799 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
1801 ASSERT_EQ(buffer->cpages, npages);
1803 /* Check what the device read. */
1804 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1805 ASSERT_EQ(ptr[i], i);
/* Revoke CPU write permission on the whole range. */
1807 ret = mprotect(buffer->ptr, size, PROT_READ);
1810 /* Simulate a device writing system memory. */
/* The device-side write must also be refused after mprotect(). */
1811 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
1812 ASSERT_EQ(ret, -EPERM);
1814 hmm_buffer_free(buffer);
1818 * Check copy-on-write works.
1820 TEST_F(hmm, exclusive_cow)
1822 struct hmm_buffer *buffer;
1823 unsigned long npages;
1829 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1830 ASSERT_NE(npages, 0);
1831 size = npages << self->page_shift;
1833 buffer = malloc(sizeof(*buffer));
1834 ASSERT_NE(buffer, NULL);
1837 buffer->size = size;
1838 buffer->mirror = malloc(size);
1839 ASSERT_NE(buffer->mirror, NULL);
1841 buffer->ptr = mmap(NULL, size,
1842 PROT_READ | PROT_WRITE,
1843 MAP_PRIVATE | MAP_ANONYMOUS,
1845 ASSERT_NE(buffer->ptr, MAP_FAILED);
1847 /* Initialize buffer in system memory. */
1848 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1851 /* Map memory exclusively for device access. */
1852 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
1854 ASSERT_EQ(buffer->cpages, npages);
1858 /* Fault pages back to system memory and check them. */
/* CPU writes after the exclusive mapping (and the intervening COW
 * trigger) must see the original values and then the incremented ones. */
1859 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1860 ASSERT_EQ(ptr[i]++, i);
1862 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1863 ASSERT_EQ(ptr[i], i+1);
1865 hmm_buffer_free(buffer);
/*
 * Issue one gup_test debugfs ioctl (GUP_*/PIN_* command) against the range
 * starting at addr, with FOLL_WRITE plus any caller-supplied gup flags.
 * Returns 0 on success; the ioctl-failure return path continues past the
 * visible lines. NOTE(review): perror() appends its own message after the
 * prefix, so the trailing "\n" in its argument splits the output oddly.
 */
1868 static int gup_test_exec(int gup_fd, unsigned long addr, int cmd,
1869 int npages, int size, int flags)
1871 struct gup_test gup = {
1872 .nr_pages_per_call = npages,
1874 .gup_flags = FOLL_WRITE | flags,
1878 if (ioctl(gup_fd, cmd, &gup)) {
1879 perror("ioctl on error\n");
1887 * Test get user device pages through gup_test. Setting PIN_LONGTERM flag.
1888 * This should trigger a migration back to system memory for both private
1889 * and coherent type pages.
1890 * This test makes use of gup_test module. Make sure GUP_TEST_CONFIG is added
1891 * to your configuration before you run it.
1893 TEST_F(hmm, hmm_gup_test)
1895 struct hmm_buffer *buffer;
1897 unsigned long npages;
/* gup_test is an optional debugfs module; skip rather than fail. */
1904 gup_fd = open("/sys/kernel/debug/gup_test", O_RDWR);
1906 SKIP(return, "Skipping test, could not find gup_test driver");
1909 size = npages << self->page_shift;
1911 buffer = malloc(sizeof(*buffer));
1912 ASSERT_NE(buffer, NULL);
1915 buffer->size = size;
1916 buffer->mirror = malloc(size);
1917 ASSERT_NE(buffer->mirror, NULL);
1919 buffer->ptr = mmap(NULL, size,
1920 PROT_READ | PROT_WRITE,
1921 MAP_PRIVATE | MAP_ANONYMOUS,
1923 ASSERT_NE(buffer->ptr, MAP_FAILED);
1925 /* Initialize buffer in system memory. */
1926 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1929 /* Migrate memory to device. */
1930 ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
1932 ASSERT_EQ(buffer->cpages, npages);
1933 /* Check what the device read. */
1934 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1935 ASSERT_EQ(ptr[i], i);
/* Pages 0 and 1: plain GUP (no long-term pin). */
1937 ASSERT_EQ(gup_test_exec(gup_fd,
1938 (unsigned long)buffer->ptr,
1939 GUP_BASIC_TEST, 1, self->page_size, 0), 0);
1940 ASSERT_EQ(gup_test_exec(gup_fd,
1941 (unsigned long)buffer->ptr + 1 * self->page_size,
1942 GUP_FAST_BENCHMARK, 1, self->page_size, 0), 0);
/* Pages 2 and 3: long-term pins, which must force migration back to
 * system memory. */
1943 ASSERT_EQ(gup_test_exec(gup_fd,
1944 (unsigned long)buffer->ptr + 2 * self->page_size,
1945 PIN_FAST_BENCHMARK, 1, self->page_size, FOLL_LONGTERM), 0);
1946 ASSERT_EQ(gup_test_exec(gup_fd,
1947 (unsigned long)buffer->ptr + 3 * self->page_size,
1948 PIN_LONGTERM_BENCHMARK, 1, self->page_size, 0), 0);
1950 /* Take a snapshot of the CPU pagetables. */
1951 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1953 ASSERT_EQ(buffer->cpages, npages);
/* Coherent pages survive a plain GUP on the device (pages 0-1);
 * device-private pages are migrated back by any CPU access. */
1955 if (hmm_is_coherent_type(variant->device_number)) {
1956 ASSERT_EQ(HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL | HMM_DMIRROR_PROT_WRITE, m[0]);
1957 ASSERT_EQ(HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL | HMM_DMIRROR_PROT_WRITE, m[1]);
1959 ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[0]);
1960 ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[1]);
/* Long-term pinned pages (2-3) must be in system memory either way. */
1962 ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[2]);
1963 ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[3]);
1965 * Check again the content on the pages. Make sure there's no
1968 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1969 ASSERT_EQ(ptr[i], i);
1972 hmm_buffer_free(buffer);
1976 * Test copy-on-write in device pages.
1977 * In case of writing to COW private page(s), a page fault will migrate pages
1978 * back to system memory first. Then, these pages will be duplicated. In case
1979 * of COW device coherent type, pages are duplicated directly from device
1982 TEST_F(hmm, hmm_cow_in_device)
1984 struct hmm_buffer *buffer;
1985 unsigned long npages;
1995 size = npages << self->page_shift;
1997 buffer = malloc(sizeof(*buffer));
1998 ASSERT_NE(buffer, NULL);
2001 buffer->size = size;
2002 buffer->mirror = malloc(size);
2003 ASSERT_NE(buffer->mirror, NULL);
2005 buffer->ptr = mmap(NULL, size,
2006 PROT_READ | PROT_WRITE,
2007 MAP_PRIVATE | MAP_ANONYMOUS,
2009 ASSERT_NE(buffer->ptr, MAP_FAILED);
2011 /* Initialize buffer in system memory. */
2012 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
2015 /* Migrate memory to device. */
2017 ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
2019 ASSERT_EQ(buffer->cpages, npages);
2025 /* Child process waits for SIGTERM from the parent. */
2028 perror("Should not reach this\n");
2031 /* Parent process writes to COW pages(s) and gets a
2032 * new copy in system. In case of device private pages,
2033 * this write causes a migration to system mem first.
2035 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
2038 /* Terminate child and wait */
2039 EXPECT_EQ(0, kill(pid, SIGTERM));
2040 EXPECT_EQ(pid, waitpid(pid, &status, 0));
2041 EXPECT_NE(0, WIFSIGNALED(status));
2042 EXPECT_EQ(SIGTERM, WTERMSIG(status));
2044 /* Take snapshot to CPU pagetables */
2045 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
2047 ASSERT_EQ(buffer->cpages, npages);
2049 for (i = 0; i < npages; i++)
2050 ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[i]);
2052 hmm_buffer_free(buffer);