1 // SPDX-License-Identifier: GPL-2.0
3 * HMM stands for Heterogeneous Memory Management, it is a helper layer inside
4 * the linux kernel to help device drivers mirror a process address space in
5 * the device. This allows the device to use the same address space which
6 * makes communication and data exchange a lot easier.
8 * This framework's sole purpose is to exercise various code paths inside
9 * the kernel to make sure that HMM performs as expected and to flush out any
13 #include "../kselftest_harness.h"
24 #include <hugetlbfs.h>
25 #include <sys/types.h>
28 #include <sys/ioctl.h>
31 * This is a private UAPI to the kernel test module so it isn't exported
32 * in the usual include/uapi/... directory.
34 #include "../../../../lib/test_hmm_uapi.h"
/* 2 MiB — the common x86-64 huge page size used by the huge-page tests. */
45 #define TWOMEG (1 << 21)
/* Default test buffer size: 1024 * 4 KiB = 4 MiB. */
46 #define HMM_BUFFER_SIZE (1024 << 12)
/* Enough room for the "/dev/hmm_dmirror%d" paths built in hmm_open(). */
47 #define HMM_PATH_MAX 64
/* Round x up to the next multiple of a; a must be a power of two. */
50 #define ALIGN(x, a) (((x) + (a - 1)) & (~((a) - 1)))
/*
 * Per-test fixture state (kselftest FIXTURE_DATA for "hmm" and "hmm2";
 * the FIXTURE(...) wrapper lines are missing from this extract).
 * page_size is sysconf(_SC_PAGE_SIZE); page_shift = log2(page_size).
 */
55 unsigned int page_size;
56 unsigned int page_shift;
63 unsigned int page_size;
64 unsigned int page_shift;
/*
 * Open the HMM test driver's mirror character device for the given unit
 * ("/dev/hmm_dmirror<unit>").  Logs to stderr on failure; presumably
 * returns the fd (negative on error) — the return line is missing from
 * this extract.
 */
67 static int hmm_open(int unit)
69 char pathname[HMM_PATH_MAX];
72 snprintf(pathname, sizeof(pathname), "/dev/hmm_dmirror%d", unit);
73 fd = open(pathname, O_RDWR, 0);
75 fprintf(stderr, "could not open hmm dmirror driver (%s)\n",
/*
 * Single-device fixture setup: cache the system page size/shift and open
 * mirror device unit 0.  ffs(page_size) - 1 == log2(page_size) because
 * the page size is a power of two.
 */
82 self->page_size = sysconf(_SC_PAGE_SIZE);
83 self->page_shift = ffs(self->page_size) - 1;
85 self->fd = hmm_open(0);
86 ASSERT_GE(self->fd, 0);
/*
 * Two-device fixture setup: same as the hmm fixture but opens mirror
 * devices 0 and 1 so tests can exercise two "GPUs" against one mm.
 */
91 self->page_size = sysconf(_SC_PAGE_SIZE);
92 self->page_shift = ffs(self->page_size) - 1;
94 self->fd0 = hmm_open(0);
95 ASSERT_GE(self->fd0, 0);
96 self->fd1 = hmm_open(1);
97 ASSERT_GE(self->fd1, 0);
/* Close the single mirror device fd (the ASSERT on ret is not in this extract). */
100 FIXTURE_TEARDOWN(hmm)
102 int ret = close(self->fd);
/* Close both mirror device fds opened in FIXTURE_SETUP(hmm2). */
108 FIXTURE_TEARDOWN(hmm2)
110 int ret = close(self->fd0);
115 ret = close(self->fd1);
/*
 * Issue one dmirror ioctl ("request", e.g. HMM_DMIRROR_READ/WRITE/
 * MIGRATE/SNAPSHOT) against the test driver fd, covering npages starting
 * at buffer->ptr, with buffer->mirror as the device-side data area.
 * On success the driver's counters are copied back into the buffer:
 * cpages = pages processed, faults = device faults taken.
 * NOTE(review): error handling around the ioctl return is missing from
 * this extract; presumably returns 0 or -errno.
 */
120 static int hmm_dmirror_cmd(int fd,
121 unsigned long request,
122 struct hmm_buffer *buffer,
123 unsigned long npages)
125 struct hmm_dmirror_cmd cmd;
128 /* Simulate a device reading system memory. */
129 cmd.addr = (__u64)buffer->ptr;
130 cmd.ptr = (__u64)buffer->mirror;
134 ret = ioctl(fd, request, &cmd);
141 buffer->cpages = cmd.cpages;
142 buffer->faults = cmd.faults;
/*
 * Release a test buffer: unmap the CPU mapping and free the malloc'ed
 * mirror area (the free(buffer) itself is not visible in this extract).
 */
147 static void hmm_buffer_free(struct hmm_buffer *buffer)
153 munmap(buffer->ptr, buffer->size);
154 free(buffer->mirror);
159 * Create a temporary file that will be deleted on close.
/*
 * Uses O_TMPFILE so the file is unlinked/anonymous from creation; the
 * ftruncate() is retried on EINTR.  Presumably returns the fd, or -1 on
 * failure — those lines are missing from this extract.
 */
161 static int hmm_create_file(unsigned long size)
163 char path[HMM_PATH_MAX];
166 strcpy(path, "/tmp");
167 fd = open(path, O_TMPFILE | O_EXCL | O_RDWR, 0600);
172 r = ftruncate(fd, size);
173 } while (r == -1 && errno == EINTR);
182 * Return a random unsigned number.
/*
 * Reads sizeof(r) bytes from /dev/urandom.  NOTE(review): the read()
 * return value is ignored here — on a short read r may be partially
 * uninitialized; acceptable for test jitter but worth confirming.
 */
184 static unsigned int hmm_random(void)
190 fd = open("/dev/urandom", O_RDONLY);
192 fprintf(stderr, "%s:%d failed to open /dev/urandom\n",
197 read(fd, &r, sizeof(r));
201 static void hmm_nanosleep(unsigned int n)
211 * Simple NULL test of device open/close.
213 TEST_F(hmm, open_close)
218 * Read private anonymous memory.
/*
 * Device-read of private anonymous memory.  The first two pages are left
 * untouched (pte_none) / zero-page mapped so the device fault path covers
 * empty PTEs and the shared zero page; the rest is initialized to ptr[i]=i
 * and then verified through the device mirror.
 */
220 TEST_F(hmm, anon_read)
222 struct hmm_buffer *buffer;
223 unsigned long npages;
230 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
231 ASSERT_NE(npages, 0);
232 size = npages << self->page_shift;
234 buffer = malloc(sizeof(*buffer));
235 ASSERT_NE(buffer, NULL);
239 buffer->mirror = malloc(size);
240 ASSERT_NE(buffer->mirror, NULL);
242 buffer->ptr = mmap(NULL, size,
243 PROT_READ | PROT_WRITE,
244 MAP_PRIVATE | MAP_ANONYMOUS,
246 ASSERT_NE(buffer->ptr, MAP_FAILED);
249 * Initialize buffer in system memory but leave the first two pages
250 * zero (pte_none and pfn_zero).
/* Start the init loop index past the first two pages. */
252 i = 2 * self->page_size / sizeof(*ptr);
253 for (ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
256 /* Set buffer permission to read-only. */
257 ret = mprotect(buffer->ptr, size, PROT_READ);
/* A CPU read of an untouched page maps the shared zero page. */
260 /* Populate the CPU page table with a special zero page. */
261 val = *(int *)(buffer->ptr + self->page_size);
264 /* Simulate a device reading system memory. */
265 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
267 ASSERT_EQ(buffer->cpages, npages);
268 ASSERT_EQ(buffer->faults, 1);
270 /* Check what the device read. */
/* First two pages must read as zero; the rest echo ptr[i] == i. */
271 ptr = buffer->mirror;
272 for (i = 0; i < 2 * self->page_size / sizeof(*ptr); ++i)
273 ASSERT_EQ(ptr[i], 0);
274 for (; i < size / sizeof(*ptr); ++i)
275 ASSERT_EQ(ptr[i], i);
277 hmm_buffer_free(buffer);
281 * Read private anonymous memory which has been protected with
282 * mprotect() PROT_NONE.
/*
 * Device-read of PROT_NONE memory must fail with -EFAULT and must not
 * modify either the CPU buffer or the device mirror (mirror is seeded
 * with -i so any write by the device would be detected).
 */
284 TEST_F(hmm, anon_read_prot)
286 struct hmm_buffer *buffer;
287 unsigned long npages;
293 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
294 ASSERT_NE(npages, 0);
295 size = npages << self->page_shift;
297 buffer = malloc(sizeof(*buffer));
298 ASSERT_NE(buffer, NULL);
302 buffer->mirror = malloc(size);
303 ASSERT_NE(buffer->mirror, NULL);
305 buffer->ptr = mmap(NULL, size,
306 PROT_READ | PROT_WRITE,
307 MAP_PRIVATE | MAP_ANONYMOUS,
309 ASSERT_NE(buffer->ptr, MAP_FAILED);
311 /* Initialize buffer in system memory. */
312 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
315 /* Initialize mirror buffer so we can verify it isn't written. */
316 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
319 /* Protect buffer from reading. */
320 ret = mprotect(buffer->ptr, size, PROT_NONE);
323 /* Simulate a device reading system memory. */
324 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
325 ASSERT_EQ(ret, -EFAULT);
327 /* Allow CPU to read the buffer so we can check it. */
328 ret = mprotect(buffer->ptr, size, PROT_READ);
330 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
331 ASSERT_EQ(ptr[i], i);
/* Mirror must still hold the -i sentinels: the device read nothing. */
333 /* Check what the device read. */
334 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
335 ASSERT_EQ(ptr[i], -i);
337 hmm_buffer_free(buffer);
341 * Write private anonymous memory.
/*
 * Device-write to private anonymous memory: the mirror is seeded with
 * ptr[i] = i, the device writes it into buffer->ptr in one faulting pass,
 * and the CPU verifies the result.
 */
343 TEST_F(hmm, anon_write)
345 struct hmm_buffer *buffer;
346 unsigned long npages;
352 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
353 ASSERT_NE(npages, 0);
354 size = npages << self->page_shift;
356 buffer = malloc(sizeof(*buffer));
357 ASSERT_NE(buffer, NULL);
361 buffer->mirror = malloc(size);
362 ASSERT_NE(buffer->mirror, NULL);
364 buffer->ptr = mmap(NULL, size,
365 PROT_READ | PROT_WRITE,
366 MAP_PRIVATE | MAP_ANONYMOUS,
368 ASSERT_NE(buffer->ptr, MAP_FAILED);
370 /* Initialize data that the device will write to buffer->ptr. */
371 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
374 /* Simulate a device writing system memory. */
375 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
377 ASSERT_EQ(buffer->cpages, npages);
378 ASSERT_EQ(buffer->faults, 1);
380 /* Check what the device wrote. */
381 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
382 ASSERT_EQ(ptr[i], i);
384 hmm_buffer_free(buffer);
388 * Write private anonymous memory which has been protected with
389 * mprotect() PROT_READ.
/*
 * Device-write to read-only memory must fail with -EPERM and leave the
 * zero pages intact; after mprotect(PROT_READ|PROT_WRITE) the same write
 * succeeds and replaces the zero page with real data.
 */
391 TEST_F(hmm, anon_write_prot)
393 struct hmm_buffer *buffer;
394 unsigned long npages;
400 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
401 ASSERT_NE(npages, 0);
402 size = npages << self->page_shift;
404 buffer = malloc(sizeof(*buffer));
405 ASSERT_NE(buffer, NULL);
409 buffer->mirror = malloc(size);
410 ASSERT_NE(buffer->mirror, NULL);
412 buffer->ptr = mmap(NULL, size,
414 MAP_PRIVATE | MAP_ANONYMOUS,
416 ASSERT_NE(buffer->ptr, MAP_FAILED);
/* First fault one zero page in via a device read. */
418 /* Simulate a device reading a zero page of memory. */
419 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, 1);
421 ASSERT_EQ(buffer->cpages, 1);
422 ASSERT_EQ(buffer->faults, 1);
424 /* Initialize data that the device will write to buffer->ptr. */
425 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
428 /* Simulate a device writing system memory. */
429 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
430 ASSERT_EQ(ret, -EPERM);
432 /* Check what the device wrote. */
433 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
434 ASSERT_EQ(ptr[i], 0);
436 /* Now allow writing and see that the zero page is replaced. */
437 ret = mprotect(buffer->ptr, size, PROT_WRITE | PROT_READ);
440 /* Simulate a device writing system memory. */
441 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
443 ASSERT_EQ(buffer->cpages, npages);
444 ASSERT_EQ(buffer->faults, 1);
446 /* Check what the device wrote. */
447 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
448 ASSERT_EQ(ptr[i], i);
450 hmm_buffer_free(buffer);
454 * Check that a device writing an anonymous private mapping
455 * will copy-on-write if a child process inherits the mapping.
/*
 * MAP_PRIVATE copy-on-write across fork(): a device write performed in
 * the child (via the child's own mirror fd) must COW and therefore not
 * be visible in the parent's buffer.  The fork() itself and the child's
 * exit are among the lines missing from this extract.
 */
457 TEST_F(hmm, anon_write_child)
459 struct hmm_buffer *buffer;
460 unsigned long npages;
468 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
469 ASSERT_NE(npages, 0);
470 size = npages << self->page_shift;
472 buffer = malloc(sizeof(*buffer));
473 ASSERT_NE(buffer, NULL);
477 buffer->mirror = malloc(size);
478 ASSERT_NE(buffer->mirror, NULL);
480 buffer->ptr = mmap(NULL, size,
481 PROT_READ | PROT_WRITE,
482 MAP_PRIVATE | MAP_ANONYMOUS,
484 ASSERT_NE(buffer->ptr, MAP_FAILED);
486 /* Initialize buffer->ptr so we can tell if it is written. */
487 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
490 /* Initialize data that the device will write to buffer->ptr. */
491 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
/* Parent path: wait for the child and verify COW protected us. */
498 waitpid(pid, &ret, 0);
499 ASSERT_EQ(WIFEXITED(ret), 1);
501 /* Check that the parent's buffer did not change. */
502 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
503 ASSERT_EQ(ptr[i], i);
/* Child path from here down. */
507 /* Check that we see the parent's values. */
508 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
509 ASSERT_EQ(ptr[i], i);
510 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
511 ASSERT_EQ(ptr[i], -i);
513 /* The child process needs its own mirror to its own mm. */
514 child_fd = hmm_open(0);
515 ASSERT_GE(child_fd, 0);
517 /* Simulate a device writing system memory. */
518 ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
520 ASSERT_EQ(buffer->cpages, npages);
521 ASSERT_EQ(buffer->faults, 1);
523 /* Check what the device wrote. */
524 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
525 ASSERT_EQ(ptr[i], -i);
532 * Check that a device writing an anonymous shared mapping
533 * will not copy-on-write if a child process inherits the mapping.
/*
 * MAP_SHARED counterpart of anon_write_child: a device write performed
 * in the child MUST be visible in the parent's buffer (no COW).
 * The fork() and child exit lines are missing from this extract.
 */
535 TEST_F(hmm, anon_write_child_shared)
537 struct hmm_buffer *buffer;
538 unsigned long npages;
546 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
547 ASSERT_NE(npages, 0);
548 size = npages << self->page_shift;
550 buffer = malloc(sizeof(*buffer));
551 ASSERT_NE(buffer, NULL);
555 buffer->mirror = malloc(size);
556 ASSERT_NE(buffer->mirror, NULL);
558 buffer->ptr = mmap(NULL, size,
559 PROT_READ | PROT_WRITE,
560 MAP_SHARED | MAP_ANONYMOUS,
562 ASSERT_NE(buffer->ptr, MAP_FAILED);
564 /* Initialize buffer->ptr so we can tell if it is written. */
565 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
568 /* Initialize data that the device will write to buffer->ptr. */
569 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
/* Parent path: the child's device write must be visible here. */
576 waitpid(pid, &ret, 0);
577 ASSERT_EQ(WIFEXITED(ret), 1);
579 /* Check that the parent's buffer did change. */
580 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
581 ASSERT_EQ(ptr[i], -i);
/* Child path from here down. */
585 /* Check that we see the parent's values. */
586 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
587 ASSERT_EQ(ptr[i], i);
588 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
589 ASSERT_EQ(ptr[i], -i);
591 /* The child process needs its own mirror to its own mm. */
592 child_fd = hmm_open(0);
593 ASSERT_GE(child_fd, 0);
595 /* Simulate a device writing system memory. */
596 ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
598 ASSERT_EQ(buffer->cpages, npages);
599 ASSERT_EQ(buffer->faults, 1);
601 /* Check what the device wrote. */
602 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
603 ASSERT_EQ(ptr[i], -i);
610 * Write private anonymous huge page.
/*
 * Device-write through a transparent huge page: over-map anonymous
 * memory, ALIGN the pointer up to a huge-page boundary, MADV_HUGEPAGE
 * it, then do a device write and verify.  old_ptr is kept so the full
 * original mapping can be handed back to hmm_buffer_free().
 */
612 TEST_F(hmm, anon_write_huge)
614 struct hmm_buffer *buffer;
615 unsigned long npages;
625 buffer = malloc(sizeof(*buffer));
626 ASSERT_NE(buffer, NULL);
630 buffer->mirror = malloc(size);
631 ASSERT_NE(buffer->mirror, NULL);
633 buffer->ptr = mmap(NULL, size,
634 PROT_READ | PROT_WRITE,
635 MAP_PRIVATE | MAP_ANONYMOUS,
637 ASSERT_NE(buffer->ptr, MAP_FAILED);
640 npages = size >> self->page_shift;
/* Align to the huge-page boundary inside the over-sized mapping. */
641 map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
642 ret = madvise(map, size, MADV_HUGEPAGE);
644 old_ptr = buffer->ptr;
647 /* Initialize data that the device will write to buffer->ptr. */
648 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
651 /* Simulate a device writing system memory. */
652 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
654 ASSERT_EQ(buffer->cpages, npages);
655 ASSERT_EQ(buffer->faults, 1);
657 /* Check what the device wrote. */
658 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
659 ASSERT_EQ(ptr[i], i);
/* Restore the original pointer so munmap() covers the whole mapping. */
661 buffer->ptr = old_ptr;
662 hmm_buffer_free(buffer);
666 * Write huge TLBFS page.
/*
 * Device-write through a hugetlbfs page, using libhugetlbfs
 * (gethugepagesizes / get_hugepage_region).  The test is skipped when no
 * hugetlbfs page can be allocated.  The index-selection loop picks the
 * smallest available huge page size — TODO confirm against the full
 * source; the loop body is partially missing here.
 */
668 TEST_F(hmm, anon_write_hugetlbfs)
670 struct hmm_buffer *buffer;
671 unsigned long npages;
679 /* Skip test if we can't allocate a hugetlbfs page. */
681 n = gethugepagesizes(pagesizes, 4);
684 for (idx = 0; --n > 0; ) {
685 if (pagesizes[n] < pagesizes[idx])
688 size = ALIGN(TWOMEG, pagesizes[idx]);
689 npages = size >> self->page_shift;
691 buffer = malloc(sizeof(*buffer));
692 ASSERT_NE(buffer, NULL);
/* GHR_STRICT: fail (and skip the test) rather than fall back to small pages. */
694 buffer->ptr = get_hugepage_region(size, GHR_STRICT);
695 if (buffer->ptr == NULL) {
702 buffer->mirror = malloc(size);
703 ASSERT_NE(buffer->mirror, NULL);
705 /* Initialize data that the device will write to buffer->ptr. */
706 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
709 /* Simulate a device writing system memory. */
710 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
712 ASSERT_EQ(buffer->cpages, npages);
713 ASSERT_EQ(buffer->faults, 1);
715 /* Check what the device wrote. */
716 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
717 ASSERT_EQ(ptr[i], i);
/* ptr came from get_hugepage_region(), so free it via libhugetlbfs. */
719 free_hugepage_region(buffer->ptr);
721 hmm_buffer_free(buffer);
725 * Read mmap'ed file memory.
/*
 * Device-read of a file-backed mapping: write a pattern to a temp file,
 * mmap it, have the device read the mapping, then verify the mirror
 * (which was zeroed first) now carries the file contents.
 */
727 TEST_F(hmm, file_read)
729 struct hmm_buffer *buffer;
730 unsigned long npages;
738 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
739 ASSERT_NE(npages, 0);
740 size = npages << self->page_shift;
742 fd = hmm_create_file(size);
745 buffer = malloc(sizeof(*buffer));
746 ASSERT_NE(buffer, NULL);
750 buffer->mirror = malloc(size);
751 ASSERT_NE(buffer->mirror, NULL);
753 /* Write initial contents of the file. */
754 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
756 len = pwrite(fd, buffer->mirror, size, 0);
757 ASSERT_EQ(len, size);
/* Zero the mirror so the later check proves the device really read. */
758 memset(buffer->mirror, 0, size);
760 buffer->ptr = mmap(NULL, size,
764 ASSERT_NE(buffer->ptr, MAP_FAILED);
766 /* Simulate a device reading system memory. */
767 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
769 ASSERT_EQ(buffer->cpages, npages);
770 ASSERT_EQ(buffer->faults, 1);
772 /* Check what the device read. */
773 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
774 ASSERT_EQ(ptr[i], i);
776 hmm_buffer_free(buffer);
780 * Write mmap'ed file memory.
/*
 * Device-write to a (shared, per the pread check) file-backed mapping:
 * the device writes the pattern, which must appear both through the CPU
 * mapping and in the file itself when read back with pread().
 */
782 TEST_F(hmm, file_write)
784 struct hmm_buffer *buffer;
785 unsigned long npages;
793 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
794 ASSERT_NE(npages, 0);
795 size = npages << self->page_shift;
797 fd = hmm_create_file(size);
800 buffer = malloc(sizeof(*buffer));
801 ASSERT_NE(buffer, NULL);
805 buffer->mirror = malloc(size);
806 ASSERT_NE(buffer->mirror, NULL);
808 buffer->ptr = mmap(NULL, size,
809 PROT_READ | PROT_WRITE,
812 ASSERT_NE(buffer->ptr, MAP_FAILED);
814 /* Initialize data that the device will write to buffer->ptr. */
815 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
818 /* Simulate a device writing system memory. */
819 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
821 ASSERT_EQ(buffer->cpages, npages);
822 ASSERT_EQ(buffer->faults, 1);
824 /* Check what the device wrote. */
825 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
826 ASSERT_EQ(ptr[i], i);
828 /* Check that the device also wrote the file. */
829 len = pread(fd, buffer->mirror, size, 0);
830 ASSERT_EQ(len, size);
831 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
832 ASSERT_EQ(ptr[i], i);
834 hmm_buffer_free(buffer);
838 * Migrate anonymous memory to device private memory.
/*
 * TEST_F(hmm, migrate) — the TEST_F header line is missing from this
 * extract.  Migrates anonymous memory to device private memory and
 * verifies the data via the mirror.
 */
842 struct hmm_buffer *buffer;
843 unsigned long npages;
849 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
850 ASSERT_NE(npages, 0);
851 size = npages << self->page_shift;
853 buffer = malloc(sizeof(*buffer));
854 ASSERT_NE(buffer, NULL);
858 buffer->mirror = malloc(size);
859 ASSERT_NE(buffer->mirror, NULL);
861 buffer->ptr = mmap(NULL, size,
862 PROT_READ | PROT_WRITE,
863 MAP_PRIVATE | MAP_ANONYMOUS,
865 ASSERT_NE(buffer->ptr, MAP_FAILED);
867 /* Initialize buffer in system memory. */
868 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
871 /* Migrate memory to device. */
872 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
874 ASSERT_EQ(buffer->cpages, npages);
876 /* Check what the device read. */
877 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
878 ASSERT_EQ(ptr[i], i);
880 hmm_buffer_free(buffer);
884 * Migrate anonymous memory to device private memory and fault it back to system
/*
 * Migrate to device private memory, then fault the pages back to system
 * memory by touching them from the CPU; data must survive both moves.
 */
887 TEST_F(hmm, migrate_fault)
889 struct hmm_buffer *buffer;
890 unsigned long npages;
896 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
897 ASSERT_NE(npages, 0);
898 size = npages << self->page_shift;
900 buffer = malloc(sizeof(*buffer));
901 ASSERT_NE(buffer, NULL);
905 buffer->mirror = malloc(size);
906 ASSERT_NE(buffer->mirror, NULL);
908 buffer->ptr = mmap(NULL, size,
909 PROT_READ | PROT_WRITE,
910 MAP_PRIVATE | MAP_ANONYMOUS,
912 ASSERT_NE(buffer->ptr, MAP_FAILED);
914 /* Initialize buffer in system memory. */
915 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
918 /* Migrate memory to device. */
919 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
921 ASSERT_EQ(buffer->cpages, npages);
923 /* Check what the device read. */
924 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
925 ASSERT_EQ(ptr[i], i);
/* CPU access triggers device-private fault-back migration. */
927 /* Fault pages back to system memory and check them. */
928 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
929 ASSERT_EQ(ptr[i], i);
931 hmm_buffer_free(buffer);
935 * Try to migrate various memory types to device private memory.
/*
 * Mixed-VMA migration error paths (two devices): migrating PROT_NONE
 * memory and ranges spanning an unmapped hole must fail with -EINVAL;
 * pages already resident on device 1 must fail to migrate to device 0
 * with -ENOENT.  Several setup lines (npages init, p = buffer->ptr,
 * page-touch statements) are missing from this extract.
 */
937 TEST_F(hmm2, migrate_mixed)
939 struct hmm_buffer *buffer;
940 unsigned long npages;
948 size = npages << self->page_shift;
950 buffer = malloc(sizeof(*buffer));
951 ASSERT_NE(buffer, NULL);
955 buffer->mirror = malloc(size);
956 ASSERT_NE(buffer->mirror, NULL);
958 /* Reserve a range of addresses. */
959 buffer->ptr = mmap(NULL, size,
961 MAP_PRIVATE | MAP_ANONYMOUS,
963 ASSERT_NE(buffer->ptr, MAP_FAILED);
966 /* Migrating a protected area should be an error. */
967 ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, npages);
968 ASSERT_EQ(ret, -EINVAL);
970 /* Punch a hole after the first page address. */
971 ret = munmap(buffer->ptr + self->page_size, self->page_size);
974 /* We expect an error if the vma doesn't cover the range. */
975 ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 3);
976 ASSERT_EQ(ret, -EINVAL);
978 /* Page 2 will be a read-only zero page. */
979 ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
982 ptr = (int *)(buffer->ptr + 2 * self->page_size);
/* Page 3: made writable, touched, then flipped back to read-only. */
986 /* Page 3 will be read-only. */
987 ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
988 PROT_READ | PROT_WRITE);
990 ptr = (int *)(buffer->ptr + 3 * self->page_size);
992 ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
996 /* Page 4-5 will be read-write. */
997 ret = mprotect(buffer->ptr + 4 * self->page_size, 2 * self->page_size,
998 PROT_READ | PROT_WRITE);
1000 ptr = (int *)(buffer->ptr + 4 * self->page_size);
1002 ptr = (int *)(buffer->ptr + 5 * self->page_size);
1005 /* Now try to migrate pages 2-5 to device 1. */
1006 buffer->ptr = p + 2 * self->page_size;
1007 ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 4);
1009 ASSERT_EQ(buffer->cpages, 4);
1011 /* Page 5 won't be migrated to device 0 because it's on device 1. */
1012 buffer->ptr = p + 5 * self->page_size;
1013 ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_MIGRATE, buffer, 1);
1014 ASSERT_EQ(ret, -ENOENT);
1018 hmm_buffer_free(buffer);
1022 * Migrate anonymous memory to device private memory and fault it back to system
1023 * memory multiple times.
/*
 * Repeat the migrate + fault-back cycle NTIMES with a fresh buffer per
 * iteration, to stress repeated device-private migration.
 */
1025 TEST_F(hmm, migrate_multiple)
1027 struct hmm_buffer *buffer;
1028 unsigned long npages;
1035 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1036 ASSERT_NE(npages, 0);
1037 size = npages << self->page_shift;
1039 for (c = 0; c < NTIMES; c++) {
1040 buffer = malloc(sizeof(*buffer));
1041 ASSERT_NE(buffer, NULL);
1044 buffer->size = size;
1045 buffer->mirror = malloc(size);
1046 ASSERT_NE(buffer->mirror, NULL);
1048 buffer->ptr = mmap(NULL, size,
1049 PROT_READ | PROT_WRITE,
1050 MAP_PRIVATE | MAP_ANONYMOUS,
1052 ASSERT_NE(buffer->ptr, MAP_FAILED);
1054 /* Initialize buffer in system memory. */
1055 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1058 /* Migrate memory to device. */
1059 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer,
1062 ASSERT_EQ(buffer->cpages, npages);
1064 /* Check what the device read. */
1065 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1066 ASSERT_EQ(ptr[i], i);
1068 /* Fault pages back to system memory and check them. */
1069 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1070 ASSERT_EQ(ptr[i], i);
1072 hmm_buffer_free(buffer);
1077 * Read anonymous memory multiple times.
/*
 * Repeat the basic anon-read test NTIMES; the pattern is offset by the
 * iteration counter c (ptr[i] == i + c) so stale data from a previous
 * iteration would be detected.
 */
1079 TEST_F(hmm, anon_read_multiple)
1081 struct hmm_buffer *buffer;
1082 unsigned long npages;
1089 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1090 ASSERT_NE(npages, 0);
1091 size = npages << self->page_shift;
1093 for (c = 0; c < NTIMES; c++) {
1094 buffer = malloc(sizeof(*buffer));
1095 ASSERT_NE(buffer, NULL);
1098 buffer->size = size;
1099 buffer->mirror = malloc(size);
1100 ASSERT_NE(buffer->mirror, NULL);
1102 buffer->ptr = mmap(NULL, size,
1103 PROT_READ | PROT_WRITE,
1104 MAP_PRIVATE | MAP_ANONYMOUS,
1106 ASSERT_NE(buffer->ptr, MAP_FAILED);
1108 /* Initialize buffer in system memory. */
1109 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1112 /* Simulate a device reading system memory. */
1113 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
1116 ASSERT_EQ(buffer->cpages, npages);
1117 ASSERT_EQ(buffer->faults, 1);
1119 /* Check what the device read. */
1120 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1121 ASSERT_EQ(ptr[i], i + c);
1123 hmm_buffer_free(buffer);
/*
 * pthread body used by anon_teardown: after a random short delay, unmap
 * the upper half of the buffer while the main thread has a device read
 * in flight, racing munmap against hmm_range_fault().
 */
1127 void *unmap_buffer(void *p)
1129 struct hmm_buffer *buffer = p;
1131 /* Delay for a bit and then unmap buffer while it is being read. */
1132 hmm_nanosleep(hmm_random() % 32000);
1133 munmap(buffer->ptr + buffer->size / 2, buffer->size / 2);
1140 * Try reading anonymous memory while it is being unmapped.
/*
 * Race a concurrent munmap (unmap_buffer thread) against device reads,
 * NTIMES over.  NOTE(review): in this extract the ASSERT_EQ checks on
 * cpages/faults/mirror appear unconditional, but in the full source they
 * are presumably guarded on the read having succeeded before the unmap
 * landed — the guarding lines are missing here.
 */
1142 TEST_F(hmm, anon_teardown)
1144 unsigned long npages;
1149 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1150 ASSERT_NE(npages, 0);
1151 size = npages << self->page_shift;
1153 for (c = 0; c < NTIMES; ++c) {
1155 struct hmm_buffer *buffer;
1160 buffer = malloc(sizeof(*buffer));
1161 ASSERT_NE(buffer, NULL);
1164 buffer->size = size;
1165 buffer->mirror = malloc(size);
1166 ASSERT_NE(buffer->mirror, NULL);
1168 buffer->ptr = mmap(NULL, size,
1169 PROT_READ | PROT_WRITE,
1170 MAP_PRIVATE | MAP_ANONYMOUS,
1172 ASSERT_NE(buffer->ptr, MAP_FAILED);
1174 /* Initialize buffer in system memory. */
1175 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1178 rc = pthread_create(&thread, NULL, unmap_buffer, buffer);
1181 /* Simulate a device reading system memory. */
1182 rc = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
1185 ASSERT_EQ(buffer->cpages, npages);
1186 ASSERT_EQ(buffer->faults, 1);
1188 /* Check what the device read. */
1189 for (i = 0, ptr = buffer->mirror;
1190 i < size / sizeof(*ptr);
1192 ASSERT_EQ(ptr[i], i + c);
1195 pthread_join(thread, &ret);
1196 hmm_buffer_free(buffer);
1201 * Test memory snapshot without faulting in pages accessed by the device.
/*
 * HMM_DMIRROR_SNAPSHOT test: build a range of pages with every
 * interesting state (unmapped hole, zero page, read-only, read-write,
 * device-0 private, device-1 private) and verify the per-page protection
 * flags the snapshot reports.  The mirror holds one byte per page, hence
 * malloc(npages).  npages init, p = buffer->ptr, the page-touch writes,
 * and m = buffer->mirror are among the lines missing from this extract.
 */
1203 TEST_F(hmm2, snapshot)
1205 struct hmm_buffer *buffer;
1206 unsigned long npages;
1215 size = npages << self->page_shift;
1217 buffer = malloc(sizeof(*buffer));
1218 ASSERT_NE(buffer, NULL);
1221 buffer->size = size;
1222 buffer->mirror = malloc(npages);
1223 ASSERT_NE(buffer->mirror, NULL);
1225 /* Reserve a range of addresses. */
1226 buffer->ptr = mmap(NULL, size,
1228 MAP_PRIVATE | MAP_ANONYMOUS,
1230 ASSERT_NE(buffer->ptr, MAP_FAILED);
1233 /* Punch a hole after the first page address. */
1234 ret = munmap(buffer->ptr + self->page_size, self->page_size);
1237 /* Page 2 will be read-only zero page. */
1238 ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
1241 ptr = (int *)(buffer->ptr + 2 * self->page_size);
1245 /* Page 3 will be read-only. */
1246 ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
1247 PROT_READ | PROT_WRITE);
1249 ptr = (int *)(buffer->ptr + 3 * self->page_size);
1251 ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
1255 /* Page 4-6 will be read-write. */
1256 ret = mprotect(buffer->ptr + 4 * self->page_size, 3 * self->page_size,
1257 PROT_READ | PROT_WRITE);
1259 ptr = (int *)(buffer->ptr + 4 * self->page_size);
1262 /* Page 5 will be migrated to device 0. */
1263 buffer->ptr = p + 5 * self->page_size;
1264 ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_MIGRATE, buffer, 1);
1266 ASSERT_EQ(buffer->cpages, 1);
1268 /* Page 6 will be migrated to device 1. */
1269 buffer->ptr = p + 6 * self->page_size;
1270 ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 1);
1272 ASSERT_EQ(buffer->cpages, 1);
1274 /* Simulate a device snapshotting CPU pagetables. */
1276 ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1278 ASSERT_EQ(buffer->cpages, npages);
/*
 * m[] is the per-page flag array (presumably m = buffer->mirror; that
 * assignment is missing from this extract).  m[6] is PROT_NONE because
 * page 6 lives on device 1 and the snapshot was taken through device 0.
 */
1280 /* Check what the device saw. */
1282 ASSERT_EQ(m[0], HMM_DMIRROR_PROT_ERROR);
1283 ASSERT_EQ(m[1], HMM_DMIRROR_PROT_ERROR);
1284 ASSERT_EQ(m[2], HMM_DMIRROR_PROT_ZERO | HMM_DMIRROR_PROT_READ);
1285 ASSERT_EQ(m[3], HMM_DMIRROR_PROT_READ);
1286 ASSERT_EQ(m[4], HMM_DMIRROR_PROT_WRITE);
1287 ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL |
1288 HMM_DMIRROR_PROT_WRITE);
1289 ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE);
1291 hmm_buffer_free(buffer);
1295 * Test the hmm_range_fault() HMM_PFN_PMD flag for large pages that
1296 * should be mapped by a large page table entry.
/*
 * hmm_range_fault() HMM_PFN_PMD coverage: snapshot a hugetlbfs region
 * and verify every page reports the PMD (large-entry) flag, first as
 * WRITE then, after mprotect(PROT_READ), as READ.  Skipped when no
 * hugetlbfs page is available.  m = buffer->mirror assignments are
 * among the lines missing from this extract.
 */
1298 TEST_F(hmm, compound)
1300 struct hmm_buffer *buffer;
1301 unsigned long npages;
1310 /* Skip test if we can't allocate a hugetlbfs page. */
1312 n = gethugepagesizes(pagesizes, 4);
1315 for (idx = 0; --n > 0; ) {
1316 if (pagesizes[n] < pagesizes[idx])
1319 size = ALIGN(TWOMEG, pagesizes[idx]);
1320 npages = size >> self->page_shift;
1322 buffer = malloc(sizeof(*buffer));
1323 ASSERT_NE(buffer, NULL);
1325 buffer->ptr = get_hugepage_region(size, GHR_STRICT);
1326 if (buffer->ptr == NULL) {
1331 buffer->size = size;
/* One snapshot flag byte per page. */
1332 buffer->mirror = malloc(npages);
1333 ASSERT_NE(buffer->mirror, NULL);
1335 /* Initialize the pages the device will snapshot in buffer->ptr. */
1336 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1339 /* Simulate a device snapshotting CPU pagetables. */
1340 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1342 ASSERT_EQ(buffer->cpages, npages);
1344 /* Check what the device saw. */
1346 for (i = 0; i < npages; ++i)
1347 ASSERT_EQ(m[i], HMM_DMIRROR_PROT_WRITE |
1348 HMM_DMIRROR_PROT_PMD);
1350 /* Make the region read-only. */
1351 ret = mprotect(buffer->ptr, size, PROT_READ);
1354 /* Simulate a device snapshotting CPU pagetables. */
1355 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1357 ASSERT_EQ(buffer->cpages, npages);
1359 /* Check what the device saw. */
1361 for (i = 0; i < npages; ++i)
1362 ASSERT_EQ(m[i], HMM_DMIRROR_PROT_READ |
1363 HMM_DMIRROR_PROT_PMD);
1365 free_hugepage_region(buffer->ptr);
1367 hmm_buffer_free(buffer);
1371 * Test two devices reading the same memory (double mapped).
/*
 * Two mirror devices reading the same (read-only) range: both reads must
 * succeed and see identical data, proving the range can be double-mapped.
 * NOTE(review): mirror is malloc(npages) here but the read checks scan
 * size/sizeof(int) ints — the npages initialization line is missing from
 * this extract, so confirm the sizing against the full source.
 */
1373 TEST_F(hmm2, double_map)
1375 struct hmm_buffer *buffer;
1376 unsigned long npages;
1383 size = npages << self->page_shift;
1385 buffer = malloc(sizeof(*buffer));
1386 ASSERT_NE(buffer, NULL);
1389 buffer->size = size;
1390 buffer->mirror = malloc(npages);
1391 ASSERT_NE(buffer->mirror, NULL);
1393 /* Reserve a range of addresses. */
1394 buffer->ptr = mmap(NULL, size,
1395 PROT_READ | PROT_WRITE,
1396 MAP_PRIVATE | MAP_ANONYMOUS,
1398 ASSERT_NE(buffer->ptr, MAP_FAILED);
1400 /* Initialize buffer in system memory. */
1401 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1404 /* Make region read-only. */
1405 ret = mprotect(buffer->ptr, size, PROT_READ);
1408 /* Simulate device 0 reading system memory. */
1409 ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages);
1411 ASSERT_EQ(buffer->cpages, npages);
1412 ASSERT_EQ(buffer->faults, 1);
1414 /* Check what the device read. */
1415 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1416 ASSERT_EQ(ptr[i], i);
1418 /* Simulate device 1 reading system memory. */
1419 ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_READ, buffer, npages);
1421 ASSERT_EQ(buffer->cpages, npages);
1422 ASSERT_EQ(buffer->faults, 1);
1424 /* Check what the device read. */
1425 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1426 ASSERT_EQ(ptr[i], i);
1428 /* Punch a hole after the first page address. */
1429 ret = munmap(buffer->ptr + self->page_size, self->page_size);
1432 hmm_buffer_free(buffer);