// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE /* for program_invocation_short_name */

#include <sys/ioctl.h>

#include <linux/compiler.h>

#include <test_util.h>
#include <processor.h>
/*
 * s390x needs at least 1MB alignment, and the x86_64 MOVE/DELETE tests need a
 * 2MB sized and aligned region so that the initial region corresponds to
 * exactly one large page.
 */
#define MEM_REGION_SIZE 0x200000

/*
 * Somewhat arbitrary location and slot, intended to not overlap anything.
 */
#define MEM_REGION_GPA 0xc0000000
#define MEM_REGION_SLOT 10
static const uint64_t MMIO_VAL = 0xbeefull;

extern const uint64_t final_rip_start;
extern const uint64_t final_rip_end;

static sem_t vcpu_ready;
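
/*
 * Spin in the guest until the value at MEM_REGION_GPA changes from
 * @spin_val.  READ_ONCE() forces a fresh load on every iteration so the
 * compiler cannot hoist the read out of the loop.
 */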
static inline uint64_t guest_spin_on_val(uint64_t spin_val)
{
        uint64_t val;

        do {
                val = READ_ONCE(*((uint64_t *)MEM_REGION_GPA));
        } while (val == spin_val);

        return val;
}
static void *vcpu_worker(void *data)
{
        struct kvm_vcpu *vcpu = data;
        struct kvm_run *run = vcpu->run;
        struct ucall uc;
        uint64_t cmd;

        /*
         * Loop until the guest is done. Re-enter the guest on all MMIO exits,
         * which will occur if the guest attempts to access a memslot after it
         * has been deleted or while it is being moved.
         */
        while (1) {
                vcpu_run(vcpu);

                if (run->exit_reason == KVM_EXIT_IO) {
                        cmd = get_ucall(vcpu, &uc);
                        if (cmd != UCALL_SYNC)
                                break;

                        sem_post(&vcpu_ready);
                        continue;
                }

                if (run->exit_reason != KVM_EXIT_MMIO)
                        break;

                TEST_ASSERT(!run->mmio.is_write, "Unexpected exit mmio write");
                TEST_ASSERT(run->mmio.len == 8,
                            "Unexpected exit mmio size = %u", run->mmio.len);

                TEST_ASSERT(run->mmio.phys_addr == MEM_REGION_GPA,
                            "Unexpected exit mmio address = 0x%llx",
                            run->mmio.phys_addr);
                memcpy(run->mmio.data, &MMIO_VAL, 8);
        }

        if (run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT)
                REPORT_GUEST_ASSERT(uc);

        return NULL;
}
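
/*
 * Wait, with a bounded timeout, for the vCPU worker to post the vcpu_ready
 * semaphore, i.e. for the guest to reach its next sync point.
 */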
static void wait_for_vcpu(void)
{
        TEST_ASSERT(!clock_gettime(CLOCK_REALTIME, &ts),
                    "clock_gettime() failed: %d", errno);

        TEST_ASSERT(!sem_timedwait(&vcpu_ready, &ts),
                    "sem_timedwait() failed: %d", errno);

        /* Wait for the vCPU thread to reenter the guest. */
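
/*
 * Create a VM with a single vCPU, back the test memslot with THP-friendly
 * anonymous memory, map the test GPA into the guest, zero the backing
 * pages, and spin up the vCPU worker thread before returning.
 */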
static struct kvm_vm *spawn_vm(struct kvm_vcpu **vcpu, pthread_t *vcpu_thread,
                               void *guest_code)
{
        vm = vm_create_with_one_vcpu(vcpu, guest_code);

        vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
                                    MEM_REGION_GPA, MEM_REGION_SLOT,
                                    MEM_REGION_SIZE / getpagesize(), 0);

        /*
         * Allocate and map two pages so that the GPA accessed by guest_code()
         * stays valid across the memslot move.
         */
        gpa = vm_phy_pages_alloc(vm, 2, MEM_REGION_GPA, MEM_REGION_SLOT);
        TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n");

        virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2);

        /* Ditto for the host mapping so that both pages can be zeroed. */
        hva = addr_gpa2hva(vm, MEM_REGION_GPA);
        memset(hva, 0, 2 * 4096);

        pthread_create(vcpu_thread, NULL, vcpu_worker, *vcpu);

        /* Ensure the guest thread is spun up. */
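
/*
 * Guest side of the MOVE test.  The host toggles the value backing
 * MEM_REGION_GPA while sliding the memslot to a misaligned GPA and back;
 * depending on timing, each move may or may not be observed as MMIO.
 */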
static void guest_code_move_memory_region(void)
{
        /*
         * Spin until the memory region starts getting moved to a
         * misaligned address.
         * Every region move may or may not trigger MMIO, as the
         * window where the memslot is invalid is usually quite small.
         */
        val = guest_spin_on_val(0);
        __GUEST_ASSERT(val == 1 || val == MMIO_VAL,
                       "Expected '1' or MMIO ('%lx'), got '%lx'", MMIO_VAL, val);

        /* Spin until the misaligning memory region move completes. */
        val = guest_spin_on_val(MMIO_VAL);
        __GUEST_ASSERT(val == 1 || val == 0,
                       "Expected '0' or '1' (no MMIO), got '%lx'", val);

        /* Spin until the memory region starts to get re-aligned. */
        val = guest_spin_on_val(0);
        __GUEST_ASSERT(val == 1 || val == MMIO_VAL,
                       "Expected '1' or MMIO ('%lx'), got '%lx'", MMIO_VAL, val);

        /* Spin until the re-aligning memory region move completes. */
        val = guest_spin_on_val(MMIO_VAL);
        GUEST_ASSERT_EQ(val, 1);
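
/*
 * Host side of the MOVE test: repeatedly shift the in-use memslot to a
 * misaligned base GPA and back while the guest polls the region, and verify
 * the guest only ever observes the expected values or MMIO.
 */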
static void test_move_memory_region(void)
{
        pthread_t vcpu_thread;
        struct kvm_vcpu *vcpu;

        vm = spawn_vm(&vcpu, &vcpu_thread, guest_code_move_memory_region);

        hva = addr_gpa2hva(vm, MEM_REGION_GPA);

        /*
         * Shift the region's base GPA. The guest should not see "2" as the
         * hva->gpa translation is misaligned, i.e. the guest is accessing a
         * different host pfn.
         */
        vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA - 4096);

        /*
         * The guest _might_ see an invalid memslot and trigger MMIO, but it's
         * a tiny window. Spin and defer the sync until the memslot is
         * restored and guest behavior is once again deterministic.
         */

        /*
         * Note, value in memory needs to be changed *before* restoring the
         * memslot, else the guest could race the update and see "2".
         */

        /* Restore the original base, the guest should see "1". */
        vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA);

        /* Deferred sync from when the memslot was misaligned (above). */

        pthread_join(vcpu_thread, NULL);
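
/*
 * Guest side of the DELETE test.  Accesses to a deleted memslot are reflected
 * back to the guest as MMIO reads of MMIO_VAL, so the guest can detect each
 * delete/recreate cycle purely from the values it reads.
 */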
static void guest_code_delete_memory_region(void)
{
        /* Spin until the memory region is deleted. */
        val = guest_spin_on_val(0);
        GUEST_ASSERT_EQ(val, MMIO_VAL);

        /* Spin until the memory region is recreated. */
        val = guest_spin_on_val(MMIO_VAL);
        GUEST_ASSERT_EQ(val, 0);

        /* Spin until the memory region is deleted. */
        val = guest_spin_on_val(0);
        GUEST_ASSERT_EQ(val, MMIO_VAL);

        asm("1:\n\t"
            ".pushsection .rodata\n\t"
            ".global final_rip_start\n\t"
            "final_rip_start: .quad 1b\n\t"
            ".popsection\n\t");

        /* Spin indefinitely (until the code memslot is deleted). */
        guest_spin_on_val(MMIO_VAL);

        asm("1:\n\t"
            ".pushsection .rodata\n\t"
            ".global final_rip_end\n\t"
            "final_rip_end: .quad 1b\n\t"
            ".popsection\n\t");
static void test_delete_memory_region(void)
{
        pthread_t vcpu_thread;
        struct kvm_vcpu *vcpu;
        struct kvm_regs regs;

        vm = spawn_vm(&vcpu, &vcpu_thread, guest_code_delete_memory_region);

        /* Delete the memory region, the guest should not die. */
        vm_mem_region_delete(vm, MEM_REGION_SLOT);

        /* Recreate the memory region. The guest should see "0". */
        vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
                                    MEM_REGION_GPA, MEM_REGION_SLOT,
                                    MEM_REGION_SIZE / getpagesize(), 0);

        /* Delete the region again so that there's only one memslot left. */
        vm_mem_region_delete(vm, MEM_REGION_SLOT);

        /*
         * Delete the primary memslot. This should cause an emulation error or
         * shutdown due to the page tables getting nuked.
         */
        vm_mem_region_delete(vm, 0);

        pthread_join(vcpu_thread, NULL);

        TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN ||
                    run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
                    "Unexpected exit reason = %d", run->exit_reason);

        vcpu_regs_get(vcpu, &regs);

        /*
         * On AMD, after KVM_EXIT_SHUTDOWN the VMCB has been reinitialized already,
         * so the instruction pointer would point to the reset vector.
         */
        if (run->exit_reason == KVM_EXIT_INTERNAL_ERROR)
                TEST_ASSERT(regs.rip >= final_rip_start &&
                            regs.rip < final_rip_end,
                            "Bad rip, expected 0x%lx - 0x%lx, got 0x%llx",
                            final_rip_start, final_rip_end, regs.rip);
static void test_zero_memory_regions(void)
{
        struct kvm_vcpu *vcpu;

        pr_info("Testing KVM_RUN with zero added memory regions\n");

        vm = vm_create_barebones();
        vcpu = __vm_vcpu_add(vm, 0);
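
        /*
         * Exercise a VM-scoped ioctl and then run the vCPU with no memslots
         * defined; the run is expected to exit with KVM_EXIT_INTERNAL_ERROR
         * rather than crash.
         */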
        vm_ioctl(vm, KVM_SET_NR_MMU_PAGES, (void *)64ul);

        TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR);

#endif /* __x86_64__ */
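
/*
 * Walk all 32 possible flag bits and verify that KVM rejects every flag that
 * isn't supported (or is v2-only when using the v1 ioctl) with EINVAL, for
 * both KVM_SET_USER_MEMORY_REGION and its v2 counterpart.
 */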
static void test_invalid_memory_region_flags(void)
{
        uint32_t supported_flags = KVM_MEM_LOG_DIRTY_PAGES;
        const uint32_t v2_only_flags = KVM_MEM_GUEST_MEMFD;

#if defined __aarch64__ || defined __x86_64__
        supported_flags |= KVM_MEM_READONLY;
#endif

        if (kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(KVM_X86_SW_PROTECTED_VM))
                vm = vm_create_barebones_protected_vm();
        else
                vm = vm_create_barebones();

        if (kvm_check_cap(KVM_CAP_MEMORY_ATTRIBUTES) & KVM_MEMORY_ATTRIBUTE_PRIVATE)
                supported_flags |= KVM_MEM_GUEST_MEMFD;

        for (i = 0; i < 32; i++) {
                if ((supported_flags & BIT(i)) && !(v2_only_flags & BIT(i)))
                        continue;

                r = __vm_set_user_memory_region(vm, 0, BIT(i),
                                                0, MEM_REGION_SIZE, NULL);

                TEST_ASSERT(r && errno == EINVAL,
                            "KVM_SET_USER_MEMORY_REGION should have failed on v2 only flag 0x%lx", BIT(i));

                if (supported_flags & BIT(i))
                        continue;

                r = __vm_set_user_memory_region2(vm, 0, BIT(i),
                                                 0, MEM_REGION_SIZE, NULL, 0, 0);
                TEST_ASSERT(r && errno == EINVAL,
                            "KVM_SET_USER_MEMORY_REGION2 should have failed on unsupported flag 0x%lx", BIT(i));
        }

        if (supported_flags & KVM_MEM_GUEST_MEMFD) {
                r = __vm_set_user_memory_region2(vm, 0,
                                                 KVM_MEM_LOG_DIRTY_PAGES | KVM_MEM_GUEST_MEMFD,
                                                 0, MEM_REGION_SIZE, NULL, 0, 0);
                TEST_ASSERT(r && errno == EINVAL,
                            "KVM_SET_USER_MEMORY_REGION2 should have failed, dirty logging private memory is unsupported");
        }
/*
 * Test that memory slots can be added up to KVM_CAP_NR_MEMSLOTS, and that any
 * attempt to add further slots fails.
 */
static void test_add_max_memory_regions(void)
{
        uint32_t max_mem_slots;
        void *mem, *mem_aligned, *mem_extra;

        /* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
        alignment = 0x100000;

        max_mem_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
        TEST_ASSERT(max_mem_slots > 0,
                    "KVM_CAP_NR_MEMSLOTS should be greater than 0");
        pr_info("Allowed number of memory slots: %i\n", max_mem_slots);

        vm = vm_create_barebones();

        /* Check that memory slots can be added up to the maximum allowed */
        pr_info("Adding slots 0..%i, each memory region with %dK size\n",
                (max_mem_slots - 1), MEM_REGION_SIZE >> 10);

        mem = mmap(NULL, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment,
                   PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
        TEST_ASSERT(mem != MAP_FAILED, "Failed to mmap() host");
        mem_aligned = (void *)(((size_t) mem + alignment - 1) & ~(alignment - 1));

        for (slot = 0; slot < max_mem_slots; slot++)
                vm_set_user_memory_region(vm, slot, 0,
                                          ((uint64_t)slot * MEM_REGION_SIZE),
                                          MEM_REGION_SIZE,
                                          mem_aligned + (uint64_t)slot * MEM_REGION_SIZE);

        /* Check that memory slots cannot be added beyond the limit */
        mem_extra = mmap(NULL, MEM_REGION_SIZE, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        TEST_ASSERT(mem_extra != MAP_FAILED, "Failed to mmap() host");

        ret = __vm_set_user_memory_region(vm, max_mem_slots, 0,
                                          (uint64_t)max_mem_slots * MEM_REGION_SIZE,
                                          MEM_REGION_SIZE, mem_extra);
        TEST_ASSERT(ret == -1 && errno == EINVAL,
                    "Adding one more memory slot should fail with EINVAL");

        munmap(mem, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment);
        munmap(mem_extra, MEM_REGION_SIZE);
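
/*
 * Helper for the guest_memfd tests: attempt to bind the given fd/offset to a
 * KVM_MEM_GUEST_MEMFD memslot and assert that KVM rejects it with EINVAL.
 */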
static void test_invalid_guest_memfd(struct kvm_vm *vm, int memfd,
                                     size_t offset, const char *msg)
{
        int r = __vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD,
                                             MEM_REGION_GPA, MEM_REGION_SIZE,
                                             0, memfd, offset);
        TEST_ASSERT(r == -1 && errno == EINVAL, "%s", msg);
}

static void test_add_private_memory_region(void)
{
        struct kvm_vm *vm, *vm2;

        pr_info("Testing ADD of KVM_MEM_GUEST_MEMFD memory regions\n");

        vm = vm_create_barebones_protected_vm();

        test_invalid_guest_memfd(vm, vm->kvm_fd, 0, "KVM fd should fail");
        test_invalid_guest_memfd(vm, vm->fd, 0, "VM's fd should fail");

        memfd = kvm_memfd_alloc(MEM_REGION_SIZE, false);
        test_invalid_guest_memfd(vm, memfd, 0, "Regular memfd() should fail");

        vm2 = vm_create_barebones_protected_vm();
        memfd = vm_create_guest_memfd(vm2, MEM_REGION_SIZE, 0);
        test_invalid_guest_memfd(vm, memfd, 0, "Other VM's guest_memfd() should fail");

        vm_set_user_memory_region2(vm2, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD,
                                   MEM_REGION_GPA, MEM_REGION_SIZE, 0, memfd, 0);

        memfd = vm_create_guest_memfd(vm, MEM_REGION_SIZE, 0);
        for (i = 1; i < PAGE_SIZE; i++)
                test_invalid_guest_memfd(vm, memfd, i, "Unaligned offset should fail");

        vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD,
                                   MEM_REGION_GPA, MEM_REGION_SIZE, 0, memfd, 0);
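
/*
 * Bind two memslots to disjoint halves of a single guest_memfd, then verify
 * that re-adding the first slot with offsets that overlap the second slot's
 * binding fails with EEXIST.
 */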
static void test_add_overlapping_private_memory_regions(void)
{
        pr_info("Testing ADD of overlapping KVM_MEM_GUEST_MEMFD memory regions\n");

        vm = vm_create_barebones_protected_vm();

        memfd = vm_create_guest_memfd(vm, MEM_REGION_SIZE * 4, 0);

        vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD,
                                   MEM_REGION_GPA, MEM_REGION_SIZE * 2, 0, memfd, 0);

        vm_set_user_memory_region2(vm, MEM_REGION_SLOT + 1, KVM_MEM_GUEST_MEMFD,
                                   MEM_REGION_GPA * 2, MEM_REGION_SIZE * 2,
                                   0, memfd, MEM_REGION_SIZE * 2);

        /*
         * Delete the first memslot, and then attempt to recreate it except
         * with a "bad" offset that results in overlap in the guest_memfd().
         */
        vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD,
                                   MEM_REGION_GPA, 0, NULL, -1, 0);

        /* Overlap the front half of the other slot. */
        r = __vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD,
                                         MEM_REGION_GPA * 2 - MEM_REGION_SIZE,
        TEST_ASSERT(r == -1 && errno == EEXIST, "%s",
                    "Overlapping guest_memfd() bindings should fail with EEXIST");

        /* And now the back half of the other slot. */
        r = __vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD,
                                         MEM_REGION_GPA * 2 + MEM_REGION_SIZE,
        TEST_ASSERT(r == -1 && errno == EEXIST, "%s",
                    "Overlapping guest_memfd() bindings should fail with EEXIST");
int main(int argc, char *argv[])
{
        /*
         * FIXME: the zero-memslot test fails on aarch64 and s390x because
         * KVM_RUN fails with ENOEXEC or EFAULT.
         */
        test_zero_memory_regions();

        test_invalid_memory_region_flags();

        test_add_max_memory_regions();

        if (kvm_has_cap(KVM_CAP_GUEST_MEMFD) &&
            (kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(KVM_X86_SW_PROTECTED_VM))) {
                test_add_private_memory_region();
                test_add_overlapping_private_memory_regions();
        } else {
                pr_info("Skipping tests for KVM_MEM_GUEST_MEMFD memory regions\n");
        }

        loops = atoi_positive("Number of iterations", argv[1]);

        pr_info("Testing MOVE of in-use region, %d loops\n", loops);
        for (i = 0; i < loops; i++)
                test_move_memory_region();

        pr_info("Testing DELETE of in-use region, %d loops\n", loops);
        for (i = 0; i < loops; i++)
                test_delete_memory_region();