1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2021, Red Hat, Inc.
5 * Tests for Hyper-V features enablement
7 #include <asm/kvm_para.h>
8 #include <linux/kvm_para.h>
11 #include "test_util.h"
13 #include "processor.h"
17 * HYPERV_CPUID_ENLIGHTMENT_INFO.EBX is not a 'feature' CPUID leaf
18 * but to activate the feature it is sufficient to set it to a non-zero
19 * value. Use BIT(0) for that.
/* Pseudo-feature used by guest_test_hcalls_access() to turn on PV spinlock
 * enlightenment (HVCALL_NOTIFY_LONG_SPIN_WAIT) via bit 0 of EBX. */
21 #define HV_PV_SPINLOCKS_TEST \
22 KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EBX, 0)
/*
 * True for MSRs that must never be read back: guest_msr() skips RDMSR and the
 * write/read-back comparison for these.  Currently only HV_X64_MSR_EOI (the
 * synthetic APIC EOI register) qualifies.
 * NOTE(review): the surrounding braces of this function are missing from this
 * extract — verify against the complete source before building.
 */
37 static bool is_write_only_msr(uint32_t msr)
39 return msr == HV_X64_MSR_EOI;
/*
 * Guest-side worker for guest_test_msrs_access(): performs the WRMSR and/or
 * RDMSR described by @msr (shared with the host via a guest virtual page) and
 * asserts that a #GP fault occurred if and only if msr->fault_expected.
 * NOTE(review): this extract is missing structural lines (local declarations
 * of 'vector'/'msr_val', the 'else' branches, the done/GUEST_DONE() tail) —
 * compare against the complete file before relying on exact control flow.
 */
42 static void guest_msr(struct msr_data *msr)
47 GUEST_ASSERT(msr->idx);
/* Write path: trap any #GP instead of crashing the guest. */
50 vector = wrmsr_safe(msr->idx, msr->write_val);
/* Read back unless the write already faulted or the MSR is write-only. */
52 if (!vector && (!msr->write || !is_write_only_msr(msr->idx)))
53 vector = rdmsr_safe(msr->idx, &msr_val);
55 if (msr->fault_expected)
56 __GUEST_ASSERT(vector == GP_VECTOR,
57 "Expected #GP on %sMSR(0x%x), got vector '0x%x'",
58 msr->write ? "WR" : "RD", msr->idx, vector);
60 __GUEST_ASSERT(!vector,
61 "Expected success on %sMSR(0x%x), got vector '0x%x'",
62 msr->write ? "WR" : "RD", msr->idx, vector);
/* Nothing further to check after a fault or for write-only MSRs. */
64 if (vector || is_write_only_msr(msr->idx))
68 __GUEST_ASSERT(!vector,
69 "WRMSR(0x%x) to '0x%lx', RDMSR read '0x%lx'",
70 msr->idx, msr->write_val, msr_val);
72 /* Invariant TSC bit appears when TSC invariant control MSR is written to */
73 if (msr->idx == HV_X64_MSR_TSC_INVARIANT_CONTROL) {
74 if (!this_cpu_has(HV_ACCESS_TSC_INVARIANT))
75 GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC))
77 GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC) ==
78 !!(msr_val & HV_INVARIANT_TSC_EXPOSED));
/*
 * Guest-side worker for guest_test_hcalls_access(): enables the hypercall
 * page (GUEST_OS_ID must be written before HV_X64_MSR_HYPERCALL takes
 * effect), issues hcall->control via __hyperv_hypercall() and asserts either
 * a #UD (hcall->ud_expected) or a result equal to hcall->expect.
 * NOTE(review): local declarations, the non-fast input/output setup body and
 * the 'else' branch are elided in this extract — verify against the full file.
 */
85 static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
87 u64 res, input, output;
90 GUEST_ASSERT_NE(hcall->control, 0);
92 wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
93 wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
/* Memory-based (non-fast) hypercalls pass input/output by GPA; the output
 * area is the second page of the two-page hypercall buffer. */
95 if (!(hcall->control & HV_HYPERCALL_FAST_BIT)) {
97 output = pgs_gpa + 4096;
102 vector = __hyperv_hypercall(hcall->control, input, output, &res);
103 if (hcall->ud_expected) {
104 __GUEST_ASSERT(vector == UD_VECTOR,
105 "Expected #UD for control '%lu', got vector '0x%x'",
106 hcall->control, vector);
108 __GUEST_ASSERT(!vector,
109 "Expected no exception for control '%lu', got vector '0x%x'",
110 hcall->control, vector);
111 GUEST_ASSERT_EQ(res, hcall->expect);
/*
 * Reset a vCPU to the baseline Hyper-V CPUID used by both test suites:
 * everything KVM supports minus the leafs whose bits the stages flip on
 * one at a time via vcpu_set_cpuid_feature().
 */
117 static void vcpu_reset_hv_cpuid(struct kvm_vcpu *vcpu)
120 * Enable all supported Hyper-V features, then clear the leafs holding
121 * the features that will be tested one by one.
123 vcpu_set_hv_cpuid(vcpu);
125 vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
126 vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO);
127 vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
/*
 * Host-side driver: for each stage, configure one Hyper-V synthetic MSR
 * access in the shared 'msr' page plus the matching CPUID feature bit, run
 * the guest (guest_msr) and verify the expected #GP / success outcome.
 * KVM_CAP_HYPERV_ENFORCE_CPUID makes KVM honor the per-vCPU CPUID, which is
 * what lets clearing a feature bit produce a fault.
 * NOTE(review): the stage loop / 'switch (stage)' scaffolding, blank lines
 * and closing braces are elided in this extract — the lines below are the
 * per-stage bodies in order; verify against the complete file.
 */
130 static void guest_test_msrs_access(void)
132 struct kvm_cpuid2 *prev_cpuid = NULL;
133 struct kvm_vcpu *vcpu;
138 struct msr_data *msr;
139 bool has_invtsc = kvm_cpu_has(X86_FEATURE_INVTSC);
142 vm = vm_create_with_one_vcpu(&vcpu, guest_msr);
/* Shared page the guest reads its per-stage msr_data from. */
144 msr_gva = vm_vaddr_alloc_page(vm);
145 memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
146 msr = addr_gva2hva(vm, msr_gva);
148 vcpu_args_set(vcpu, 1, msr_gva);
149 vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);
152 vcpu_reset_hv_cpuid(vcpu);
/* Snapshot of the previous stage's CPUID, used to detect changes below. */
154 prev_cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent);
156 vcpu_init_cpuid(vcpu, prev_cpuid);
159 vm_init_descriptor_tables(vm);
160 vcpu_init_descriptor_tables(vcpu);
162 /* TODO: Make this entire test easier to maintain. */
164 vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_SYNIC2, 0);
169 * Only available when Hyper-V identification is set
171 msr->idx = HV_X64_MSR_GUEST_OS_ID;
173 msr->fault_expected = true;
176 msr->idx = HV_X64_MSR_HYPERCALL;
178 msr->fault_expected = true;
181 vcpu_set_cpuid_feature(vcpu, HV_MSR_HYPERCALL_AVAILABLE);
183 * HV_X64_MSR_GUEST_OS_ID has to be written first to make
184 * HV_X64_MSR_HYPERCALL available.
186 msr->idx = HV_X64_MSR_GUEST_OS_ID;
188 msr->write_val = HYPERV_LINUX_OS_ID;
189 msr->fault_expected = false;
192 msr->idx = HV_X64_MSR_GUEST_OS_ID;
194 msr->fault_expected = false;
197 msr->idx = HV_X64_MSR_HYPERCALL;
199 msr->fault_expected = false;
203 msr->idx = HV_X64_MSR_VP_RUNTIME;
205 msr->fault_expected = true;
208 vcpu_set_cpuid_feature(vcpu, HV_MSR_VP_RUNTIME_AVAILABLE);
209 msr->idx = HV_X64_MSR_VP_RUNTIME;
211 msr->fault_expected = false;
/* Read-only MSR: a write must #GP even with the feature bit set. */
215 msr->idx = HV_X64_MSR_VP_RUNTIME;
218 msr->fault_expected = true;
222 msr->idx = HV_X64_MSR_TIME_REF_COUNT;
224 msr->fault_expected = true;
227 vcpu_set_cpuid_feature(vcpu, HV_MSR_TIME_REF_COUNT_AVAILABLE);
228 msr->idx = HV_X64_MSR_TIME_REF_COUNT;
230 msr->fault_expected = false;
234 msr->idx = HV_X64_MSR_TIME_REF_COUNT;
237 msr->fault_expected = true;
241 msr->idx = HV_X64_MSR_VP_INDEX;
243 msr->fault_expected = true;
246 vcpu_set_cpuid_feature(vcpu, HV_MSR_VP_INDEX_AVAILABLE);
247 msr->idx = HV_X64_MSR_VP_INDEX;
249 msr->fault_expected = false;
253 msr->idx = HV_X64_MSR_VP_INDEX;
256 msr->fault_expected = true;
260 msr->idx = HV_X64_MSR_RESET;
262 msr->fault_expected = true;
265 vcpu_set_cpuid_feature(vcpu, HV_MSR_RESET_AVAILABLE);
266 msr->idx = HV_X64_MSR_RESET;
268 msr->fault_expected = false;
271 msr->idx = HV_X64_MSR_RESET;
274 * TODO: the test only writes '0' to HV_X64_MSR_RESET
275 * at the moment, writing some other value there will
276 * trigger real vCPU reset and the code is not prepared
280 msr->fault_expected = false;
284 msr->idx = HV_X64_MSR_REFERENCE_TSC;
286 msr->fault_expected = true;
289 vcpu_set_cpuid_feature(vcpu, HV_MSR_REFERENCE_TSC_AVAILABLE);
290 msr->idx = HV_X64_MSR_REFERENCE_TSC;
292 msr->fault_expected = false;
295 msr->idx = HV_X64_MSR_REFERENCE_TSC;
298 msr->fault_expected = false;
302 msr->idx = HV_X64_MSR_EOM;
304 msr->fault_expected = true;
308 * Remains unavailable even with KVM_CAP_HYPERV_SYNIC2
309 * capability enabled and guest visible CPUID bit unset.
311 msr->idx = HV_X64_MSR_EOM;
313 msr->fault_expected = true;
316 vcpu_set_cpuid_feature(vcpu, HV_MSR_SYNIC_AVAILABLE);
317 msr->idx = HV_X64_MSR_EOM;
319 msr->fault_expected = false;
322 msr->idx = HV_X64_MSR_EOM;
325 msr->fault_expected = false;
329 msr->idx = HV_X64_MSR_STIMER0_CONFIG;
331 msr->fault_expected = true;
334 vcpu_set_cpuid_feature(vcpu, HV_MSR_SYNTIMER_AVAILABLE);
335 msr->idx = HV_X64_MSR_STIMER0_CONFIG;
337 msr->fault_expected = false;
340 msr->idx = HV_X64_MSR_STIMER0_CONFIG;
343 msr->fault_expected = false;
/* Direct mode test: bit 12 of STIMER0_CONFIG requires its own feature bit. */
346 /* Direct mode test */
347 msr->idx = HV_X64_MSR_STIMER0_CONFIG;
349 msr->write_val = 1 << 12;
350 msr->fault_expected = true;
353 vcpu_set_cpuid_feature(vcpu, HV_STIMER_DIRECT_MODE_AVAILABLE);
354 msr->idx = HV_X64_MSR_STIMER0_CONFIG;
356 msr->write_val = 1 << 12;
357 msr->fault_expected = false;
361 msr->idx = HV_X64_MSR_EOI;
363 msr->fault_expected = true;
366 vcpu_set_cpuid_feature(vcpu, HV_MSR_APIC_ACCESS_AVAILABLE);
367 msr->idx = HV_X64_MSR_EOI;
370 msr->fault_expected = false;
374 msr->idx = HV_X64_MSR_TSC_FREQUENCY;
376 msr->fault_expected = true;
379 vcpu_set_cpuid_feature(vcpu, HV_ACCESS_FREQUENCY_MSRS);
380 msr->idx = HV_X64_MSR_TSC_FREQUENCY;
382 msr->fault_expected = false;
386 msr->idx = HV_X64_MSR_TSC_FREQUENCY;
389 msr->fault_expected = true;
393 msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
395 msr->fault_expected = true;
398 vcpu_set_cpuid_feature(vcpu, HV_ACCESS_REENLIGHTENMENT);
399 msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
401 msr->fault_expected = false;
404 msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
407 msr->fault_expected = false;
410 /* Can only write '0' */
411 msr->idx = HV_X64_MSR_TSC_EMULATION_STATUS;
414 msr->fault_expected = true;
418 msr->idx = HV_X64_MSR_CRASH_P0;
420 msr->fault_expected = true;
423 vcpu_set_cpuid_feature(vcpu, HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE);
424 msr->idx = HV_X64_MSR_CRASH_P0;
426 msr->fault_expected = false;
429 msr->idx = HV_X64_MSR_CRASH_P0;
432 msr->fault_expected = false;
436 msr->idx = HV_X64_MSR_SYNDBG_STATUS;
438 msr->fault_expected = true;
441 vcpu_set_cpuid_feature(vcpu, HV_FEATURE_DEBUG_MSRS_AVAILABLE);
442 vcpu_set_cpuid_feature(vcpu, HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING);
443 msr->idx = HV_X64_MSR_SYNDBG_STATUS;
445 msr->fault_expected = false;
448 msr->idx = HV_X64_MSR_SYNDBG_STATUS;
451 msr->fault_expected = false;
455 /* MSR is not available when CPUID feature bit is unset */
458 msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
460 msr->fault_expected = true;
463 /* MSR is available when CPUID feature bit is set */
466 vcpu_set_cpuid_feature(vcpu, HV_ACCESS_TSC_INVARIANT);
467 msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
469 msr->fault_expected = false;
472 /* Writing bits other than 0 is forbidden */
475 msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
477 msr->write_val = 0xdeadbeef;
478 msr->fault_expected = true;
481 /* Setting bit 0 enables the feature */
484 msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
487 msr->fault_expected = false;
/* Stage runner: push CPUID to KVM only when this stage changed it, run the
 * vCPU and translate the guest's ucall into a pass/fail verdict. */
495 vcpu_set_cpuid(vcpu);
497 memcpy(prev_cpuid, vcpu->cpuid, kvm_cpuid2_size(vcpu->cpuid->nent));
499 pr_debug("Stage %d: testing msr: 0x%x for %s\n", stage,
500 msr->idx, msr->write ? "write" : "read");
503 TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
505 switch (get_ucall(vcpu, &uc)) {
507 REPORT_GUEST_ASSERT(uc);
512 TEST_FAIL("Unhandled ucall: %ld", uc.cmd);
/*
 * Host-side driver: per stage, select one Hyper-V hypercall in the shared
 * 'hcall' page and set the CPUID bit that should (or should not) permit it,
 * then run guest_hcall and check the expected status / #UD.  The progression
 * for each call is typically ACCESS_DENIED without the bit, then some other
 * status once the bit is set.
 * NOTE(review): as with guest_test_msrs_access(), the stage loop and switch
 * scaffolding are elided in this extract — verify against the complete file.
 */
521 static void guest_test_hcalls_access(void)
523 struct kvm_cpuid2 *prev_cpuid = NULL;
524 struct kvm_vcpu *vcpu;
528 vm_vaddr_t hcall_page, hcall_params;
529 struct hcall_data *hcall;
532 vm = vm_create_with_one_vcpu(&vcpu, guest_hcall);
534 vm_init_descriptor_tables(vm);
535 vcpu_init_descriptor_tables(vcpu);
537 /* Hypercall input/output */
538 hcall_page = vm_vaddr_alloc_pages(vm, 2);
539 memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());
/* Shared page carrying the per-stage hcall_data for the guest. */
541 hcall_params = vm_vaddr_alloc_page(vm);
542 memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
543 hcall = addr_gva2hva(vm, hcall_params);
545 vcpu_args_set(vcpu, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
546 vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);
549 vcpu_reset_hv_cpuid(vcpu);
551 prev_cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent);
553 vcpu_init_cpuid(vcpu, prev_cpuid);
/* Hypercall page itself needs HV_MSR_HYPERCALL_AVAILABLE; an unknown
 * control value must yield INVALID_HYPERCALL_CODE. */
558 vcpu_set_cpuid_feature(vcpu, HV_MSR_HYPERCALL_AVAILABLE);
559 hcall->control = 0xbeef;
560 hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
564 hcall->control = HVCALL_POST_MESSAGE;
565 hcall->expect = HV_STATUS_ACCESS_DENIED;
568 vcpu_set_cpuid_feature(vcpu, HV_POST_MESSAGES);
569 hcall->control = HVCALL_POST_MESSAGE;
570 hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
574 hcall->control = HVCALL_SIGNAL_EVENT;
575 hcall->expect = HV_STATUS_ACCESS_DENIED;
578 vcpu_set_cpuid_feature(vcpu, HV_SIGNAL_EVENTS);
579 hcall->control = HVCALL_SIGNAL_EVENT;
580 hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
584 hcall->control = HVCALL_RESET_DEBUG_SESSION;
585 hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
588 vcpu_set_cpuid_feature(vcpu, HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING);
589 hcall->control = HVCALL_RESET_DEBUG_SESSION;
590 hcall->expect = HV_STATUS_ACCESS_DENIED;
593 vcpu_set_cpuid_feature(vcpu, HV_DEBUGGING);
594 hcall->control = HVCALL_RESET_DEBUG_SESSION;
595 hcall->expect = HV_STATUS_OPERATION_DENIED;
599 hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE;
600 hcall->expect = HV_STATUS_ACCESS_DENIED;
603 vcpu_set_cpuid_feature(vcpu, HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED);
604 hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE;
605 hcall->expect = HV_STATUS_SUCCESS;
608 hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX;
609 hcall->expect = HV_STATUS_ACCESS_DENIED;
612 vcpu_set_cpuid_feature(vcpu, HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED);
613 hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX;
614 hcall->expect = HV_STATUS_SUCCESS;
618 hcall->control = HVCALL_SEND_IPI;
619 hcall->expect = HV_STATUS_ACCESS_DENIED;
622 vcpu_set_cpuid_feature(vcpu, HV_X64_CLUSTER_IPI_RECOMMENDED);
623 hcall->control = HVCALL_SEND_IPI;
624 hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
627 /* Nothing in 'sparse banks' -> success */
628 hcall->control = HVCALL_SEND_IPI_EX;
629 hcall->expect = HV_STATUS_SUCCESS;
633 hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT;
634 hcall->expect = HV_STATUS_ACCESS_DENIED;
637 vcpu_set_cpuid_feature(vcpu, HV_PV_SPINLOCKS_TEST);
638 hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT;
639 hcall->expect = HV_STATUS_SUCCESS;
642 /* XMM fast hypercall */
643 hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
644 hcall->ud_expected = true;
647 vcpu_set_cpuid_feature(vcpu, HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE);
648 hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
649 hcall->ud_expected = false;
650 hcall->expect = HV_STATUS_SUCCESS;
653 hcall->control = HV_EXT_CALL_QUERY_CAPABILITIES;
654 hcall->expect = HV_STATUS_ACCESS_DENIED;
/* Extended hypercalls may not use the fast bit -> INVALID_PARAMETER. */
657 vcpu_set_cpuid_feature(vcpu, HV_ENABLE_EXTENDED_HYPERCALLS);
658 hcall->control = HV_EXT_CALL_QUERY_CAPABILITIES | HV_HYPERCALL_FAST_BIT;
659 hcall->expect = HV_STATUS_INVALID_PARAMETER;
/* Stage runner: same pattern as guest_test_msrs_access(). */
666 vcpu_set_cpuid(vcpu);
668 memcpy(prev_cpuid, vcpu->cpuid, kvm_cpuid2_size(vcpu->cpuid->nent));
670 pr_debug("Stage %d: testing hcall: 0x%lx\n", stage, hcall->control);
673 TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
675 switch (get_ucall(vcpu, &uc)) {
677 REPORT_GUEST_ASSERT(uc);
682 TEST_FAIL("Unhandled ucall: %ld", uc.cmd);
/*
 * Body of main(): the test hard-requires KVM_CAP_HYPERV_ENFORCE_CPUID (both
 * suites rely on KVM rejecting feature use not advertised in CPUID), then
 * runs the MSR-access and hypercall-access suites in turn.
 * NOTE(review): the 'int main(...)' header line is not visible in this
 * extract — confirm the signature in the complete source.
 */
693 TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_ENFORCE_CPUID));
695 pr_info("Testing access to Hyper-V specific MSRs\n");
696 guest_test_msrs_access();
698 pr_info("Testing access to Hyper-V hypercalls\n");
699 guest_test_hcalls_access();