tools/testing/selftests/kvm/x86_64/kvm_pv_test.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020, Google LLC.
 *
 * Tests for KVM paravirtual feature disablement
 */
#include <asm/kvm_para.h>
#include <linux/kvm_para.h>
#include <stdint.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

struct msr_data {
	uint32_t idx;
	const char *name;
};

#define TEST_MSR(msr) { .idx = msr, .name = #msr }
#define UCALL_PR_MSR 0xdeadbeef
#define PR_MSR(msr) ucall(UCALL_PR_MSR, 1, msr)

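/*
 * The guest has no console of its own; PR_MSR()/PR_HCALL() hand the address
 * of the msr_data/hcall_data entry under test to the host via a custom ucall,
 * and the host-side pr_msr()/pr_hcall() helpers below do the printing.
 */
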
/*
 * KVM paravirtual msrs to test. Expect a #GP if any of these msrs are read or
 * written, as the KVM_CPUID_FEATURES leaf is cleared.
 */
static struct msr_data msrs_to_test[] = {
	TEST_MSR(MSR_KVM_SYSTEM_TIME),
	TEST_MSR(MSR_KVM_SYSTEM_TIME_NEW),
	TEST_MSR(MSR_KVM_WALL_CLOCK),
	TEST_MSR(MSR_KVM_WALL_CLOCK_NEW),
	TEST_MSR(MSR_KVM_ASYNC_PF_EN),
	TEST_MSR(MSR_KVM_STEAL_TIME),
	TEST_MSR(MSR_KVM_PV_EOI_EN),
	TEST_MSR(MSR_KVM_POLL_CONTROL),
	TEST_MSR(MSR_KVM_ASYNC_PF_INT),
	TEST_MSR(MSR_KVM_ASYNC_PF_ACK),
};

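/*
 * Guest-side MSR test.  rdmsr_safe()/wrmsr_safe() are selftest helpers that
 * catch any exception raised by the access and return its vector (0 if no
 * exception occurred), so the guest can assert that the access faulted with
 * a #GP instead of crashing outright.
 */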
static void test_msr(struct msr_data *msr)
{
	uint64_t ignored;
	uint8_t vector;

	PR_MSR(msr);

	vector = rdmsr_safe(msr->idx, &ignored);
	GUEST_ASSERT_EQ(vector, GP_VECTOR);

	vector = wrmsr_safe(msr->idx, 0);
	GUEST_ASSERT_EQ(vector, GP_VECTOR);
}

struct hcall_data {
	uint64_t nr;
	const char *name;
};

#define TEST_HCALL(hc) { .nr = hc, .name = #hc }
#define UCALL_PR_HCALL 0xdeadc0de
#define PR_HCALL(hc) ucall(UCALL_PR_HCALL, 1, hc)

/*
 * KVM hypercalls to test. Expect -KVM_ENOSYS when called, as the corresponding
 * features have been cleared in KVM_CPUID_FEATURES.
 */
static struct hcall_data hcalls_to_test[] = {
	TEST_HCALL(KVM_HC_KICK_CPU),
	TEST_HCALL(KVM_HC_SEND_IPI),
	TEST_HCALL(KVM_HC_SCHED_YIELD),
};

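/*
 * Guest-side hypercall test.  kvm_hypercall() issues the hypercall
 * instruction with @nr in RAX and returns whatever KVM leaves there; with the
 * PV feature bits cleared, every call is expected to fail with -KVM_ENOSYS.
 */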
static void test_hcall(struct hcall_data *hc)
{
	uint64_t r;

	PR_HCALL(hc);
	r = kvm_hypercall(hc->nr, 0, 0, 0, 0);
	GUEST_ASSERT_EQ(r, -KVM_ENOSYS);
}

static void guest_main(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(msrs_to_test); i++) {
		test_msr(&msrs_to_test[i]);
	}

	for (i = 0; i < ARRAY_SIZE(hcalls_to_test); i++) {
		test_hcall(&hcalls_to_test[i]);
	}

	GUEST_DONE();
}

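/*
 * Host-side pretty-printers for the guest's PR_MSR/PR_HCALL ucalls; each
 * ucall carries the address of the msr_data/hcall_data entry under test.
 */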
static void pr_msr(struct ucall *uc)
{
	struct msr_data *msr = (struct msr_data *)uc->args[0];

	pr_info("testing msr: %s (%#x)\n", msr->name, msr->idx);
}

static void pr_hcall(struct ucall *uc)
{
	struct hcall_data *hc = (struct hcall_data *)uc->args[0];

	pr_info("testing hcall: %s (%lu)\n", hc->name, hc->nr);
}

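/*
 * Run the vCPU and service its ucalls until the guest either finishes
 * (UCALL_DONE) or trips an assertion (UCALL_ABORT, reported via
 * REPORT_GUEST_ASSERT, which fails the test).
 */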
static void enter_guest(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	while (true) {
		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_PR_MSR:
			pr_msr(&uc);
			break;
		case UCALL_PR_HCALL:
			pr_hcall(&uc);
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			return;
		case UCALL_DONE:
			return;
		}
	}
}

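/*
 * Host setup: require and enable KVM_CAP_ENFORCE_PV_FEATURE_CPUID so KVM
 * actually enforces the guest's PV feature CPUID, clear the KVM_CPUID_FEATURES
 * leaf so no PV features are advertised, and install the guest's IDT/exception
 * handlers so that rdmsr_safe()/wrmsr_safe() can catch the expected #GPs.
 */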
int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ENFORCE_PV_FEATURE_CPUID));

	vm = vm_create_with_one_vcpu(&vcpu, guest_main);

	vcpu_enable_cap(vcpu, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 1);

	vcpu_clear_cpuid_entry(vcpu, KVM_CPUID_FEATURES);

	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);

	enter_guest(vcpu);
	kvm_vm_free(vm);
}