/*
 * tools/testing/selftests/kvm/x86_64/kvm_pv_test.c
 * (git-web scrape artifacts removed; SPDX license header follows.)
 */
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2020, Google LLC.
4 *
5 * Tests for KVM paravirtual feature disablement
6 */
7#include <asm/kvm_para.h>
8#include <linux/kvm_para.h>
9#include <stdint.h>
10
11#include "test_util.h"
12#include "kvm_util.h"
13#include "processor.h"
14
15extern unsigned char rdmsr_start;
16extern unsigned char rdmsr_end;
17
18static u64 do_rdmsr(u32 idx)
19{
20 u32 lo, hi;
21
22 asm volatile("rdmsr_start: rdmsr;"
23 "rdmsr_end:"
24 : "=a"(lo), "=c"(hi)
25 : "c"(idx));
26
27 return (((u64) hi) << 32) | lo;
28}
29
30extern unsigned char wrmsr_start;
31extern unsigned char wrmsr_end;
32
33static void do_wrmsr(u32 idx, u64 val)
34{
35 u32 lo, hi;
36
37 lo = val;
38 hi = val >> 32;
39
40 asm volatile("wrmsr_start: wrmsr;"
41 "wrmsr_end:"
42 : : "a"(lo), "c"(idx), "d"(hi));
43}
44
45static int nr_gp;
46
47static void guest_gp_handler(struct ex_regs *regs)
48{
49 unsigned char *rip = (unsigned char *)regs->rip;
50 bool r, w;
51
52 r = rip == &rdmsr_start;
53 w = rip == &wrmsr_start;
54 GUEST_ASSERT(r || w);
55
56 nr_gp++;
57
58 if (r)
59 regs->rip = (uint64_t)&rdmsr_end;
60 else
61 regs->rip = (uint64_t)&wrmsr_end;
62}
63
/* An MSR under test: its index plus a printable name for host-side logs. */
struct msr_data {
	uint32_t idx;
	const char *name;
};

/* Build a msr_data entry whose name is the stringified MSR macro. */
#define TEST_MSR(msr) { .idx = msr, .name = #msr }
/* Private ucall code: guest asks the host to print the MSR being tested. */
#define UCALL_PR_MSR 0xdeadbeef
#define PR_MSR(msr) ucall(UCALL_PR_MSR, 1, msr)
72
/*
 * KVM paravirtual msrs to test. Expect a #GP if any of these msrs are read or
 * written, as the KVM_CPUID_FEATURES leaf is cleared.
 */
static struct msr_data msrs_to_test[] = {
	TEST_MSR(MSR_KVM_SYSTEM_TIME),
	TEST_MSR(MSR_KVM_SYSTEM_TIME_NEW),
	TEST_MSR(MSR_KVM_WALL_CLOCK),
	TEST_MSR(MSR_KVM_WALL_CLOCK_NEW),
	TEST_MSR(MSR_KVM_ASYNC_PF_EN),
	TEST_MSR(MSR_KVM_STEAL_TIME),
	TEST_MSR(MSR_KVM_PV_EOI_EN),
	TEST_MSR(MSR_KVM_POLL_CONTROL),
	TEST_MSR(MSR_KVM_ASYNC_PF_INT),
	TEST_MSR(MSR_KVM_ASYNC_PF_ACK),
};
89
90static void test_msr(struct msr_data *msr)
91{
92 PR_MSR(msr);
93 do_rdmsr(msr->idx);
94 GUEST_ASSERT(READ_ONCE(nr_gp) == 1);
95
96 nr_gp = 0;
97 do_wrmsr(msr->idx, 0);
98 GUEST_ASSERT(READ_ONCE(nr_gp) == 1);
99 nr_gp = 0;
100}
101
/* A hypercall under test: its number plus a printable name for logs. */
struct hcall_data {
	uint64_t nr;
	const char *name;
};

/* Build a hcall_data entry whose name is the stringified hypercall macro. */
#define TEST_HCALL(hc) { .nr = hc, .name = #hc }
/* Private ucall code: guest asks the host to print the hypercall name. */
#define UCALL_PR_HCALL 0xdeadc0de
#define PR_HCALL(hc) ucall(UCALL_PR_HCALL, 1, hc)
110
/*
 * KVM hypercalls to test. Expect -KVM_ENOSYS when called, as the corresponding
 * features have been cleared in KVM_CPUID_FEATURES.
 */
static struct hcall_data hcalls_to_test[] = {
	TEST_HCALL(KVM_HC_KICK_CPU),
	TEST_HCALL(KVM_HC_SEND_IPI),
	TEST_HCALL(KVM_HC_SCHED_YIELD),
};
120
121static void test_hcall(struct hcall_data *hc)
122{
123 uint64_t r;
124
125 PR_HCALL(hc);
126 r = kvm_hypercall(hc->nr, 0, 0, 0, 0);
127 GUEST_ASSERT(r == -KVM_ENOSYS);
128}
129
130static void guest_main(void)
131{
132 int i;
133
134 for (i = 0; i < ARRAY_SIZE(msrs_to_test); i++) {
135 test_msr(&msrs_to_test[i]);
136 }
137
138 for (i = 0; i < ARRAY_SIZE(hcalls_to_test); i++) {
139 test_hcall(&hcalls_to_test[i]);
140 }
141
142 GUEST_DONE();
143}
144
145static void clear_kvm_cpuid_features(struct kvm_cpuid2 *cpuid)
146{
147 struct kvm_cpuid_entry2 ent = {0};
148
149 ent.function = KVM_CPUID_FEATURES;
150 TEST_ASSERT(set_cpuid(cpuid, &ent),
151 "failed to clear KVM_CPUID_FEATURES leaf");
152}
153
154static void pr_msr(struct ucall *uc)
155{
156 struct msr_data *msr = (struct msr_data *)uc->args[0];
157
158 pr_info("testing msr: %s (%#x)\n", msr->name, msr->idx);
159}
160
161static void pr_hcall(struct ucall *uc)
162{
163 struct hcall_data *hc = (struct hcall_data *)uc->args[0];
164
165 pr_info("testing hcall: %s (%lu)\n", hc->name, hc->nr);
166}
167
168static void handle_abort(struct ucall *uc)
169{
170 TEST_FAIL("%s at %s:%ld", (const char *)uc->args[0],
171 __FILE__, uc->args[1]);
172}
173
f323dbce 174static void enter_guest(struct kvm_vcpu *vcpu)
ac4a4d6d 175{
f323dbce 176 struct kvm_run *run = vcpu->run;
ac4a4d6d 177 struct ucall uc;
ac4a4d6d
OU
178
179 while (true) {
f323dbce 180 vcpu_run(vcpu->vm, vcpu->id);
ac4a4d6d
OU
181 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
182 "unexpected exit reason: %u (%s)",
183 run->exit_reason, exit_reason_str(run->exit_reason));
184
f323dbce 185 switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
ac4a4d6d
OU
186 case UCALL_PR_MSR:
187 pr_msr(&uc);
188 break;
189 case UCALL_PR_HCALL:
190 pr_hcall(&uc);
191 break;
192 case UCALL_ABORT:
193 handle_abort(&uc);
194 return;
195 case UCALL_DONE:
196 return;
197 }
198 }
199}
200
201int main(void)
202{
ac4a4d6d 203 struct kvm_cpuid2 *best;
f323dbce 204 struct kvm_vcpu *vcpu;
ac4a4d6d
OU
205 struct kvm_vm *vm;
206
207 if (!kvm_check_cap(KVM_CAP_ENFORCE_PV_FEATURE_CPUID)) {
08d3e277
AJ
208 print_skip("KVM_CAP_ENFORCE_PV_FEATURE_CPUID not supported");
209 exit(KSFT_SKIP);
ac4a4d6d
OU
210 }
211
f323dbce 212 vm = vm_create_with_one_vcpu(&vcpu, guest_main);
ac4a4d6d 213
f323dbce 214 vcpu_enable_cap(vm, vcpu->id, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 1);
ac4a4d6d
OU
215
216 best = kvm_get_supported_cpuid();
217 clear_kvm_cpuid_features(best);
f323dbce 218 vcpu_set_cpuid(vm, vcpu->id, best);
ac4a4d6d
OU
219
220 vm_init_descriptor_tables(vm);
f323dbce 221 vcpu_init_descriptor_tables(vm, vcpu->id);
b78f4a59 222 vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
ac4a4d6d 223
f323dbce 224 enter_guest(vcpu);
ac4a4d6d
OU
225 kvm_vm_free(vm);
226}