1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * Test for x86 KVM_CAP_SYNC_REGS
 *
 * Copyright (C) 2018, Google LLC.
 *
 * Verifies expected behavior of x86 KVM_CAP_SYNC_REGS functionality,
 * including requesting an invalid register set, updates to/from values
 * in kvm_run.s.regs when kvm_valid_regs and kvm_dirty_regs are toggled.
 */
12 #define _GNU_SOURCE /* for program_invocation_short_name */
17 #include <sys/ioctl.h>
20 #include "test_util.h"
22 #include "processor.h"
/* I/O port the guest accesses to force a KVM_EXIT_IO vmexit (ucall channel). */
#define UCALL_PIO_PORT ((uint16_t)0x1000)
/* Minimal ucall payload; NOTE(review): initializer fields elided in this chunk. */
struct ucall uc_none = {
/*
 * ucall is embedded here to protect against compiler reshuffling registers
 * before calling a function. In this test we only need to get KVM_EXIT_IO
 * vmexit and preserve RBX, no additional information is needed.
 */
	asm volatile("1: in %[port], %%al\n"
		     : : [port] "d" (UCALL_PIO_PORT), "D" (&uc_none)
/*
 * Assert that two kvm_regs structs match field-by-field.
 * NOTE(review): the per-register REG_COMPARE invocations and the #undef
 * appear elided from this chunk.
 */
static void compare_regs(struct kvm_regs *left, struct kvm_regs *right)
#define REG_COMPARE(reg) \
	TEST_ASSERT(left->reg == right->reg, \
		    " values did not match: 0x%llx, 0x%llx\n", \
		    left->reg, right->reg)
/* Assert that two kvm_sregs structs match; body elided in this chunk. */
static void compare_sregs(struct kvm_sregs *left, struct kvm_sregs *right)
/* Assert that two kvm_vcpu_events structs match; body elided in this chunk. */
static void compare_vcpu_events(struct kvm_vcpu_events *left,
				struct kvm_vcpu_events *right)
/* All register sets this test exercises via kvm_run.s.regs. */
#define TEST_SYNC_FIELDS (KVM_SYNC_X86_REGS|KVM_SYNC_X86_SREGS|KVM_SYNC_X86_EVENTS)
/* A sync-regs flag bit KVM does not define; KVM_RUN must reject it with EINVAL. */
#define INVALID_SYNC_FIELD 0x80000000
/*
 * Set an exception as pending *and* injected while KVM is processing events.
 * KVM is supposed to ignore/drop pending exceptions if userspace is also
 * requesting that an exception be injected.
 */
static void *race_events_inj_pen(void *arg)
	struct kvm_run *run = (struct kvm_run *)arg;
	struct kvm_vcpu_events *events = &run->s.regs.events;

	WRITE_ONCE(events->exception.nr, UD_VECTOR);

	/* NOTE(review): the racing loop's braces appear elided in this chunk. */
	WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_EVENTS);
	WRITE_ONCE(events->flags, 0);
	WRITE_ONCE(events->exception.injected, 1);
	WRITE_ONCE(events->exception.pending, 1);

	/* Allow pthread_cancel() from the main thread to terminate the loop. */
	pthread_testcancel();
/*
 * Set an invalid exception vector while KVM is processing events. KVM is
 * supposed to reject any vector >= 32, as well as NMIs (vector 2).
 */
static void *race_events_exc(void *arg)
	struct kvm_run *run = (struct kvm_run *)arg;
	struct kvm_vcpu_events *events = &run->s.regs.events;

	WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_EVENTS);
	WRITE_ONCE(events->flags, 0);
	WRITE_ONCE(events->exception.nr, UD_VECTOR);
	WRITE_ONCE(events->exception.pending, 1);
	/* Flip the vector to an illegal value (>= 32) after marking it pending. */
	WRITE_ONCE(events->exception.nr, 255);

	/* Allow pthread_cancel() from the main thread to terminate the loop. */
	pthread_testcancel();
/*
 * Toggle CR4.PAE while KVM is processing SREGS, EFER.LME=1 with CR4.PAE=0 is
 * illegal, and KVM's MMU heavily relies on vCPU state being valid.
 */
static noinline void *race_sregs_cr4(void *arg)
	struct kvm_run *run = (struct kvm_run *)arg;
	__u64 *cr4 = &run->s.regs.sregs.cr4;
	__u64 pae_enabled = *cr4;		/* snapshot of the CR4.PAE=1 value */
	__u64 pae_disabled = *cr4 & ~X86_CR4_PAE;

	WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_SREGS);
	WRITE_ONCE(*cr4, pae_enabled);
	/* presumably a short delay between the flips — .rept body elided here */
	asm volatile(".rept 512\n\t"
	WRITE_ONCE(*cr4, pae_disabled);

	/* Allow pthread_cancel() from the main thread to terminate the loop. */
	pthread_testcancel();
/*
 * Run the vCPU in a loop for roughly TIMEOUT seconds while @racer mutates
 * kvm_run.s.regs from a second thread, attempting to race KVM's sync_regs
 * processing.  NOTE(review): declarations of run/thread/t and some braces
 * are elided in this chunk.
 */
static void race_sync_regs(void *racer)
	const time_t TIMEOUT = 2; /* seconds, roughly */
	struct kvm_x86_state *state;
	struct kvm_translation tr;
	struct kvm_vcpu *vcpu;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	run->kvm_valid_regs = KVM_SYNC_X86_SREGS;
	run->kvm_valid_regs = 0;

	/* Save state *before* spawning the thread that mucks with vCPU state. */
	state = vcpu_save_state(vcpu);

	/*
	 * Selftests run 64-bit guests by default, both EFER.LME and CR4.PAE
	 * should already be set in guest state.
	 */
	TEST_ASSERT((run->s.regs.sregs.cr4 & X86_CR4_PAE) &&
		    (run->s.regs.sregs.efer & EFER_LME),
		    "vCPU should be in long mode, CR4.PAE=%d, EFER.LME=%d",
		    !!(run->s.regs.sregs.cr4 & X86_CR4_PAE),
		    !!(run->s.regs.sregs.efer & EFER_LME));

	TEST_ASSERT_EQ(pthread_create(&thread, NULL, racer, (void *)run), 0);

	for (t = time(NULL) + TIMEOUT; time(NULL) < t;) {
		/*
		 * Reload known good state if the vCPU triple faults, e.g. due
		 * to the unhandled #GPs being injected. VMX preserves state
		 * on shutdown, but SVM synthesizes an INIT as the VMCB state
		 * is architecturally undefined on triple fault.
		 */
		if (!__vcpu_run(vcpu) && run->exit_reason == KVM_EXIT_SHUTDOWN)
			vcpu_load_state(vcpu, state);

		/* For the CR4 race, also poke the MMU via KVM_TRANSLATE. */
		if (racer == race_sregs_cr4) {
			tr = (struct kvm_translation) { .linear_address = 0 };
			__vcpu_ioctl(vcpu, KVM_TRANSLATE, &tr);

	TEST_ASSERT_EQ(pthread_cancel(thread), 0);
	TEST_ASSERT_EQ(pthread_join(thread, NULL), 0);

	kvm_x86_state_cleanup(state);
211 int main(int argc, char *argv[])
213 struct kvm_vcpu *vcpu;
216 struct kvm_regs regs;
217 struct kvm_sregs sregs;
218 struct kvm_vcpu_events events;
221 cap = kvm_check_cap(KVM_CAP_SYNC_REGS);
222 TEST_REQUIRE((cap & TEST_SYNC_FIELDS) == TEST_SYNC_FIELDS);
223 TEST_REQUIRE(!(cap & INVALID_SYNC_FIELD));
225 vm = vm_create_with_one_vcpu(&vcpu, guest_code);
229 /* Request reading invalid register set from VCPU. */
230 run->kvm_valid_regs = INVALID_SYNC_FIELD;
231 rv = _vcpu_run(vcpu);
232 TEST_ASSERT(rv < 0 && errno == EINVAL,
233 "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
235 run->kvm_valid_regs = 0;
237 run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
238 rv = _vcpu_run(vcpu);
239 TEST_ASSERT(rv < 0 && errno == EINVAL,
240 "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
242 run->kvm_valid_regs = 0;
244 /* Request setting invalid register set into VCPU. */
245 run->kvm_dirty_regs = INVALID_SYNC_FIELD;
246 rv = _vcpu_run(vcpu);
247 TEST_ASSERT(rv < 0 && errno == EINVAL,
248 "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
250 run->kvm_dirty_regs = 0;
252 run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
253 rv = _vcpu_run(vcpu);
254 TEST_ASSERT(rv < 0 && errno == EINVAL,
255 "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
257 run->kvm_dirty_regs = 0;
259 /* Request and verify all valid register sets. */
260 /* TODO: BUILD TIME CHECK: TEST_ASSERT(KVM_SYNC_X86_NUM_FIELDS != 3); */
261 run->kvm_valid_regs = TEST_SYNC_FIELDS;
262 rv = _vcpu_run(vcpu);
263 TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
265 vcpu_regs_get(vcpu, ®s);
266 compare_regs(®s, &run->s.regs.regs);
268 vcpu_sregs_get(vcpu, &sregs);
269 compare_sregs(&sregs, &run->s.regs.sregs);
271 vcpu_events_get(vcpu, &events);
272 compare_vcpu_events(&events, &run->s.regs.events);
274 /* Set and verify various register values. */
275 run->s.regs.regs.rbx = 0xBAD1DEA;
276 run->s.regs.sregs.apic_base = 1 << 11;
277 /* TODO run->s.regs.events.XYZ = ABC; */
279 run->kvm_valid_regs = TEST_SYNC_FIELDS;
280 run->kvm_dirty_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS;
281 rv = _vcpu_run(vcpu);
282 TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
283 TEST_ASSERT(run->s.regs.regs.rbx == 0xBAD1DEA + 1,
284 "rbx sync regs value incorrect 0x%llx.",
285 run->s.regs.regs.rbx);
286 TEST_ASSERT(run->s.regs.sregs.apic_base == 1 << 11,
287 "apic_base sync regs value incorrect 0x%llx.",
288 run->s.regs.sregs.apic_base);
290 vcpu_regs_get(vcpu, ®s);
291 compare_regs(®s, &run->s.regs.regs);
293 vcpu_sregs_get(vcpu, &sregs);
294 compare_sregs(&sregs, &run->s.regs.sregs);
296 vcpu_events_get(vcpu, &events);
297 compare_vcpu_events(&events, &run->s.regs.events);
299 /* Clear kvm_dirty_regs bits, verify new s.regs values are
300 * overwritten with existing guest values.
302 run->kvm_valid_regs = TEST_SYNC_FIELDS;
303 run->kvm_dirty_regs = 0;
304 run->s.regs.regs.rbx = 0xDEADBEEF;
305 rv = _vcpu_run(vcpu);
306 TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
307 TEST_ASSERT(run->s.regs.regs.rbx != 0xDEADBEEF,
308 "rbx sync regs value incorrect 0x%llx.",
309 run->s.regs.regs.rbx);
311 /* Clear kvm_valid_regs bits and kvm_dirty_bits.
312 * Verify s.regs values are not overwritten with existing guest values
313 * and that guest values are not overwritten with kvm_sync_regs values.
315 run->kvm_valid_regs = 0;
316 run->kvm_dirty_regs = 0;
317 run->s.regs.regs.rbx = 0xAAAA;
319 vcpu_regs_set(vcpu, ®s);
320 rv = _vcpu_run(vcpu);
321 TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
322 TEST_ASSERT(run->s.regs.regs.rbx == 0xAAAA,
323 "rbx sync regs value incorrect 0x%llx.",
324 run->s.regs.regs.rbx);
325 vcpu_regs_get(vcpu, ®s);
326 TEST_ASSERT(regs.rbx == 0xBAC0 + 1,
327 "rbx guest value incorrect 0x%llx.",
330 /* Clear kvm_valid_regs bits. Verify s.regs values are not overwritten
331 * with existing guest values but that guest values are overwritten
332 * with kvm_sync_regs values.
334 run->kvm_valid_regs = 0;
335 run->kvm_dirty_regs = TEST_SYNC_FIELDS;
336 run->s.regs.regs.rbx = 0xBBBB;
337 rv = _vcpu_run(vcpu);
338 TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
339 TEST_ASSERT(run->s.regs.regs.rbx == 0xBBBB,
340 "rbx sync regs value incorrect 0x%llx.",
341 run->s.regs.regs.rbx);
342 vcpu_regs_get(vcpu, ®s);
343 TEST_ASSERT(regs.rbx == 0xBBBB + 1,
344 "rbx guest value incorrect 0x%llx.",
349 race_sync_regs(race_sregs_cr4);
350 race_sync_regs(race_events_exc);
351 race_sync_regs(race_events_inj_pen);