Merge tag 'selinux-pr-20220523' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux-block.git] / arch / x86 / kvm / vmx / nested.c
CommitLineData
55d2375e
SC
1// SPDX-License-Identifier: GPL-2.0
2
00089c04 3#include <linux/objtool.h>
55d2375e
SC
4#include <linux/percpu.h>
5
6#include <asm/debugreg.h>
7#include <asm/mmu_context.h>
8
9#include "cpuid.h"
6cbbaab6 10#include "evmcs.h"
55d2375e
SC
11#include "hyperv.h"
12#include "mmu.h"
13#include "nested.h"
bfc6ad6a 14#include "pmu.h"
72add915 15#include "sgx.h"
55d2375e 16#include "trace.h"
150f17bf 17#include "vmx.h"
55d2375e
SC
18#include "x86.h"
19
20static bool __read_mostly enable_shadow_vmcs = 1;
21module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
22
23static bool __read_mostly nested_early_check = 0;
24module_param(nested_early_check, bool, S_IRUGO);
25
648fc8ae 26#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
5497b955 27
55d2375e
SC
28/*
29 * Hyper-V requires all of these, so mark them as supported even though
30 * they are just treated the same as all-context.
31 */
32#define VMX_VPID_EXTENT_SUPPORTED_MASK \
33 (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \
34 VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \
35 VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \
36 VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
37
38#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
39
40enum {
41 VMX_VMREAD_BITMAP,
42 VMX_VMWRITE_BITMAP,
43 VMX_BITMAP_NR
44};
45static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
46
47#define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP])
48#define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP])
49
1c6f0b47
SC
50struct shadow_vmcs_field {
51 u16 encoding;
52 u16 offset;
53};
54static struct shadow_vmcs_field shadow_read_only_fields[] = {
55#define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) },
55d2375e
SC
56#include "vmcs_shadow_fields.h"
57};
58static int max_shadow_read_only_fields =
59 ARRAY_SIZE(shadow_read_only_fields);
60
1c6f0b47
SC
61static struct shadow_vmcs_field shadow_read_write_fields[] = {
62#define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) },
55d2375e
SC
63#include "vmcs_shadow_fields.h"
64};
65static int max_shadow_read_write_fields =
66 ARRAY_SIZE(shadow_read_write_fields);
67
8997f657 68static void init_vmcs_shadow_fields(void)
55d2375e
SC
69{
70 int i, j;
71
72 memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
73 memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
74
75 for (i = j = 0; i < max_shadow_read_only_fields; i++) {
1c6f0b47
SC
76 struct shadow_vmcs_field entry = shadow_read_only_fields[i];
77 u16 field = entry.encoding;
55d2375e
SC
78
79 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
80 (i + 1 == max_shadow_read_only_fields ||
1c6f0b47 81 shadow_read_only_fields[i + 1].encoding != field + 1))
55d2375e
SC
82 pr_err("Missing field from shadow_read_only_field %x\n",
83 field + 1);
84
85 clear_bit(field, vmx_vmread_bitmap);
55d2375e 86 if (field & 1)
1c6f0b47 87#ifdef CONFIG_X86_64
55d2375e 88 continue;
1c6f0b47
SC
89#else
90 entry.offset += sizeof(u32);
55d2375e 91#endif
1c6f0b47 92 shadow_read_only_fields[j++] = entry;
55d2375e
SC
93 }
94 max_shadow_read_only_fields = j;
95
96 for (i = j = 0; i < max_shadow_read_write_fields; i++) {
1c6f0b47
SC
97 struct shadow_vmcs_field entry = shadow_read_write_fields[i];
98 u16 field = entry.encoding;
55d2375e
SC
99
100 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
101 (i + 1 == max_shadow_read_write_fields ||
1c6f0b47 102 shadow_read_write_fields[i + 1].encoding != field + 1))
55d2375e
SC
103 pr_err("Missing field from shadow_read_write_field %x\n",
104 field + 1);
105
b6437805
SC
106 WARN_ONCE(field >= GUEST_ES_AR_BYTES &&
107 field <= GUEST_TR_AR_BYTES,
1c6f0b47 108 "Update vmcs12_write_any() to drop reserved bits from AR_BYTES");
b6437805 109
55d2375e
SC
110 /*
111 * PML and the preemption timer can be emulated, but the
112 * processor cannot vmwrite to fields that don't exist
113 * on bare metal.
114 */
115 switch (field) {
116 case GUEST_PML_INDEX:
117 if (!cpu_has_vmx_pml())
118 continue;
119 break;
120 case VMX_PREEMPTION_TIMER_VALUE:
121 if (!cpu_has_vmx_preemption_timer())
122 continue;
123 break;
124 case GUEST_INTR_STATUS:
125 if (!cpu_has_vmx_apicv())
126 continue;
127 break;
128 default:
129 break;
130 }
131
132 clear_bit(field, vmx_vmwrite_bitmap);
133 clear_bit(field, vmx_vmread_bitmap);
55d2375e 134 if (field & 1)
1c6f0b47 135#ifdef CONFIG_X86_64
55d2375e 136 continue;
1c6f0b47
SC
137#else
138 entry.offset += sizeof(u32);
55d2375e 139#endif
1c6f0b47 140 shadow_read_write_fields[j++] = entry;
55d2375e
SC
141 }
142 max_shadow_read_write_fields = j;
143}
144
145/*
146 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
147 * set the success or error code of an emulated VMX instruction (as specified
148 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
149 * instruction.
150 */
151static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
152{
153 vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
154 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
155 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
156 return kvm_skip_emulated_instruction(vcpu);
157}
158
159static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
160{
161 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
162 & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
163 X86_EFLAGS_SF | X86_EFLAGS_OF))
164 | X86_EFLAGS_CF);
165 return kvm_skip_emulated_instruction(vcpu);
166}
167
168static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
169 u32 vm_instruction_error)
170{
55d2375e
SC
171 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
172 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
173 X86_EFLAGS_SF | X86_EFLAGS_OF))
174 | X86_EFLAGS_ZF);
175 get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
176 /*
b7685cfd
VK
177 * We don't need to force sync to shadow VMCS because
178 * VM_INSTRUCTION_ERROR is not shadowed. Enlightened VMCS 'shadows' all
179 * fields and thus must be synced.
55d2375e 180 */
b7685cfd
VK
181 if (to_vmx(vcpu)->nested.hv_evmcs_vmptr != EVMPTR_INVALID)
182 to_vmx(vcpu)->nested.need_vmcs12_to_shadow_sync = true;
183
55d2375e
SC
184 return kvm_skip_emulated_instruction(vcpu);
185}
186
b2656e4d
SC
187static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error)
188{
189 struct vcpu_vmx *vmx = to_vmx(vcpu);
190
191 /*
192 * failValid writes the error number to the current VMCS, which
193 * can't be done if there isn't a current VMCS.
194 */
64c78508 195 if (vmx->nested.current_vmptr == INVALID_GPA &&
1e9dfbd7 196 !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
b2656e4d
SC
197 return nested_vmx_failInvalid(vcpu);
198
199 return nested_vmx_failValid(vcpu, vm_instruction_error);
200}
201
55d2375e
SC
202static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
203{
204 /* TODO: not to reset guest simply here. */
205 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
206 pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
207}
208
f0b5105a
MO
209static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
210{
211 return fixed_bits_valid(control, low, high);
212}
213
214static inline u64 vmx_control_msr(u32 low, u32 high)
215{
216 return low | ((u64)high << 32);
217}
218
55d2375e
SC
219static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
220{
fe7f895d 221 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
64c78508 222 vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA);
88dddc11 223 vmx->nested.need_vmcs12_to_shadow_sync = false;
55d2375e
SC
224}
225
226static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
227{
228 struct vcpu_vmx *vmx = to_vmx(vcpu);
229
1e9dfbd7
VK
230 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
231 kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
232 vmx->nested.hv_evmcs = NULL;
233 }
55d2375e 234
1e9dfbd7 235 vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
55d2375e
SC
236}
237
c61ca2fc
SC
238static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
239 struct loaded_vmcs *prev)
240{
241 struct vmcs_host_state *dest, *src;
242
243 if (unlikely(!vmx->guest_state_loaded))
244 return;
245
246 src = &prev->host_state;
247 dest = &vmx->loaded_vmcs->host_state;
248
bca06b85 249 vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base);
c61ca2fc
SC
250 dest->ldt_sel = src->ldt_sel;
251#ifdef CONFIG_X86_64
252 dest->ds_sel = src->ds_sel;
253 dest->es_sel = src->es_sel;
254#endif
255}
256
257static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
258{
259 struct vcpu_vmx *vmx = to_vmx(vcpu);
260 struct loaded_vmcs *prev;
261 int cpu;
262
138534a8 263 if (WARN_ON_ONCE(vmx->loaded_vmcs == vmcs))
c61ca2fc
SC
264 return;
265
266 cpu = get_cpu();
267 prev = vmx->loaded_vmcs;
268 vmx->loaded_vmcs = vmcs;
269 vmx_vcpu_load_vmcs(vcpu, cpu, prev);
270 vmx_sync_vmcs_host_state(vmx, prev);
271 put_cpu();
272
41e68b69
PB
273 vcpu->arch.regs_avail = ~VMX_REGS_LAZY_LOAD_SET;
274
275 /*
276 * All lazily updated registers will be reloaded from VMCS12 on both
277 * vmentry and vmexit.
278 */
279 vcpu->arch.regs_dirty = 0;
c61ca2fc
SC
280}
281
55d2375e
SC
282/*
283 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
284 * just stops using VMX.
285 */
286static void free_nested(struct kvm_vcpu *vcpu)
287{
288 struct vcpu_vmx *vmx = to_vmx(vcpu);
289
df82a24b
SC
290 if (WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01))
291 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
292
55d2375e
SC
293 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
294 return;
295
729c15c2 296 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
cf64527b 297
55d2375e
SC
298 vmx->nested.vmxon = false;
299 vmx->nested.smm.vmxon = false;
feb3162f 300 vmx->nested.vmxon_ptr = INVALID_GPA;
55d2375e
SC
301 free_vpid(vmx->nested.vpid02);
302 vmx->nested.posted_intr_nv = -1;
64c78508 303 vmx->nested.current_vmptr = INVALID_GPA;
55d2375e
SC
304 if (enable_shadow_vmcs) {
305 vmx_disable_shadow_vmcs(vmx);
306 vmcs_clear(vmx->vmcs01.shadow_vmcs);
307 free_vmcs(vmx->vmcs01.shadow_vmcs);
308 vmx->vmcs01.shadow_vmcs = NULL;
309 }
310 kfree(vmx->nested.cached_vmcs12);
c6bf2ae9 311 vmx->nested.cached_vmcs12 = NULL;
55d2375e 312 kfree(vmx->nested.cached_shadow_vmcs12);
c6bf2ae9 313 vmx->nested.cached_shadow_vmcs12 = NULL;
55d2375e
SC
314 /* Unpin physical memory we referred to in the vmcs02 */
315 if (vmx->nested.apic_access_page) {
b11494bc 316 kvm_release_page_clean(vmx->nested.apic_access_page);
55d2375e
SC
317 vmx->nested.apic_access_page = NULL;
318 }
96c66e87 319 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
3278e049
KA
320 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
321 vmx->nested.pi_desc = NULL;
55d2375e 322
0c1c92f1 323 kvm_mmu_free_roots(vcpu->kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
55d2375e
SC
324
325 nested_release_evmcs(vcpu);
326
327 free_loaded_vmcs(&vmx->nested.vmcs02);
328}
329
55d2375e
SC
330/*
331 * Ensure that the current vmcs of the logical processor is the
332 * vmcs01 of the vcpu before calling free_nested().
333 */
334void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
335{
336 vcpu_load(vcpu);
b4b65b56 337 vmx_leave_nested(vcpu);
55d2375e
SC
338 vcpu_put(vcpu);
339}
340
85aa8889
JS
341#define EPTP_PA_MASK GENMASK_ULL(51, 12)
342
343static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
344{
345 return VALID_PAGE(root_hpa) &&
346 ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
347}
348
349static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
350 gpa_t addr)
351{
352 uint i;
353 struct kvm_mmu_root_info *cached_root;
354
355 WARN_ON_ONCE(!mmu_is_nested(vcpu));
356
357 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
358 cached_root = &vcpu->arch.mmu->prev_roots[i];
359
360 if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd,
361 eptp))
362 vcpu->arch.mmu->invlpg(vcpu, addr, cached_root->hpa);
363 }
364}
365
55d2375e
SC
366static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
367 struct x86_exception *fault)
368{
369 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
370 struct vcpu_vmx *vmx = to_vmx(vcpu);
4dcefa31 371 u32 vm_exit_reason;
55d2375e
SC
372 unsigned long exit_qualification = vcpu->arch.exit_qualification;
373
374 if (vmx->nested.pml_full) {
4dcefa31 375 vm_exit_reason = EXIT_REASON_PML_FULL;
55d2375e
SC
376 vmx->nested.pml_full = false;
377 exit_qualification &= INTR_INFO_UNBLOCK_NMI;
85aa8889
JS
378 } else {
379 if (fault->error_code & PFERR_RSVD_MASK)
380 vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
381 else
382 vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
383
384 /*
385 * Although the caller (kvm_inject_emulated_page_fault) would
386 * have already synced the faulting address in the shadow EPT
387 * tables for the current EPTP12, we also need to sync it for
388 * any other cached EPTP02s based on the same EP4TA, since the
389 * TLB associates mappings to the EP4TA rather than the full EPTP.
390 */
391 nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer,
392 fault->address);
393 }
55d2375e 394
4dcefa31 395 nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification);
55d2375e
SC
396 vmcs12->guest_physical_address = fault->address;
397}
398
39353ab5
SC
399static void nested_ept_new_eptp(struct kvm_vcpu *vcpu)
400{
cc022ae1
LJ
401 struct vcpu_vmx *vmx = to_vmx(vcpu);
402 bool execonly = vmx->nested.msrs.ept_caps & VMX_EPT_EXECUTE_ONLY_BIT;
403 int ept_lpage_level = ept_caps_to_lpage_level(vmx->nested.msrs.ept_caps);
404
405 kvm_init_shadow_ept_mmu(vcpu, execonly, ept_lpage_level,
39353ab5
SC
406 nested_ept_ad_enabled(vcpu),
407 nested_ept_get_eptp(vcpu));
408}
409
55d2375e
SC
410static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
411{
412 WARN_ON(mmu_is_nested(vcpu));
413
414 vcpu->arch.mmu = &vcpu->arch.guest_mmu;
39353ab5 415 nested_ept_new_eptp(vcpu);
d8dd54e0 416 vcpu->arch.mmu->get_guest_pgd = nested_ept_get_eptp;
55d2375e
SC
417 vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
418 vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;
419
420 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
421}
422
423static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
424{
425 vcpu->arch.mmu = &vcpu->arch.root_mmu;
426 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
427}
428
429static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
430 u16 error_code)
431{
432 bool inequality, bit;
433
434 bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
435 inequality =
436 (error_code & vmcs12->page_fault_error_code_mask) !=
437 vmcs12->page_fault_error_code_match;
438 return inequality ^ bit;
439}
440
441
442/*
443 * KVM wants to inject page-faults which it got to the guest. This function
444 * checks whether in a nested guest, we need to inject them to L1 or L2.
445 */
446static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
447{
448 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
449 unsigned int nr = vcpu->arch.exception.nr;
450 bool has_payload = vcpu->arch.exception.has_payload;
451 unsigned long payload = vcpu->arch.exception.payload;
452
453 if (nr == PF_VECTOR) {
454 if (vcpu->arch.exception.nested_apf) {
455 *exit_qual = vcpu->arch.apf.nested_apf_token;
456 return 1;
457 }
458 if (nested_vmx_is_page_fault_vmexit(vmcs12,
459 vcpu->arch.exception.error_code)) {
460 *exit_qual = has_payload ? payload : vcpu->arch.cr2;
461 return 1;
462 }
463 } else if (vmcs12->exception_bitmap & (1u << nr)) {
464 if (nr == DB_VECTOR) {
465 if (!has_payload) {
466 payload = vcpu->arch.dr6;
9a3ecd5e
CQ
467 payload &= ~DR6_BT;
468 payload ^= DR6_ACTIVE_LOW;
55d2375e
SC
469 }
470 *exit_qual = payload;
471 } else
472 *exit_qual = 0;
473 return 1;
474 }
475
476 return 0;
477}
478
479
480static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
481 struct x86_exception *fault)
482{
483 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
484
485 WARN_ON(!is_guest_mode(vcpu));
486
487 if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
488 !to_vmx(vcpu)->nested.nested_run_pending) {
489 vmcs12->vm_exit_intr_error_code = fault->error_code;
490 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
491 PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
492 INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
493 fault->address);
494 } else {
495 kvm_inject_page_fault(vcpu, fault);
496 }
497}
498
55d2375e
SC
499static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
500 struct vmcs12 *vmcs12)
501{
502 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
503 return 0;
504
5497b955
SC
505 if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) ||
506 CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b)))
55d2375e
SC
507 return -EINVAL;
508
509 return 0;
510}
511
512static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
513 struct vmcs12 *vmcs12)
514{
515 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
516 return 0;
517
5497b955 518 if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap)))
55d2375e
SC
519 return -EINVAL;
520
521 return 0;
522}
523
524static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
525 struct vmcs12 *vmcs12)
526{
527 if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
528 return 0;
529
5497b955 530 if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)))
55d2375e
SC
531 return -EINVAL;
532
533 return 0;
534}
535
55d2375e 536/*
a5e0c252
SC
537 * For x2APIC MSRs, ignore the vmcs01 bitmap. L1 can enable x2APIC without L1
538 * itself utilizing x2APIC. All MSRs were previously set to be intercepted,
539 * only the "disable intercept" case needs to be handled.
55d2375e 540 */
a5e0c252
SC
541static void nested_vmx_disable_intercept_for_x2apic_msr(unsigned long *msr_bitmap_l1,
542 unsigned long *msr_bitmap_l0,
543 u32 msr, int type)
55d2375e 544{
a5e0c252
SC
545 if (type & MSR_TYPE_R && !vmx_test_msr_bitmap_read(msr_bitmap_l1, msr))
546 vmx_clear_msr_bitmap_read(msr_bitmap_l0, msr);
55d2375e 547
a5e0c252
SC
548 if (type & MSR_TYPE_W && !vmx_test_msr_bitmap_write(msr_bitmap_l1, msr))
549 vmx_clear_msr_bitmap_write(msr_bitmap_l0, msr);
55d2375e
SC
550}
551
ffdbd50d
ML
552static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
553{
acff7847
MO
554 int msr;
555
556 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
557 unsigned word = msr / BITS_PER_LONG;
558
559 msr_bitmap[word] = ~0;
560 msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
561 }
562}
563
67f4b996
SC
564#define BUILD_NVMX_MSR_INTERCEPT_HELPER(rw) \
565static inline \
566void nested_vmx_set_msr_##rw##_intercept(struct vcpu_vmx *vmx, \
567 unsigned long *msr_bitmap_l1, \
568 unsigned long *msr_bitmap_l0, u32 msr) \
569{ \
570 if (vmx_test_msr_bitmap_##rw(vmx->vmcs01.msr_bitmap, msr) || \
571 vmx_test_msr_bitmap_##rw(msr_bitmap_l1, msr)) \
572 vmx_set_msr_bitmap_##rw(msr_bitmap_l0, msr); \
573 else \
574 vmx_clear_msr_bitmap_##rw(msr_bitmap_l0, msr); \
575}
576BUILD_NVMX_MSR_INTERCEPT_HELPER(read)
577BUILD_NVMX_MSR_INTERCEPT_HELPER(write)
578
579static inline void nested_vmx_set_intercept_for_msr(struct vcpu_vmx *vmx,
580 unsigned long *msr_bitmap_l1,
581 unsigned long *msr_bitmap_l0,
582 u32 msr, int types)
583{
584 if (types & MSR_TYPE_R)
585 nested_vmx_set_msr_read_intercept(vmx, msr_bitmap_l1,
586 msr_bitmap_l0, msr);
587 if (types & MSR_TYPE_W)
588 nested_vmx_set_msr_write_intercept(vmx, msr_bitmap_l1,
589 msr_bitmap_l0, msr);
590}
591
55d2375e
SC
592/*
593 * Merge L0's and L1's MSR bitmap, return false to indicate that
594 * we do not use the hardware.
595 */
596static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
597 struct vmcs12 *vmcs12)
598{
67f4b996 599 struct vcpu_vmx *vmx = to_vmx(vcpu);
55d2375e 600 int msr;
55d2375e 601 unsigned long *msr_bitmap_l1;
67f4b996 602 unsigned long *msr_bitmap_l0 = vmx->nested.vmcs02.msr_bitmap;
502d2bf5 603 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
67f4b996 604 struct kvm_host_map *map = &vmx->nested.msr_bitmap_map;
55d2375e
SC
605
606 /* Nothing to do if the MSR bitmap is not in use. */
607 if (!cpu_has_vmx_msr_bitmap() ||
608 !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
609 return false;
610
502d2bf5
VK
611 /*
612 * MSR bitmap update can be skipped when:
613 * - MSR bitmap for L1 hasn't changed.
614 * - Nested hypervisor (L1) is attempting to launch the same L2 as
615 * before.
616 * - Nested hypervisor (L1) has enabled 'Enlightened MSR Bitmap' feature
617 * and tells KVM (L0) there were no changes in MSR bitmap for L2.
618 */
619 if (!vmx->nested.force_msr_bitmap_recalc && evmcs &&
620 evmcs->hv_enlightenments_control.msr_bitmap &&
621 evmcs->hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP)
622 return true;
623
31f0b6c4 624 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
55d2375e
SC
625 return false;
626
31f0b6c4 627 msr_bitmap_l1 = (unsigned long *)map->hva;
55d2375e 628
acff7847
MO
629 /*
630 * To keep the control flow simple, pay eight 8-byte writes (sixteen
631 * 4-byte writes on 32-bit systems) up front to enable intercepts for
a5e0c252 632 * the x2APIC MSR range and selectively toggle those relevant to L2.
acff7847
MO
633 */
634 enable_x2apic_msr_intercepts(msr_bitmap_l0);
635
636 if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
637 if (nested_cpu_has_apic_reg_virt(vmcs12)) {
638 /*
639 * L0 need not intercept reads for MSRs between 0x800
640 * and 0x8ff, it just lets the processor take the value
641 * from the virtual-APIC page; take those 256 bits
642 * directly from the L1 bitmap.
643 */
644 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
645 unsigned word = msr / BITS_PER_LONG;
646
647 msr_bitmap_l0[word] = msr_bitmap_l1[word];
648 }
649 }
55d2375e 650
a5e0c252 651 nested_vmx_disable_intercept_for_x2apic_msr(
55d2375e 652 msr_bitmap_l1, msr_bitmap_l0,
acff7847 653 X2APIC_MSR(APIC_TASKPRI),
c73f4c99 654 MSR_TYPE_R | MSR_TYPE_W);
acff7847
MO
655
656 if (nested_cpu_has_vid(vmcs12)) {
a5e0c252 657 nested_vmx_disable_intercept_for_x2apic_msr(
acff7847
MO
658 msr_bitmap_l1, msr_bitmap_l0,
659 X2APIC_MSR(APIC_EOI),
660 MSR_TYPE_W);
a5e0c252 661 nested_vmx_disable_intercept_for_x2apic_msr(
acff7847
MO
662 msr_bitmap_l1, msr_bitmap_l0,
663 X2APIC_MSR(APIC_SELF_IPI),
664 MSR_TYPE_W);
665 }
55d2375e
SC
666 }
667
67f4b996
SC
668 /*
669 * Always check vmcs01's bitmap to honor userspace MSR filters and any
670 * other runtime changes to vmcs01's bitmap, e.g. dynamic pass-through.
671 */
dbdd096a 672#ifdef CONFIG_X86_64
67f4b996
SC
673 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
674 MSR_FS_BASE, MSR_TYPE_RW);
d69129b4 675
67f4b996
SC
676 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
677 MSR_GS_BASE, MSR_TYPE_RW);
d69129b4 678
67f4b996
SC
679 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
680 MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
dbdd096a 681#endif
67f4b996
SC
682 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
683 MSR_IA32_SPEC_CTRL, MSR_TYPE_RW);
d69129b4 684
67f4b996
SC
685 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
686 MSR_IA32_PRED_CMD, MSR_TYPE_W);
55d2375e 687
67f4b996 688 kvm_vcpu_unmap(vcpu, &vmx->nested.msr_bitmap_map, false);
55d2375e 689
ed2a4800
VK
690 vmx->nested.force_msr_bitmap_recalc = false;
691
55d2375e
SC
692 return true;
693}
694
695static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
696 struct vmcs12 *vmcs12)
697{
297d597a
DW
698 struct vcpu_vmx *vmx = to_vmx(vcpu);
699 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
55d2375e
SC
700
701 if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
64c78508 702 vmcs12->vmcs_link_pointer == INVALID_GPA)
55d2375e
SC
703 return;
704
297d597a
DW
705 if (ghc->gpa != vmcs12->vmcs_link_pointer &&
706 kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
707 vmcs12->vmcs_link_pointer, VMCS12_SIZE))
88925305 708 return;
55d2375e 709
297d597a
DW
710 kvm_read_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu),
711 VMCS12_SIZE);
55d2375e
SC
712}
713
714static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
715 struct vmcs12 *vmcs12)
716{
717 struct vcpu_vmx *vmx = to_vmx(vcpu);
297d597a 718 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
55d2375e
SC
719
720 if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
64c78508 721 vmcs12->vmcs_link_pointer == INVALID_GPA)
55d2375e
SC
722 return;
723
297d597a
DW
724 if (ghc->gpa != vmcs12->vmcs_link_pointer &&
725 kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
726 vmcs12->vmcs_link_pointer, VMCS12_SIZE))
727 return;
728
729 kvm_write_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu),
730 VMCS12_SIZE);
55d2375e
SC
731}
732
733/*
734 * In nested virtualization, check if L1 has set
735 * VM_EXIT_ACK_INTR_ON_EXIT
736 */
737static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
738{
739 return get_vmcs12(vcpu)->vm_exit_controls &
740 VM_EXIT_ACK_INTR_ON_EXIT;
741}
742
55d2375e
SC
743static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
744 struct vmcs12 *vmcs12)
745{
746 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
5497b955 747 CC(!page_address_valid(vcpu, vmcs12->apic_access_addr)))
55d2375e
SC
748 return -EINVAL;
749 else
750 return 0;
751}
752
753static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
754 struct vmcs12 *vmcs12)
755{
756 if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
757 !nested_cpu_has_apic_reg_virt(vmcs12) &&
758 !nested_cpu_has_vid(vmcs12) &&
759 !nested_cpu_has_posted_intr(vmcs12))
760 return 0;
761
762 /*
763 * If virtualize x2apic mode is enabled,
764 * virtualize apic access must be disabled.
765 */
5497b955
SC
766 if (CC(nested_cpu_has_virt_x2apic_mode(vmcs12) &&
767 nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)))
55d2375e
SC
768 return -EINVAL;
769
770 /*
771 * If virtual interrupt delivery is enabled,
772 * we must exit on external interrupts.
773 */
5497b955 774 if (CC(nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu)))
55d2375e
SC
775 return -EINVAL;
776
777 /*
778 * bits 15:8 should be zero in posted_intr_nv,
779 * the descriptor address has been already checked
780 * in nested_get_vmcs12_pages.
781 *
782 * bits 5:0 of posted_intr_desc_addr should be zero.
783 */
784 if (nested_cpu_has_posted_intr(vmcs12) &&
5497b955
SC
785 (CC(!nested_cpu_has_vid(vmcs12)) ||
786 CC(!nested_exit_intr_ack_set(vcpu)) ||
787 CC((vmcs12->posted_intr_nv & 0xff00)) ||
636e8b73 788 CC(!kvm_vcpu_is_legal_aligned_gpa(vcpu, vmcs12->posted_intr_desc_addr, 64))))
55d2375e
SC
789 return -EINVAL;
790
791 /* tpr shadow is needed by all apicv features. */
5497b955 792 if (CC(!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)))
55d2375e
SC
793 return -EINVAL;
794
795 return 0;
796}
797
798static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
f9b245e1 799 u32 count, u64 addr)
55d2375e 800{
55d2375e
SC
801 if (count == 0)
802 return 0;
636e8b73
SC
803
804 if (!kvm_vcpu_is_legal_aligned_gpa(vcpu, addr, 16) ||
805 !kvm_vcpu_is_legal_gpa(vcpu, (addr + count * sizeof(struct vmx_msr_entry) - 1)))
55d2375e 806 return -EINVAL;
f9b245e1 807
55d2375e
SC
808 return 0;
809}
810
61446ba7
KS
811static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
812 struct vmcs12 *vmcs12)
55d2375e 813{
5497b955
SC
814 if (CC(nested_vmx_check_msr_switch(vcpu,
815 vmcs12->vm_exit_msr_load_count,
816 vmcs12->vm_exit_msr_load_addr)) ||
817 CC(nested_vmx_check_msr_switch(vcpu,
818 vmcs12->vm_exit_msr_store_count,
819 vmcs12->vm_exit_msr_store_addr)))
55d2375e 820 return -EINVAL;
f9b245e1 821
55d2375e
SC
822 return 0;
823}
824
5fbf9634
KS
825static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
826 struct vmcs12 *vmcs12)
61446ba7 827{
5497b955
SC
828 if (CC(nested_vmx_check_msr_switch(vcpu,
829 vmcs12->vm_entry_msr_load_count,
830 vmcs12->vm_entry_msr_load_addr)))
61446ba7
KS
831 return -EINVAL;
832
833 return 0;
834}
835
55d2375e
SC
836static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
837 struct vmcs12 *vmcs12)
838{
839 if (!nested_cpu_has_pml(vmcs12))
840 return 0;
841
5497b955
SC
842 if (CC(!nested_cpu_has_ept(vmcs12)) ||
843 CC(!page_address_valid(vcpu, vmcs12->pml_address)))
55d2375e
SC
844 return -EINVAL;
845
846 return 0;
847}
848
849static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
850 struct vmcs12 *vmcs12)
851{
5497b955
SC
852 if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
853 !nested_cpu_has_ept(vmcs12)))
55d2375e
SC
854 return -EINVAL;
855 return 0;
856}
857
858static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
859 struct vmcs12 *vmcs12)
860{
5497b955
SC
861 if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
862 !nested_cpu_has_ept(vmcs12)))
55d2375e
SC
863 return -EINVAL;
864 return 0;
865}
866
867static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
868 struct vmcs12 *vmcs12)
869{
870 if (!nested_cpu_has_shadow_vmcs(vmcs12))
871 return 0;
872
5497b955
SC
873 if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) ||
874 CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap)))
55d2375e
SC
875 return -EINVAL;
876
877 return 0;
878}
879
880static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
881 struct vmx_msr_entry *e)
882{
883 /* x2APIC MSR accesses are not allowed */
5497b955 884 if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8))
55d2375e 885 return -EINVAL;
5497b955
SC
886 if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */
887 CC(e->index == MSR_IA32_UCODE_REV))
55d2375e 888 return -EINVAL;
5497b955 889 if (CC(e->reserved != 0))
55d2375e
SC
890 return -EINVAL;
891 return 0;
892}
893
894static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
895 struct vmx_msr_entry *e)
896{
5497b955
SC
897 if (CC(e->index == MSR_FS_BASE) ||
898 CC(e->index == MSR_GS_BASE) ||
899 CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */
55d2375e
SC
900 nested_vmx_msr_check_common(vcpu, e))
901 return -EINVAL;
902 return 0;
903}
904
905static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
906 struct vmx_msr_entry *e)
907{
5497b955 908 if (CC(e->index == MSR_IA32_SMBASE) || /* SMM is not supported */
55d2375e
SC
909 nested_vmx_msr_check_common(vcpu, e))
910 return -EINVAL;
911 return 0;
912}
913
f0b5105a
MO
914static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
915{
916 struct vcpu_vmx *vmx = to_vmx(vcpu);
917 u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
918 vmx->nested.msrs.misc_high);
919
920 return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
921}
922
55d2375e
SC
923/*
924 * Load guest's/host's msr at nested entry/exit.
925 * return 0 for success, entry index for failure.
f0b5105a
MO
926 *
927 * One of the failure modes for MSR load/store is when a list exceeds the
928 * virtual hardware's capacity. To maintain compatibility with hardware inasmuch
929 * as possible, process all valid entries before failing rather than precheck
930 * for a capacity violation.
55d2375e
SC
931 */
932static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
933{
934 u32 i;
935 struct vmx_msr_entry e;
f0b5105a 936 u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);
55d2375e 937
55d2375e 938 for (i = 0; i < count; i++) {
f0b5105a
MO
939 if (unlikely(i >= max_msr_list_size))
940 goto fail;
941
55d2375e
SC
942 if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
943 &e, sizeof(e))) {
944 pr_debug_ratelimited(
945 "%s cannot read MSR entry (%u, 0x%08llx)\n",
946 __func__, i, gpa + i * sizeof(e));
947 goto fail;
948 }
949 if (nested_vmx_load_msr_check(vcpu, &e)) {
950 pr_debug_ratelimited(
951 "%s check failed (%u, 0x%x, 0x%x)\n",
952 __func__, i, e.index, e.reserved);
953 goto fail;
954 }
f20935d8 955 if (kvm_set_msr(vcpu, e.index, e.value)) {
55d2375e
SC
956 pr_debug_ratelimited(
957 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
958 __func__, i, e.index, e.value);
959 goto fail;
960 }
961 }
962 return 0;
963fail:
68cda40d 964 /* Note, max_msr_list_size is at most 4096, i.e. this can't wrap. */
55d2375e
SC
965 return i + 1;
966}
967
662f1d1d
AL
968static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
969 u32 msr_index,
970 u64 *data)
971{
972 struct vcpu_vmx *vmx = to_vmx(vcpu);
973
974 /*
975 * If the L0 hypervisor stored a more accurate value for the TSC that
976 * does not include the time taken for emulation of the L2->L1
977 * VM-exit in L0, use the more accurate value.
978 */
979 if (msr_index == MSR_IA32_TSC) {
a128a934
SC
980 int i = vmx_find_loadstore_msr_slot(&vmx->msr_autostore.guest,
981 MSR_IA32_TSC);
662f1d1d 982
a128a934
SC
983 if (i >= 0) {
984 u64 val = vmx->msr_autostore.guest.val[i].value;
662f1d1d
AL
985
986 *data = kvm_read_l1_tsc(vcpu, val);
987 return true;
988 }
989 }
990
991 if (kvm_get_msr(vcpu, msr_index, data)) {
992 pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
993 msr_index);
994 return false;
995 }
996 return true;
997}
998
365d3d55
AL
999static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i,
1000 struct vmx_msr_entry *e)
1001{
1002 if (kvm_vcpu_read_guest(vcpu,
1003 gpa + i * sizeof(*e),
1004 e, 2 * sizeof(u32))) {
1005 pr_debug_ratelimited(
1006 "%s cannot read MSR entry (%u, 0x%08llx)\n",
1007 __func__, i, gpa + i * sizeof(*e));
1008 return false;
1009 }
1010 if (nested_vmx_store_msr_check(vcpu, e)) {
1011 pr_debug_ratelimited(
1012 "%s check failed (%u, 0x%x, 0x%x)\n",
1013 __func__, i, e->index, e->reserved);
1014 return false;
1015 }
1016 return true;
1017}
1018
55d2375e
SC
1019static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
1020{
f20935d8 1021 u64 data;
55d2375e
SC
1022 u32 i;
1023 struct vmx_msr_entry e;
f0b5105a 1024 u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);
55d2375e
SC
1025
1026 for (i = 0; i < count; i++) {
f0b5105a
MO
1027 if (unlikely(i >= max_msr_list_size))
1028 return -EINVAL;
1029
365d3d55 1030 if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
55d2375e 1031 return -EINVAL;
365d3d55 1032
662f1d1d 1033 if (!nested_vmx_get_vmexit_msr_value(vcpu, e.index, &data))
55d2375e 1034 return -EINVAL;
662f1d1d 1035
55d2375e
SC
1036 if (kvm_vcpu_write_guest(vcpu,
1037 gpa + i * sizeof(e) +
1038 offsetof(struct vmx_msr_entry, value),
f20935d8 1039 &data, sizeof(data))) {
55d2375e
SC
1040 pr_debug_ratelimited(
1041 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
f20935d8 1042 __func__, i, e.index, data);
55d2375e
SC
1043 return -EINVAL;
1044 }
1045 }
1046 return 0;
1047}
1048
662f1d1d
AL
1049static bool nested_msr_store_list_has_msr(struct kvm_vcpu *vcpu, u32 msr_index)
1050{
1051 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1052 u32 count = vmcs12->vm_exit_msr_store_count;
1053 u64 gpa = vmcs12->vm_exit_msr_store_addr;
1054 struct vmx_msr_entry e;
1055 u32 i;
1056
1057 for (i = 0; i < count; i++) {
1058 if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
1059 return false;
1060
1061 if (e.index == msr_index)
1062 return true;
1063 }
1064 return false;
1065}
1066
1067static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
1068 u32 msr_index)
1069{
1070 struct vcpu_vmx *vmx = to_vmx(vcpu);
1071 struct vmx_msrs *autostore = &vmx->msr_autostore.guest;
1072 bool in_vmcs12_store_list;
a128a934 1073 int msr_autostore_slot;
662f1d1d
AL
1074 bool in_autostore_list;
1075 int last;
1076
a128a934
SC
1077 msr_autostore_slot = vmx_find_loadstore_msr_slot(autostore, msr_index);
1078 in_autostore_list = msr_autostore_slot >= 0;
662f1d1d
AL
1079 in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index);
1080
1081 if (in_vmcs12_store_list && !in_autostore_list) {
ce833b23 1082 if (autostore->nr == MAX_NR_LOADSTORE_MSRS) {
662f1d1d
AL
1083 /*
1084 * Emulated VMEntry does not fail here. Instead a less
1085 * accurate value will be returned by
1086 * nested_vmx_get_vmexit_msr_value() using kvm_get_msr()
1087 * instead of reading the value from the vmcs02 VMExit
1088 * MSR-store area.
1089 */
1090 pr_warn_ratelimited(
1091 "Not enough msr entries in msr_autostore. Can't add msr %x\n",
1092 msr_index);
1093 return;
1094 }
1095 last = autostore->nr++;
1096 autostore->val[last].index = msr_index;
1097 } else if (!in_vmcs12_store_list && in_autostore_list) {
1098 last = --autostore->nr;
a128a934 1099 autostore->val[msr_autostore_slot] = autostore->val[last];
662f1d1d
AL
1100 }
1101}
1102
55d2375e 1103/*
ea79a750
SC
1104 * Load guest's/host's cr3 at nested entry/exit. @nested_ept is true if we are
1105 * emulating VM-Entry into a guest with EPT enabled. On failure, the expected
1106 * Exit Qualification (for a VM-Entry consistency check VM-Exit) is assigned to
1107 * @entry_failure_code.
55d2375e 1108 */
0f857223
ML
1109static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
1110 bool nested_ept, bool reload_pdptrs,
68cda40d 1111 enum vm_entry_failure_code *entry_failure_code)
55d2375e 1112{
636e8b73 1113 if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) {
0cc69204
SC
1114 *entry_failure_code = ENTRY_FAIL_DEFAULT;
1115 return -EINVAL;
1116 }
55d2375e 1117
0cc69204
SC
1118 /*
1119 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
1120 * must not be dereferenced.
1121 */
0f857223 1122 if (reload_pdptrs && !nested_ept && is_pae_paging(vcpu) &&
2df4a5eb 1123 CC(!load_pdptrs(vcpu, cr3))) {
bcb72d06
SC
1124 *entry_failure_code = ENTRY_FAIL_PDPTE;
1125 return -EINVAL;
55d2375e
SC
1126 }
1127
55d2375e 1128 vcpu->arch.cr3 = cr3;
3883bc9d 1129 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
55d2375e 1130
616007c8 1131 /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
c9060662 1132 kvm_init_mmu(vcpu);
55d2375e 1133
3cffc89d
PB
1134 if (!nested_ept)
1135 kvm_mmu_new_pgd(vcpu, cr3);
1136
55d2375e
SC
1137 return 0;
1138}
1139
1140/*
1141 * Returns if KVM is able to config CPU to tag TLB entries
1142 * populated by L2 differently than TLB entries populated
1143 * by L1.
1144 *
992edeae
LA
1145 * If L0 uses EPT, L1 and L2 run with different EPTP because
1146 * guest_mode is part of kvm_mmu_page_role. Thus, TLB entries
1147 * are tagged with different EPTP.
55d2375e
SC
1148 *
1149 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
1150 * with different VPID (L1 entries are tagged with vmx->vpid
1151 * while L2 entries are tagged with vmx->nested.vpid02).
1152 */
1153static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
1154{
1155 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1156
992edeae 1157 return enable_ept ||
55d2375e
SC
1158 (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
1159}
1160
50b265a4
SC
1161static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
1162 struct vmcs12 *vmcs12,
1163 bool is_vmenter)
1164{
1165 struct vcpu_vmx *vmx = to_vmx(vcpu);
1166
1167 /*
50a41796
SC
1168 * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
1169 * for *all* contexts to be flushed on VM-Enter/VM-Exit, i.e. it's a
1170 * full TLB flush from the guest's perspective. This is required even
1171 * if VPID is disabled in the host as KVM may need to synchronize the
1172 * MMU in response to the guest TLB flush.
1173 *
1174 * Note, using TLB_FLUSH_GUEST is correct even if nested EPT is in use.
1175 * EPT is a special snowflake, as guest-physical mappings aren't
1176 * flushed on VPID invalidations, including VM-Enter or VM-Exit with
1177 * VPID disabled. As a result, KVM _never_ needs to sync nEPT
1178 * entries on VM-Enter because L1 can't rely on VM-Enter to flush
1179 * those mappings.
50b265a4 1180 */
50a41796
SC
1181 if (!nested_cpu_has_vpid(vmcs12)) {
1182 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
50b265a4 1183 return;
50a41796
SC
1184 }
1185
1186 /* L2 should never have a VPID if VPID is disabled. */
1187 WARN_ON(!enable_vpid);
50b265a4
SC
1188
1189 /*
712494de
SC
1190 * VPID is enabled and in use by vmcs12. If vpid12 is changing, then
1191 * emulate a guest TLB flush as KVM does not track vpid12 history nor
1192 * is the VPID incorporated into the MMU context. I.e. KVM must assume
1193 * that the new vpid12 has never been used and thus represents a new
1194 * guest ASID that cannot have entries in the TLB.
50b265a4 1195 */
712494de 1196 if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
50b265a4 1197 vmx->nested.last_vpid = vmcs12->virtual_processor_id;
712494de
SC
1198 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
1199 return;
50b265a4 1200 }
712494de
SC
1201
1202 /*
1203 * If VPID is enabled, used by vmc12, and vpid12 is not changing but
1204 * does not have a unique TLB tag (ASID), i.e. EPT is disabled and
1205 * KVM was unable to allocate a VPID for L2, flush the current context
1206 * as the effective ASID is common to both L1 and L2.
1207 */
1208 if (!nested_has_guest_tlb_tag(vcpu))
1209 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
50b265a4
SC
1210}
1211
55d2375e
SC
1212static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
1213{
1214 superset &= mask;
1215 subset &= mask;
1216
1217 return (superset | subset) == superset;
1218}
1219
1220static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
1221{
1222 const u64 feature_and_reserved =
1223 /* feature (except bit 48; see below) */
1224 BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
1225 /* reserved */
1226 BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
1227 u64 vmx_basic = vmx->nested.msrs.basic;
1228
1229 if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
1230 return -EINVAL;
1231
1232 /*
1233 * KVM does not emulate a version of VMX that constrains physical
1234 * addresses of VMX structures (e.g. VMCS) to 32-bits.
1235 */
1236 if (data & BIT_ULL(48))
1237 return -EINVAL;
1238
1239 if (vmx_basic_vmcs_revision_id(vmx_basic) !=
1240 vmx_basic_vmcs_revision_id(data))
1241 return -EINVAL;
1242
1243 if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
1244 return -EINVAL;
1245
1246 vmx->nested.msrs.basic = data;
1247 return 0;
1248}
1249
1250static int
1251vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
1252{
1253 u64 supported;
1254 u32 *lowp, *highp;
1255
1256 switch (msr_index) {
1257 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1258 lowp = &vmx->nested.msrs.pinbased_ctls_low;
1259 highp = &vmx->nested.msrs.pinbased_ctls_high;
1260 break;
1261 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1262 lowp = &vmx->nested.msrs.procbased_ctls_low;
1263 highp = &vmx->nested.msrs.procbased_ctls_high;
1264 break;
1265 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1266 lowp = &vmx->nested.msrs.exit_ctls_low;
1267 highp = &vmx->nested.msrs.exit_ctls_high;
1268 break;
1269 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1270 lowp = &vmx->nested.msrs.entry_ctls_low;
1271 highp = &vmx->nested.msrs.entry_ctls_high;
1272 break;
1273 case MSR_IA32_VMX_PROCBASED_CTLS2:
1274 lowp = &vmx->nested.msrs.secondary_ctls_low;
1275 highp = &vmx->nested.msrs.secondary_ctls_high;
1276 break;
1277 default:
1278 BUG();
1279 }
1280
1281 supported = vmx_control_msr(*lowp, *highp);
1282
1283 /* Check must-be-1 bits are still 1. */
1284 if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
1285 return -EINVAL;
1286
1287 /* Check must-be-0 bits are still 0. */
1288 if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
1289 return -EINVAL;
1290
1291 *lowp = data;
1292 *highp = data >> 32;
1293 return 0;
1294}
1295
1296static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
1297{
1298 const u64 feature_and_reserved_bits =
1299 /* feature */
1300 BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
1301 BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
1302 /* reserved */
1303 GENMASK_ULL(13, 9) | BIT_ULL(31);
1304 u64 vmx_misc;
1305
1306 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
1307 vmx->nested.msrs.misc_high);
1308
1309 if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
1310 return -EINVAL;
1311
1312 if ((vmx->nested.msrs.pinbased_ctls_high &
1313 PIN_BASED_VMX_PREEMPTION_TIMER) &&
1314 vmx_misc_preemption_timer_rate(data) !=
1315 vmx_misc_preemption_timer_rate(vmx_misc))
1316 return -EINVAL;
1317
1318 if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
1319 return -EINVAL;
1320
1321 if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
1322 return -EINVAL;
1323
1324 if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
1325 return -EINVAL;
1326
1327 vmx->nested.msrs.misc_low = data;
1328 vmx->nested.msrs.misc_high = data >> 32;
1329
55d2375e
SC
1330 return 0;
1331}
1332
1333static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
1334{
1335 u64 vmx_ept_vpid_cap;
1336
1337 vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
1338 vmx->nested.msrs.vpid_caps);
1339
1340 /* Every bit is either reserved or a feature bit. */
1341 if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
1342 return -EINVAL;
1343
1344 vmx->nested.msrs.ept_caps = data;
1345 vmx->nested.msrs.vpid_caps = data >> 32;
1346 return 0;
1347}
1348
1349static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
1350{
1351 u64 *msr;
1352
1353 switch (msr_index) {
1354 case MSR_IA32_VMX_CR0_FIXED0:
1355 msr = &vmx->nested.msrs.cr0_fixed0;
1356 break;
1357 case MSR_IA32_VMX_CR4_FIXED0:
1358 msr = &vmx->nested.msrs.cr4_fixed0;
1359 break;
1360 default:
1361 BUG();
1362 }
1363
1364 /*
1365 * 1 bits (which indicates bits which "must-be-1" during VMX operation)
1366 * must be 1 in the restored value.
1367 */
1368 if (!is_bitwise_subset(data, *msr, -1ULL))
1369 return -EINVAL;
1370
1371 *msr = data;
1372 return 0;
1373}
1374
1375/*
1376 * Called when userspace is restoring VMX MSRs.
1377 *
1378 * Returns 0 on success, non-0 otherwise.
1379 */
1380int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
1381{
1382 struct vcpu_vmx *vmx = to_vmx(vcpu);
1383
1384 /*
1385 * Don't allow changes to the VMX capability MSRs while the vCPU
1386 * is in VMX operation.
1387 */
1388 if (vmx->nested.vmxon)
1389 return -EBUSY;
1390
1391 switch (msr_index) {
1392 case MSR_IA32_VMX_BASIC:
1393 return vmx_restore_vmx_basic(vmx, data);
1394 case MSR_IA32_VMX_PINBASED_CTLS:
1395 case MSR_IA32_VMX_PROCBASED_CTLS:
1396 case MSR_IA32_VMX_EXIT_CTLS:
1397 case MSR_IA32_VMX_ENTRY_CTLS:
1398 /*
1399 * The "non-true" VMX capability MSRs are generated from the
1400 * "true" MSRs, so we do not support restoring them directly.
1401 *
1402 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
1403 * should restore the "true" MSRs with the must-be-1 bits
1404 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
1405 * DEFAULT SETTINGS".
1406 */
1407 return -EINVAL;
1408 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1409 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1410 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1411 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1412 case MSR_IA32_VMX_PROCBASED_CTLS2:
1413 return vmx_restore_control_msr(vmx, msr_index, data);
1414 case MSR_IA32_VMX_MISC:
1415 return vmx_restore_vmx_misc(vmx, data);
1416 case MSR_IA32_VMX_CR0_FIXED0:
1417 case MSR_IA32_VMX_CR4_FIXED0:
1418 return vmx_restore_fixed0_msr(vmx, msr_index, data);
1419 case MSR_IA32_VMX_CR0_FIXED1:
1420 case MSR_IA32_VMX_CR4_FIXED1:
1421 /*
1422 * These MSRs are generated based on the vCPU's CPUID, so we
1423 * do not support restoring them directly.
1424 */
1425 return -EINVAL;
1426 case MSR_IA32_VMX_EPT_VPID_CAP:
1427 return vmx_restore_vmx_ept_vpid_cap(vmx, data);
1428 case MSR_IA32_VMX_VMCS_ENUM:
1429 vmx->nested.msrs.vmcs_enum = data;
1430 return 0;
e8a70bd4
PB
1431 case MSR_IA32_VMX_VMFUNC:
1432 if (data & ~vmx->nested.msrs.vmfunc_controls)
1433 return -EINVAL;
1434 vmx->nested.msrs.vmfunc_controls = data;
1435 return 0;
55d2375e
SC
1436 default:
1437 /*
1438 * The rest of the VMX capability MSRs do not support restore.
1439 */
1440 return -EINVAL;
1441 }
1442}
1443
1444/* Returns 0 on success, non-0 otherwise. */
1445int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
1446{
1447 switch (msr_index) {
1448 case MSR_IA32_VMX_BASIC:
1449 *pdata = msrs->basic;
1450 break;
1451 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1452 case MSR_IA32_VMX_PINBASED_CTLS:
1453 *pdata = vmx_control_msr(
1454 msrs->pinbased_ctls_low,
1455 msrs->pinbased_ctls_high);
1456 if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
1457 *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1458 break;
1459 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1460 case MSR_IA32_VMX_PROCBASED_CTLS:
1461 *pdata = vmx_control_msr(
1462 msrs->procbased_ctls_low,
1463 msrs->procbased_ctls_high);
1464 if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
1465 *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1466 break;
1467 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1468 case MSR_IA32_VMX_EXIT_CTLS:
1469 *pdata = vmx_control_msr(
1470 msrs->exit_ctls_low,
1471 msrs->exit_ctls_high);
1472 if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
1473 *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
1474 break;
1475 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1476 case MSR_IA32_VMX_ENTRY_CTLS:
1477 *pdata = vmx_control_msr(
1478 msrs->entry_ctls_low,
1479 msrs->entry_ctls_high);
1480 if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
1481 *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
1482 break;
1483 case MSR_IA32_VMX_MISC:
1484 *pdata = vmx_control_msr(
1485 msrs->misc_low,
1486 msrs->misc_high);
1487 break;
1488 case MSR_IA32_VMX_CR0_FIXED0:
1489 *pdata = msrs->cr0_fixed0;
1490 break;
1491 case MSR_IA32_VMX_CR0_FIXED1:
1492 *pdata = msrs->cr0_fixed1;
1493 break;
1494 case MSR_IA32_VMX_CR4_FIXED0:
1495 *pdata = msrs->cr4_fixed0;
1496 break;
1497 case MSR_IA32_VMX_CR4_FIXED1:
1498 *pdata = msrs->cr4_fixed1;
1499 break;
1500 case MSR_IA32_VMX_VMCS_ENUM:
1501 *pdata = msrs->vmcs_enum;
1502 break;
1503 case MSR_IA32_VMX_PROCBASED_CTLS2:
1504 *pdata = vmx_control_msr(
1505 msrs->secondary_ctls_low,
1506 msrs->secondary_ctls_high);
1507 break;
1508 case MSR_IA32_VMX_EPT_VPID_CAP:
1509 *pdata = msrs->ept_caps |
1510 ((u64)msrs->vpid_caps << 32);
1511 break;
1512 case MSR_IA32_VMX_VMFUNC:
1513 *pdata = msrs->vmfunc_controls;
1514 break;
1515 default:
1516 return 1;
1517 }
1518
1519 return 0;
1520}
1521
1522/*
fadcead0
SC
1523 * Copy the writable VMCS shadow fields back to the VMCS12, in case they have
1524 * been modified by the L1 guest. Note, "writable" in this context means
1525 * "writable by the guest", i.e. tagged SHADOW_FIELD_RW; the set of
1526 * fields tagged SHADOW_FIELD_RO may or may not align with the "read-only"
1527 * VM-exit information fields (which are actually writable if the vCPU is
1528 * configured to support "VMWRITE to any supported field in the VMCS").
55d2375e
SC
1529 */
1530static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
1531{
55d2375e 1532 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
fadcead0 1533 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
1c6f0b47
SC
1534 struct shadow_vmcs_field field;
1535 unsigned long val;
fadcead0 1536 int i;
55d2375e 1537
88dddc11
PB
1538 if (WARN_ON(!shadow_vmcs))
1539 return;
1540
55d2375e
SC
1541 preempt_disable();
1542
1543 vmcs_load(shadow_vmcs);
1544
fadcead0
SC
1545 for (i = 0; i < max_shadow_read_write_fields; i++) {
1546 field = shadow_read_write_fields[i];
1c6f0b47
SC
1547 val = __vmcs_readl(field.encoding);
1548 vmcs12_write_any(vmcs12, field.encoding, field.offset, val);
55d2375e
SC
1549 }
1550
1551 vmcs_clear(shadow_vmcs);
1552 vmcs_load(vmx->loaded_vmcs->vmcs);
1553
1554 preempt_enable();
1555}
1556
1557static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
1558{
1c6f0b47 1559 const struct shadow_vmcs_field *fields[] = {
55d2375e
SC
1560 shadow_read_write_fields,
1561 shadow_read_only_fields
1562 };
1563 const int max_fields[] = {
1564 max_shadow_read_write_fields,
1565 max_shadow_read_only_fields
1566 };
55d2375e 1567 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
1c6f0b47
SC
1568 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
1569 struct shadow_vmcs_field field;
1570 unsigned long val;
1571 int i, q;
55d2375e 1572
88dddc11
PB
1573 if (WARN_ON(!shadow_vmcs))
1574 return;
1575
55d2375e
SC
1576 vmcs_load(shadow_vmcs);
1577
1578 for (q = 0; q < ARRAY_SIZE(fields); q++) {
1579 for (i = 0; i < max_fields[q]; i++) {
1580 field = fields[q][i];
1c6f0b47
SC
1581 val = vmcs12_read_any(vmcs12, field.encoding,
1582 field.offset);
1583 __vmcs_writel(field.encoding, val);
55d2375e
SC
1584 }
1585 }
1586
1587 vmcs_clear(shadow_vmcs);
1588 vmcs_load(vmx->loaded_vmcs->vmcs);
1589}
1590
d6bf71a1 1591static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields)
55d2375e
SC
1592{
1593 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1594 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
1595
1596 /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
1597 vmcs12->tpr_threshold = evmcs->tpr_threshold;
1598 vmcs12->guest_rip = evmcs->guest_rip;
1599
d6bf71a1 1600 if (unlikely(!(hv_clean_fields &
55d2375e
SC
1601 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
1602 vmcs12->guest_rsp = evmcs->guest_rsp;
1603 vmcs12->guest_rflags = evmcs->guest_rflags;
1604 vmcs12->guest_interruptibility_info =
1605 evmcs->guest_interruptibility_info;
1606 }
1607
d6bf71a1 1608 if (unlikely(!(hv_clean_fields &
55d2375e
SC
1609 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
1610 vmcs12->cpu_based_vm_exec_control =
1611 evmcs->cpu_based_vm_exec_control;
1612 }
1613
d6bf71a1 1614 if (unlikely(!(hv_clean_fields &
f9bc5227 1615 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) {
55d2375e
SC
1616 vmcs12->exception_bitmap = evmcs->exception_bitmap;
1617 }
1618
d6bf71a1 1619 if (unlikely(!(hv_clean_fields &
55d2375e
SC
1620 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
1621 vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
1622 }
1623
d6bf71a1 1624 if (unlikely(!(hv_clean_fields &
55d2375e
SC
1625 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
1626 vmcs12->vm_entry_intr_info_field =
1627 evmcs->vm_entry_intr_info_field;
1628 vmcs12->vm_entry_exception_error_code =
1629 evmcs->vm_entry_exception_error_code;
1630 vmcs12->vm_entry_instruction_len =
1631 evmcs->vm_entry_instruction_len;
1632 }
1633
d6bf71a1 1634 if (unlikely(!(hv_clean_fields &
55d2375e
SC
1635 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
1636 vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
1637 vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
1638 vmcs12->host_cr0 = evmcs->host_cr0;
1639 vmcs12->host_cr3 = evmcs->host_cr3;
1640 vmcs12->host_cr4 = evmcs->host_cr4;
1641 vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
1642 vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
1643 vmcs12->host_rip = evmcs->host_rip;
1644 vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
1645 vmcs12->host_es_selector = evmcs->host_es_selector;
1646 vmcs12->host_cs_selector = evmcs->host_cs_selector;
1647 vmcs12->host_ss_selector = evmcs->host_ss_selector;
1648 vmcs12->host_ds_selector = evmcs->host_ds_selector;
1649 vmcs12->host_fs_selector = evmcs->host_fs_selector;
1650 vmcs12->host_gs_selector = evmcs->host_gs_selector;
1651 vmcs12->host_tr_selector = evmcs->host_tr_selector;
1652 }
1653
d6bf71a1 1654 if (unlikely(!(hv_clean_fields &
f9bc5227 1655 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
55d2375e
SC
1656 vmcs12->pin_based_vm_exec_control =
1657 evmcs->pin_based_vm_exec_control;
1658 vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
1659 vmcs12->secondary_vm_exec_control =
1660 evmcs->secondary_vm_exec_control;
1661 }
1662
d6bf71a1 1663 if (unlikely(!(hv_clean_fields &
55d2375e
SC
1664 HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
1665 vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
1666 vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
1667 }
1668
d6bf71a1 1669 if (unlikely(!(hv_clean_fields &
55d2375e
SC
1670 HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
1671 vmcs12->msr_bitmap = evmcs->msr_bitmap;
1672 }
1673
d6bf71a1 1674 if (unlikely(!(hv_clean_fields &
55d2375e
SC
1675 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
1676 vmcs12->guest_es_base = evmcs->guest_es_base;
1677 vmcs12->guest_cs_base = evmcs->guest_cs_base;
1678 vmcs12->guest_ss_base = evmcs->guest_ss_base;
1679 vmcs12->guest_ds_base = evmcs->guest_ds_base;
1680 vmcs12->guest_fs_base = evmcs->guest_fs_base;
1681 vmcs12->guest_gs_base = evmcs->guest_gs_base;
1682 vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
1683 vmcs12->guest_tr_base = evmcs->guest_tr_base;
1684 vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
1685 vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
1686 vmcs12->guest_es_limit = evmcs->guest_es_limit;
1687 vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
1688 vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
1689 vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
1690 vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
1691 vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
1692 vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
1693 vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
1694 vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
1695 vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
1696 vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
1697 vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
1698 vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
1699 vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
1700 vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
1701 vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
1702 vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
1703 vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
1704 vmcs12->guest_es_selector = evmcs->guest_es_selector;
1705 vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
1706 vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
1707 vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
1708 vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
1709 vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
1710 vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
1711 vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
1712 }
1713
d6bf71a1 1714 if (unlikely(!(hv_clean_fields &
55d2375e
SC
1715 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
1716 vmcs12->tsc_offset = evmcs->tsc_offset;
1717 vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
1718 vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
1719 }
1720
d6bf71a1 1721 if (unlikely(!(hv_clean_fields &
55d2375e
SC
1722 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
1723 vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
1724 vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
1725 vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
1726 vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
1727 vmcs12->guest_cr0 = evmcs->guest_cr0;
1728 vmcs12->guest_cr3 = evmcs->guest_cr3;
1729 vmcs12->guest_cr4 = evmcs->guest_cr4;
1730 vmcs12->guest_dr7 = evmcs->guest_dr7;
1731 }
1732
d6bf71a1 1733 if (unlikely(!(hv_clean_fields &
55d2375e
SC
1734 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
1735 vmcs12->host_fs_base = evmcs->host_fs_base;
1736 vmcs12->host_gs_base = evmcs->host_gs_base;
1737 vmcs12->host_tr_base = evmcs->host_tr_base;
1738 vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
1739 vmcs12->host_idtr_base = evmcs->host_idtr_base;
1740 vmcs12->host_rsp = evmcs->host_rsp;
1741 }
1742
d6bf71a1 1743 if (unlikely(!(hv_clean_fields &
55d2375e
SC
1744 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
1745 vmcs12->ept_pointer = evmcs->ept_pointer;
1746 vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
1747 }
1748
d6bf71a1 1749 if (unlikely(!(hv_clean_fields &
55d2375e
SC
1750 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
1751 vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
1752 vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
1753 vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
1754 vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
1755 vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
1756 vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
1757 vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
1758 vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
1759 vmcs12->guest_pending_dbg_exceptions =
1760 evmcs->guest_pending_dbg_exceptions;
1761 vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
1762 vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
1763 vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
1764 vmcs12->guest_activity_state = evmcs->guest_activity_state;
1765 vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
1766 }
1767
1768 /*
1769 * Not used?
1770 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
1771 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
1772 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
55d2375e
SC
1773 * vmcs12->page_fault_error_code_mask =
1774 * evmcs->page_fault_error_code_mask;
1775 * vmcs12->page_fault_error_code_match =
1776 * evmcs->page_fault_error_code_match;
1777 * vmcs12->cr3_target_count = evmcs->cr3_target_count;
1778 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
1779 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
1780 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
1781 */
1782
1783 /*
1784 * Read only fields:
1785 * vmcs12->guest_physical_address = evmcs->guest_physical_address;
1786 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
1787 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
1788 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
1789 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
1790 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
1791 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
1792 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
1793 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
1794 * vmcs12->exit_qualification = evmcs->exit_qualification;
1795 * vmcs12->guest_linear_address = evmcs->guest_linear_address;
1796 *
1797 * Not present in struct vmcs12:
1798 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
1799 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
1800 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
1801 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
1802 */
1803
25641caf 1804 return;
55d2375e
SC
1805}
1806
25641caf 1807static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
55d2375e
SC
1808{
1809 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1810 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
1811
1812 /*
1813 * Should not be changed by KVM:
1814 *
1815 * evmcs->host_es_selector = vmcs12->host_es_selector;
1816 * evmcs->host_cs_selector = vmcs12->host_cs_selector;
1817 * evmcs->host_ss_selector = vmcs12->host_ss_selector;
1818 * evmcs->host_ds_selector = vmcs12->host_ds_selector;
1819 * evmcs->host_fs_selector = vmcs12->host_fs_selector;
1820 * evmcs->host_gs_selector = vmcs12->host_gs_selector;
1821 * evmcs->host_tr_selector = vmcs12->host_tr_selector;
1822 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
1823 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
1824 * evmcs->host_cr0 = vmcs12->host_cr0;
1825 * evmcs->host_cr3 = vmcs12->host_cr3;
1826 * evmcs->host_cr4 = vmcs12->host_cr4;
1827 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
1828 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
1829 * evmcs->host_rip = vmcs12->host_rip;
1830 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
1831 * evmcs->host_fs_base = vmcs12->host_fs_base;
1832 * evmcs->host_gs_base = vmcs12->host_gs_base;
1833 * evmcs->host_tr_base = vmcs12->host_tr_base;
1834 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
1835 * evmcs->host_idtr_base = vmcs12->host_idtr_base;
1836 * evmcs->host_rsp = vmcs12->host_rsp;
3731905e 1837 * sync_vmcs02_to_vmcs12() doesn't read these:
55d2375e
SC
1838 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
1839 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
1840 * evmcs->msr_bitmap = vmcs12->msr_bitmap;
1841 * evmcs->ept_pointer = vmcs12->ept_pointer;
1842 * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
1843 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
1844 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
1845 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
55d2375e
SC
1846 * evmcs->tpr_threshold = vmcs12->tpr_threshold;
1847 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
1848 * evmcs->exception_bitmap = vmcs12->exception_bitmap;
1849 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
1850 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
1851 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
1852 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
1853 * evmcs->page_fault_error_code_mask =
1854 * vmcs12->page_fault_error_code_mask;
1855 * evmcs->page_fault_error_code_match =
1856 * vmcs12->page_fault_error_code_match;
1857 * evmcs->cr3_target_count = vmcs12->cr3_target_count;
1858 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
1859 * evmcs->tsc_offset = vmcs12->tsc_offset;
1860 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
1861 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
1862 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
1863 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
1864 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
1865 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
1866 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
1867 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
1868 *
1869 * Not present in struct vmcs12:
1870 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
1871 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
1872 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
1873 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
1874 */
1875
1876 evmcs->guest_es_selector = vmcs12->guest_es_selector;
1877 evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
1878 evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
1879 evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
1880 evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
1881 evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
1882 evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
1883 evmcs->guest_tr_selector = vmcs12->guest_tr_selector;
1884
1885 evmcs->guest_es_limit = vmcs12->guest_es_limit;
1886 evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
1887 evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
1888 evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
1889 evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
1890 evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
1891 evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
1892 evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
1893 evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
1894 evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;
1895
1896 evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
1897 evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
1898 evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
1899 evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
1900 evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
1901 evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
1902 evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
1903 evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;
1904
1905 evmcs->guest_es_base = vmcs12->guest_es_base;
1906 evmcs->guest_cs_base = vmcs12->guest_cs_base;
1907 evmcs->guest_ss_base = vmcs12->guest_ss_base;
1908 evmcs->guest_ds_base = vmcs12->guest_ds_base;
1909 evmcs->guest_fs_base = vmcs12->guest_fs_base;
1910 evmcs->guest_gs_base = vmcs12->guest_gs_base;
1911 evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
1912 evmcs->guest_tr_base = vmcs12->guest_tr_base;
1913 evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
1914 evmcs->guest_idtr_base = vmcs12->guest_idtr_base;
1915
1916 evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
1917 evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;
1918
1919 evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
1920 evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
1921 evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
1922 evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;
1923
1924 evmcs->guest_pending_dbg_exceptions =
1925 vmcs12->guest_pending_dbg_exceptions;
1926 evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
1927 evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;
1928
1929 evmcs->guest_activity_state = vmcs12->guest_activity_state;
1930 evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;
1931
1932 evmcs->guest_cr0 = vmcs12->guest_cr0;
1933 evmcs->guest_cr3 = vmcs12->guest_cr3;
1934 evmcs->guest_cr4 = vmcs12->guest_cr4;
1935 evmcs->guest_dr7 = vmcs12->guest_dr7;
1936
1937 evmcs->guest_physical_address = vmcs12->guest_physical_address;
1938
1939 evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
1940 evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
1941 evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
1942 evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
1943 evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
1944 evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
1945 evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
1946 evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;
1947
1948 evmcs->exit_qualification = vmcs12->exit_qualification;
1949
1950 evmcs->guest_linear_address = vmcs12->guest_linear_address;
1951 evmcs->guest_rsp = vmcs12->guest_rsp;
1952 evmcs->guest_rflags = vmcs12->guest_rflags;
1953
1954 evmcs->guest_interruptibility_info =
1955 vmcs12->guest_interruptibility_info;
1956 evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
1957 evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
1958 evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
1959 evmcs->vm_entry_exception_error_code =
1960 vmcs12->vm_entry_exception_error_code;
1961 evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;
1962
1963 evmcs->guest_rip = vmcs12->guest_rip;
1964
1965 evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
1966
25641caf 1967 return;
55d2375e
SC
1968}
1969
1970/*
1971 * This is an equivalent of the nested hypervisor executing the vmptrld
1972 * instruction.
1973 */
b6a0653a
VK
1974static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
1975 struct kvm_vcpu *vcpu, bool from_launch)
55d2375e
SC
1976{
1977 struct vcpu_vmx *vmx = to_vmx(vcpu);
a21a39c2 1978 bool evmcs_gpa_changed = false;
11e34914 1979 u64 evmcs_gpa;
55d2375e
SC
1980
1981 if (likely(!vmx->nested.enlightened_vmcs_enabled))
b6a0653a 1982 return EVMPTRLD_DISABLED;
55d2375e 1983
02761716
VK
1984 if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa)) {
1985 nested_release_evmcs(vcpu);
b6a0653a 1986 return EVMPTRLD_DISABLED;
02761716 1987 }
55d2375e 1988
1e9dfbd7 1989 if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
64c78508 1990 vmx->nested.current_vmptr = INVALID_GPA;
55d2375e
SC
1991
1992 nested_release_evmcs(vcpu);
1993
11e34914 1994 if (kvm_vcpu_map(vcpu, gpa_to_gfn(evmcs_gpa),
dee9c049 1995 &vmx->nested.hv_evmcs_map))
b6a0653a 1996 return EVMPTRLD_ERROR;
55d2375e 1997
dee9c049 1998 vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;
55d2375e
SC
1999
2000 /*
2001		 * Currently, KVM only supports eVMCS version 1
2002		 * (== KVM_EVMCS_VERSION) and thus we expect the guest to set this
2003		 * value in the first u32 field of the eVMCS, which should specify
2004		 * the eVMCS VersionNumber.
2005		 *
2006		 * The guest should learn the eVMCS versions supported by the host
2007		 * by examining CPUID.0x4000000A.EAX[0:15]. The host userspace VMM
2008		 * is expected to set this CPUID leaf according to the value
2009		 * returned in vmcs_version from nested_enable_evmcs().
2010		 *
2011		 * However, it turns out that Microsoft Hyper-V fails to comply
2012		 * with its own invented interface: when Hyper-V uses eVMCS, it
2013		 * just sets the first u32 field of the eVMCS to the revision_id
2014		 * specified in MSR_IA32_VMX_BASIC, instead of to an eVMCS version
2015		 * number, i.e. one of the supported versions specified in
2016		 * CPUID.0x4000000A.EAX[0:15].
2017		 *
2018		 * To work around this Hyper-V bug, accept here either a supported
2019		 * eVMCS version or the VMCS12 revision_id as a valid value for the
2020		 * first u32 field of the eVMCS.
2021 */
2022 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
2023 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
2024 nested_release_evmcs(vcpu);
b6a0653a 2025 return EVMPTRLD_VMFAIL;
55d2375e
SC
2026 }
2027
11e34914 2028 vmx->nested.hv_evmcs_vmptr = evmcs_gpa;
55d2375e 2029
a21a39c2 2030 evmcs_gpa_changed = true;
55d2375e
SC
2031 /*
2032 * Unlike normal vmcs12, enlightened vmcs12 is not fully
2033		 * reloaded from the guest's memory (read-only fields, fields not
2034 * present in struct hv_enlightened_vmcs, ...). Make sure there
2035 * are no leftovers.
2036 */
2037 if (from_launch) {
2038 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2039 memset(vmcs12, 0, sizeof(*vmcs12));
2040 vmcs12->hdr.revision_id = VMCS12_REVISION;
2041 }
2042
2043 }
a21a39c2
VK
2044
2045 /*
ffdbd50d	2046	 * Clean fields data can't be used on VMLAUNCH, nor when we switch
a21a39c2
VK
2047 * between different L2 guests as KVM keeps a single VMCS12 per L1.
2048 */
ed2a4800 2049 if (from_launch || evmcs_gpa_changed) {
a21a39c2
VK
2050 vmx->nested.hv_evmcs->hv_clean_fields &=
2051 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
2052
ed2a4800
VK
2053 vmx->nested.force_msr_bitmap_recalc = true;
2054 }
2055
b6a0653a 2056 return EVMPTRLD_SUCCEEDED;
55d2375e
SC
2057}
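
/*
 * A minimal sketch of how callers are expected to consume the status
 * returned above (illustrative only; the exact error handling belongs to
 * the individual call sites, e.g. nested_vmx_run()): only
 * EVMPTRLD_SUCCEEDED lets the nested entry proceed with the mapped
 * eVMCS, EVMPTRLD_DISABLED means "no eVMCS, use the regular vmcs12
 * path", and the error/VMFail cases abort the emulated VMLAUNCH or
 * VMRESUME.
 *
 *	switch (nested_vmx_handle_enlightened_vmptrld(vcpu, launch)) {
 *	case EVMPTRLD_DISABLED:
 *	case EVMPTRLD_SUCCEEDED:
 *		break;
 *	case EVMPTRLD_ERROR:
 *		return 1;
 *	case EVMPTRLD_VMFAIL:
 *		return nested_vmx_failInvalid(vcpu);
 *	}
 */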
2058
3731905e 2059void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu)
55d2375e
SC
2060{
2061 struct vcpu_vmx *vmx = to_vmx(vcpu);
2062
dc313385 2063 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
55d2375e 2064 copy_vmcs12_to_enlightened(vmx);
dc313385 2065 else
55d2375e 2066 copy_vmcs12_to_shadow(vmx);
55d2375e 2067
3731905e 2068 vmx->nested.need_vmcs12_to_shadow_sync = false;
55d2375e
SC
2069}
2070
2071static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
2072{
2073 struct vcpu_vmx *vmx =
2074 container_of(timer, struct vcpu_vmx, nested.preemption_timer);
2075
2076 vmx->nested.preemption_timer_expired = true;
2077 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
2078 kvm_vcpu_kick(&vmx->vcpu);
2079
2080 return HRTIMER_NORESTART;
2081}
2082
850448f3
PS
2083static u64 vmx_calc_preemption_timer_value(struct kvm_vcpu *vcpu)
2084{
2085 struct vcpu_vmx *vmx = to_vmx(vcpu);
2086 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
850448f3
PS
2087
2088 u64 l1_scaled_tsc = kvm_read_l1_tsc(vcpu, rdtsc()) >>
2089 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
2090
2091 if (!vmx->nested.has_preemption_timer_deadline) {
8d7fbf01
MS
2092 vmx->nested.preemption_timer_deadline =
2093 vmcs12->vmx_preemption_timer_value + l1_scaled_tsc;
850448f3 2094 vmx->nested.has_preemption_timer_deadline = true;
8d7fbf01
MS
2095 }
2096 return vmx->nested.preemption_timer_deadline - l1_scaled_tsc;
850448f3
PS
2097}
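
/*
 * A worked example of the deadline arithmetic above (numbers are
 * illustrative): with VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE == 5, both
 * l1_scaled_tsc and the vmcs12 timer value are in units of 1 << 5 = 32
 * guest TSC cycles.  If the first call sees a scaled TSC of 1000 and
 * vmcs12->vmx_preemption_timer_value == 200, the deadline is pinned at
 * 1200; a later call with a scaled TSC of 1150 returns 50, i.e.
 * 50 * 32 TSC cycles remain on the emulated timer.
 */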
2098
2099static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu,
2100 u64 preemption_timeout)
55d2375e 2101{
55d2375e
SC
2102 struct vcpu_vmx *vmx = to_vmx(vcpu);
2103
2104 /*
2105 * A timer value of zero is architecturally guaranteed to cause
2106 * a VMExit prior to executing any instructions in the guest.
2107 */
2108 if (preemption_timeout == 0) {
2109 vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
2110 return;
2111 }
2112
2113 if (vcpu->arch.virtual_tsc_khz == 0)
2114 return;
2115
2116 preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
2117 preemption_timeout *= 1000000;
2118 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
2119 hrtimer_start(&vmx->nested.preemption_timer,
ada0098d
JM
2120 ktime_add_ns(ktime_get(), preemption_timeout),
2121 HRTIMER_MODE_ABS_PINNED);
55d2375e
SC
2122}
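
/*
 * The unit conversion above, spelled out (a reference sketch; assumes
 * vcpu->arch.virtual_tsc_khz is the guest's TSC frequency in kHz):
 *
 *	ns = (preemption_timeout << VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE)
 *		* 1000000 / virtual_tsc_khz
 *
 * e.g. a vmcs12 timer value of 10000 with a 2 GHz guest TSC
 * (virtual_tsc_khz == 2000000) arms the hrtimer for
 * (10000 << 5) * 1000000 / 2000000 = 160000 ns.
 */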
2123
2124static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2125{
2126 if (vmx->nested.nested_run_pending &&
2127 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
2128 return vmcs12->guest_ia32_efer;
2129 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
2130 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
2131 else
2132 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
2133}
2134
2135static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
2136{
2137 /*
2138 * If vmcs02 hasn't been initialized, set the constant vmcs02 state
2139 * according to L0's settings (vmcs12 is irrelevant here). Host
2140 * fields that come from L0 and are not constant, e.g. HOST_CR3,
2141 * will be set as needed prior to VMLAUNCH/VMRESUME.
2142 */
2143 if (vmx->nested.vmcs02_initialized)
2144 return;
2145 vmx->nested.vmcs02_initialized = true;
2146
2147 /*
2148	 * We don't care what the EPTP value is; we just need to guarantee
2149 * it's valid so we don't get a false positive when doing early
2150 * consistency checks.
2151 */
2152 if (enable_ept && nested_early_check)
2a40b900
SC
2153 vmcs_write64(EPT_POINTER,
2154 construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL));
55d2375e
SC
2155
2156 /* All VMFUNCs are currently emulated through L0 vmexits. */
2157 if (cpu_has_vmx_vmfunc())
2158 vmcs_write64(VM_FUNCTION_CONTROL, 0);
2159
2160 if (cpu_has_vmx_posted_intr())
2161 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
2162
2163 if (cpu_has_vmx_msr_bitmap())
2164 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
2165
4d6c9892 2166 /*
c3bb9a20
SC
2167 * PML is emulated for L2, but never enabled in hardware as the MMU
2168 * handles A/D emulation. Disabling PML for L2 also avoids having to
2169 * deal with filtering out L2 GPAs from the buffer.
4d6c9892
SC
2170 */
2171 if (enable_pml) {
c3bb9a20
SC
2172 vmcs_write64(PML_ADDRESS, 0);
2173 vmcs_write16(GUEST_PML_INDEX, -1);
4d6c9892 2174 }
55d2375e 2175
c538d57f 2176 if (cpu_has_vmx_encls_vmexit())
64c78508 2177 vmcs_write64(ENCLS_EXITING_BITMAP, INVALID_GPA);
55d2375e
SC
2178
2179 /*
2180 * Set the MSR load/store lists to match L0's settings. Only the
2181 * addresses are constant (for vmcs02), the counts can change based
2182 * on L2's behavior, e.g. switching to/from long mode.
2183 */
662f1d1d 2184 vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val));
55d2375e
SC
2185 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
2186 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
2187
2188 vmx_set_constant_host_state(vmx);
2189}
2190
b1346ab2 2191static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx,
55d2375e
SC
2192 struct vmcs12 *vmcs12)
2193{
2194 prepare_vmcs02_constant_state(vmx);
2195
64c78508 2196 vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA);
55d2375e
SC
2197
2198 if (enable_vpid) {
2199 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
2200 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
2201 else
2202 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
2203 }
2204}
2205
389ab252
SC
2206static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs01,
2207 struct vmcs12 *vmcs12)
55d2375e 2208{
c3bb9a20 2209 u32 exec_control;
55d2375e
SC
2210 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
2211
1e9dfbd7 2212 if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
b1346ab2 2213 prepare_vmcs02_early_rare(vmx, vmcs12);
55d2375e 2214
55d2375e
SC
2215 /*
2216 * PIN CONTROLS
2217 */
389ab252 2218 exec_control = __pin_controls_get(vmcs01);
804939ea
SC
2219 exec_control |= (vmcs12->pin_based_vm_exec_control &
2220 ~PIN_BASED_VMX_PREEMPTION_TIMER);
55d2375e
SC
2221
2222 /* Posted interrupts setting is only taken from vmcs12. */
f7782bb8
SC
2223 vmx->nested.pi_pending = false;
2224 if (nested_cpu_has_posted_intr(vmcs12))
55d2375e 2225 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
f7782bb8 2226 else
55d2375e 2227 exec_control &= ~PIN_BASED_POSTED_INTR;
3af80fec 2228 pin_controls_set(vmx, exec_control);
55d2375e
SC
2229
2230 /*
2231 * EXEC CONTROLS
2232 */
389ab252 2233 exec_control = __exec_controls_get(vmcs01); /* L0's desires */
9dadc2f9 2234 exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING;
4e2a0bc5 2235 exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING;
55d2375e
SC
2236 exec_control &= ~CPU_BASED_TPR_SHADOW;
2237 exec_control |= vmcs12->cpu_based_vm_exec_control;
2238
02d496cf 2239 vmx->nested.l1_tpr_threshold = -1;
ca2f5466 2240 if (exec_control & CPU_BASED_TPR_SHADOW)
55d2375e 2241 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
55d2375e 2242#ifdef CONFIG_X86_64
ca2f5466 2243 else
55d2375e
SC
2244 exec_control |= CPU_BASED_CR8_LOAD_EXITING |
2245 CPU_BASED_CR8_STORE_EXITING;
2246#endif
55d2375e
SC
2247
2248 /*
2249	 * A vmexit (to either the L1 hypervisor or L0 userspace) is always needed
2250 * for I/O port accesses.
2251 */
55d2375e 2252 exec_control |= CPU_BASED_UNCOND_IO_EXITING;
de0286b7
SC
2253 exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
2254
2255 /*
2256 * This bit will be computed in nested_get_vmcs12_pages, because
2257 * we do not have access to L1's MSR bitmap yet. For now, keep
2258 * the same bit as before, hoping to avoid multiple VMWRITEs that
2259 * only set/clear this bit.
2260 */
2261 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
2262 exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS;
2263
3af80fec 2264 exec_controls_set(vmx, exec_control);
55d2375e
SC
2265
2266 /*
2267 * SECONDARY EXEC CONTROLS
2268 */
2269 if (cpu_has_secondary_exec_ctrls()) {
389ab252 2270 exec_control = __secondary_exec_controls_get(vmcs01);
55d2375e
SC
2271
2272 /* Take the following fields only from vmcs12 */
2273 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
389ab252 2274 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
55d2375e 2275 SECONDARY_EXEC_ENABLE_INVPCID |
7f3603b6 2276 SECONDARY_EXEC_ENABLE_RDTSCP |
55d2375e 2277 SECONDARY_EXEC_XSAVES |
e69e72fa 2278 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
55d2375e
SC
2279 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2280 SECONDARY_EXEC_APIC_REGISTER_VIRT |
d041b5ea 2281 SECONDARY_EXEC_ENABLE_VMFUNC |
389ab252
SC
2282 SECONDARY_EXEC_TSC_SCALING |
2283 SECONDARY_EXEC_DESC);
2284
55d2375e 2285 if (nested_cpu_has(vmcs12,
c3bb9a20
SC
2286 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
2287 exec_control |= vmcs12->secondary_vm_exec_control;
2288
2289 /* PML is emulated and never enabled in hardware for L2. */
2290 exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
55d2375e
SC
2291
2292 /* VMCS shadowing for L2 is emulated for now */
2293 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
2294
55d2375e 2295 /*
469debdb
SC
2296 * Preset *DT exiting when emulating UMIP, so that vmx_set_cr4()
2297 * will not have to rewrite the controls just for this bit.
55d2375e 2298 */
469debdb
SC
2299 if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated() &&
2300 (vmcs12->guest_cr4 & X86_CR4_UMIP))
2301 exec_control |= SECONDARY_EXEC_DESC;
55d2375e 2302
55d2375e
SC
2303 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
2304 vmcs_write16(GUEST_INTR_STATUS,
2305 vmcs12->guest_intr_status);
55d2375e 2306
bddd82d1
KS
2307 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
2308 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
2309
72add915
SC
2310 if (exec_control & SECONDARY_EXEC_ENCLS_EXITING)
2311 vmx_write_encls_bitmap(&vmx->vcpu, vmcs12);
2312
3af80fec 2313 secondary_exec_controls_set(vmx, exec_control);
55d2375e
SC
2314 }
2315
2316 /*
2317 * ENTRY CONTROLS
2318 *
2319 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
2320 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
2321 * on the related bits (if supported by the CPU) in the hope that
2322 * we can avoid VMWrites during vmx_set_efer().
2323 */
389ab252
SC
2324 exec_control = __vm_entry_controls_get(vmcs01);
2325 exec_control |= vmcs12->vm_entry_controls;
2326 exec_control &= ~(VM_ENTRY_IA32E_MODE | VM_ENTRY_LOAD_IA32_EFER);
55d2375e
SC
2327 if (cpu_has_load_ia32_efer()) {
2328 if (guest_efer & EFER_LMA)
2329 exec_control |= VM_ENTRY_IA32E_MODE;
2330 if (guest_efer != host_efer)
2331 exec_control |= VM_ENTRY_LOAD_IA32_EFER;
2332 }
3af80fec 2333 vm_entry_controls_set(vmx, exec_control);
55d2375e
SC
2334
2335 /*
2336 * EXIT CONTROLS
2337 *
2338 * L2->L1 exit controls are emulated - the hardware exit is to L0 so
2339 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
2340 * bits may be modified by vmx_set_efer() in prepare_vmcs02().
2341 */
389ab252 2342 exec_control = __vm_exit_controls_get(vmcs01);
55d2375e
SC
2343 if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
2344 exec_control |= VM_EXIT_LOAD_IA32_EFER;
389ab252
SC
2345 else
2346 exec_control &= ~VM_EXIT_LOAD_IA32_EFER;
3af80fec 2347 vm_exit_controls_set(vmx, exec_control);
55d2375e
SC
2348
2349 /*
2350 * Interrupt/Exception Fields
2351 */
2352 if (vmx->nested.nested_run_pending) {
2353 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2354 vmcs12->vm_entry_intr_info_field);
2355 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
2356 vmcs12->vm_entry_exception_error_code);
2357 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2358 vmcs12->vm_entry_instruction_len);
2359 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
2360 vmcs12->guest_interruptibility_info);
2361 vmx->loaded_vmcs->nmi_known_unmasked =
2362 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
2363 } else {
2364 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
2365 }
2366}
2367
b1346ab2 2368static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
55d2375e
SC
2369{
2370 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
2371
2372 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2373 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
2374 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
2375 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
2376 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
2377 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
2378 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
2379 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
2380 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
2381 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
2382 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
2383 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
2384 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
2385 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
2386 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
2387 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
2388 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
2389 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
2390 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
2391 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
1c6f0b47
SC
2392 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
2393 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
55d2375e
SC
2394 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
2395 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
2396 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
2397 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
2398 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
2399 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
2400 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
2401 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
2402 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
2403 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
2404 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
2405 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
2406 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
2407 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
2408 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
2409 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
fc387d8d
SC
2410
2411 vmx->segment_cache.bitmask = 0;
55d2375e
SC
2412 }
2413
2414 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2415 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) {
2416 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
2417 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
2418 vmcs12->guest_pending_dbg_exceptions);
2419 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
2420 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
2421
2422 /*
2423		 * L1 may access L2's PDPTRs, so save them to construct
2424		 * vmcs12.
2425 */
2426 if (enable_ept) {
2427 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2428 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2429 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2430 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2431 }
c27e5b0d
SC
2432
2433 if (kvm_mpx_supported() && vmx->nested.nested_run_pending &&
2434 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
2435 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
55d2375e
SC
2436 }
2437
2438 if (nested_cpu_has_xsaves(vmcs12))
2439 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
2440
2441 /*
2442 * Whether page-faults are trapped is determined by a combination of
a0c13434
PB
2443 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. If L0
2444 * doesn't care about page faults then we should set all of these to
2445	 * L1's desires. However, if L0 does care about (some) page faults, it
2446	 * is not easy (if at all possible?) to merge L0's and L1's desires, so
2447	 * we simply ask to exit on each and every L2 page fault. This is done by
2448 * setting MASK=MATCH=0 and (see below) EB.PF=1.
55d2375e
SC
2449 * Note that below we don't need special code to set EB.PF beyond the
2450 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
2451 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
2452 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
2453 */
a0c13434
PB
2454 if (vmx_need_pf_intercept(&vmx->vcpu)) {
2455 /*
2456 * TODO: if both L0 and L1 need the same MASK and MATCH,
2457 * go ahead and use it?
2458 */
2459 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
2460 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
2461 } else {
2462 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, vmcs12->page_fault_error_code_mask);
2463 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, vmcs12->page_fault_error_code_match);
2464 }
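	/*
	 * For reference, the architectural rule the MASK/MATCH pair feeds
	 * into (a sketch of the SDM behaviour, not code used here): a #PF
	 * with page-fault error code 'pfec' causes a VM-Exit iff
	 *
	 *	((pfec & PFEC_MASK) == PFEC_MATCH) == EXCEPTION_BITMAP.PF
	 *
	 * With MASK == MATCH == 0 the left-hand side is always true, so
	 * setting EB.PF (see the comment above) forces an exit on every L2
	 * page fault.
	 */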
55d2375e
SC
2465
2466 if (cpu_has_vmx_apicv()) {
2467 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
2468 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
2469 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
2470 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
2471 }
2472
662f1d1d
AL
2473 /*
2474 * Make sure the msr_autostore list is up to date before we set the
2475 * count in the vmcs02.
2476 */
2477 prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC);
2478
2479 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr);
55d2375e
SC
2480 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
2481 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
2482
2483 set_cr4_guest_host_mask(vmx);
55d2375e
SC
2484}
2485
2486/*
2487 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
2488 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
2489 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
2490 * guest in a way that is appropriate both to L1's requests and to our own
2491 * needs. In addition to modifying the active vmcs (which is vmcs02), this
2492 * function also has necessary side effects, such as setting various
2493 * vcpu->arch fields.
2494 * Returns 0 on success and a negative error code on failure. The VM-entry
2495 * failure code is assigned to *entry_failure_code on failure.
2496 */
2497static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
0f857223 2498 bool from_vmentry,
68cda40d 2499 enum vm_entry_failure_code *entry_failure_code)
55d2375e
SC
2500{
2501 struct vcpu_vmx *vmx = to_vmx(vcpu);
c7554efc 2502 bool load_guest_pdptrs_vmcs12 = false;
55d2375e 2503
1e9dfbd7 2504 if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
b1346ab2 2505 prepare_vmcs02_rare(vmx, vmcs12);
55d2375e 2506 vmx->nested.dirty_vmcs12 = false;
55d2375e 2507
1e9dfbd7
VK
2508 load_guest_pdptrs_vmcs12 = !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) ||
2509 !(vmx->nested.hv_evmcs->hv_clean_fields &
c7554efc 2510 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1);
55d2375e
SC
2511 }
2512
2513 if (vmx->nested.nested_run_pending &&
2514 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
2515 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
2516 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
2517 } else {
2518 kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
2519 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
2520 }
3b013a29
SC
2521 if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
2522 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
2523 vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
55d2375e
SC
2524 vmx_set_rflags(vcpu, vmcs12->guest_rflags);
2525
55d2375e
SC
2526 /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
2527 * bitwise-or of what L1 wants to trap for L2, and what we want to
2528 * trap. Note that CR0.TS also needs updating - we do this later.
2529 */
b6a7cc35 2530 vmx_update_exception_bitmap(vcpu);
55d2375e
SC
2531 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
2532 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
2533
2534 if (vmx->nested.nested_run_pending &&
2535 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
2536 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
2537 vcpu->arch.pat = vmcs12->guest_ia32_pat;
2538 } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2539 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
2540 }
2541
d041b5ea
IS
2542 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
2543 vcpu->arch.l1_tsc_offset,
2544 vmx_get_l2_tsc_offset(vcpu),
2545 vmx_get_l2_tsc_multiplier(vcpu));
2546
2547 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
2548 vcpu->arch.l1_tsc_scaling_ratio,
2549 vmx_get_l2_tsc_multiplier(vcpu));
2550
55d2375e 2551 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
55d2375e 2552 if (kvm_has_tsc_control)
1ab9287a 2553 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
55d2375e 2554
50b265a4 2555 nested_vmx_transition_tlb_flush(vcpu, vmcs12, true);
55d2375e
SC
2556
2557 if (nested_cpu_has_ept(vmcs12))
2558 nested_ept_init_mmu_context(vcpu);
55d2375e
SC
2559
2560 /*
2561 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
2562 * bits which we consider mandatory enabled.
2563 * The CR0_READ_SHADOW is what L2 should have expected to read given
2564	 * the specifications by L1; it's not enough to take
2565	 * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may have
2566	 * more bits set than L1 expected.
2567 */
2568 vmx_set_cr0(vcpu, vmcs12->guest_cr0);
2569 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
2570
2571 vmx_set_cr4(vcpu, vmcs12->guest_cr4);
2572 vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
2573
2574 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
2575 /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
2576 vmx_set_efer(vcpu, vcpu->arch.efer);
2577
2578 /*
2579 * Guest state is invalid and unrestricted guest is disabled,
2580 * which means L1 attempted VMEntry to L2 with invalid state.
2581 * Fail the VMEntry.
c8607e4a
ML
2582 *
2583	 * However, when force loading the guest state (SMM exit or
2584	 * loading nested state after migration), it is possible to
2585	 * have invalid guest state now, which will be fixed later by
2586	 * restoring the L2 register state.
55d2375e 2587 */
c8607e4a 2588 if (CC(from_vmentry && !vmx_guest_state_valid(vcpu))) {
55d2375e 2589 *entry_failure_code = ENTRY_FAIL_DEFAULT;
c80add0f 2590 return -EINVAL;
55d2375e
SC
2591 }
2592
2593 /* Shadow page tables on either EPT or shadow page tables. */
2594 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
0f857223 2595 from_vmentry, entry_failure_code))
c80add0f 2596 return -EINVAL;
55d2375e 2597
04f11ef4
SC
2598 /*
2599 * Immediately write vmcs02.GUEST_CR3. It will be propagated to vmcs12
2600 * on nested VM-Exit, which can occur without actually running L2 and
727a7e27 2601 * thus without hitting vmx_load_mmu_pgd(), e.g. if L1 is entering L2 with
04f11ef4
SC
2602 * vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will intercept the
2603 * transition to HLT instead of running L2.
2604 */
2605 if (enable_ept)
2606 vmcs_writel(GUEST_CR3, vmcs12->guest_cr3);
2607
c7554efc
SC
2608 /* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */
2609 if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) &&
2610 is_pae_paging(vcpu)) {
2611 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2612 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2613 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2614 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2615 }
2616
55d2375e
SC
2617 if (!enable_ept)
2618 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
2619
71f73470 2620 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
d1968421 2621 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
bfbb307c
DC
2622 vmcs12->guest_ia32_perf_global_ctrl))) {
2623 *entry_failure_code = ENTRY_FAIL_DEFAULT;
71f73470 2624 return -EINVAL;
bfbb307c 2625 }
71f73470 2626
e9c16c78
PB
2627 kvm_rsp_write(vcpu, vmcs12->guest_rsp);
2628 kvm_rip_write(vcpu, vmcs12->guest_rip);
dc313385
VK
2629
2630 /*
2631 * It was observed that genuine Hyper-V running in L1 doesn't reset
2632	 * 'hv_clean_fields' by itself; it only sets the corresponding dirty
2633	 * bits when it changes a field in the eVMCS. Mark all fields as clean
2634 * here.
2635 */
2636 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
2637 vmx->nested.hv_evmcs->hv_clean_fields |=
2638 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
2639
55d2375e
SC
2640 return 0;
2641}
2642
2643static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
2644{
5497b955
SC
2645 if (CC(!nested_cpu_has_nmi_exiting(vmcs12) &&
2646 nested_cpu_has_virtual_nmis(vmcs12)))
55d2375e
SC
2647 return -EINVAL;
2648
5497b955 2649 if (CC(!nested_cpu_has_virtual_nmis(vmcs12) &&
4e2a0bc5 2650 nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING)))
55d2375e
SC
2651 return -EINVAL;
2652
2653 return 0;
2654}
2655
ac6389ab 2656static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp)
55d2375e
SC
2657{
2658 struct vcpu_vmx *vmx = to_vmx(vcpu);
55d2375e
SC
2659
2660 /* Check for memory type validity */
ac6389ab 2661 switch (new_eptp & VMX_EPTP_MT_MASK) {
55d2375e 2662 case VMX_EPTP_MT_UC:
5497b955 2663 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)))
55d2375e
SC
2664 return false;
2665 break;
2666 case VMX_EPTP_MT_WB:
5497b955 2667 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT)))
55d2375e
SC
2668 return false;
2669 break;
2670 default:
2671 return false;
2672 }
2673
bb1fcc70 2674 /* Page-walk levels validity. */
ac6389ab 2675 switch (new_eptp & VMX_EPTP_PWL_MASK) {
bb1fcc70
SC
2676 case VMX_EPTP_PWL_5:
2677 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT)))
2678 return false;
2679 break;
2680 case VMX_EPTP_PWL_4:
2681 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT)))
2682 return false;
2683 break;
2684 default:
55d2375e 2685 return false;
bb1fcc70 2686 }
55d2375e
SC
2687
2688 /* Reserved bits should not be set */
636e8b73 2689 if (CC(kvm_vcpu_is_illegal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f)))
55d2375e
SC
2690 return false;
2691
2692 /* AD, if set, should be supported */
ac6389ab 2693 if (new_eptp & VMX_EPTP_AD_ENABLE_BIT) {
5497b955 2694 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)))
55d2375e
SC
2695 return false;
2696 }
2697
2698 return true;
2699}
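
/*
 * For reference, the EPTP layout validated above and, as an illustrative
 * example, the value a hypervisor would build for a write-back, 4-level,
 * A/D-enabled EPT hierarchy rooted at some 'root_hpa':
 *
 *	eptp = root_hpa | VMX_EPTP_MT_WB | VMX_EPTP_PWL_4 |
 *	       VMX_EPTP_AD_ENABLE_BIT;
 *
 * Bits 2:0 encode the memory type, bits 5:3 the page-walk length minus
 * one, bit 6 enables accessed/dirty flags, and bits 11:7 plus all bits
 * above the guest's MAXPHYADDR must be zero.
 */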
2700
461b4ba4
KS
2701/*
2702 * Checks related to VM-Execution Control Fields
2703 */
2704static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
2705 struct vmcs12 *vmcs12)
55d2375e
SC
2706{
2707 struct vcpu_vmx *vmx = to_vmx(vcpu);
55d2375e 2708
5497b955
SC
2709 if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
2710 vmx->nested.msrs.pinbased_ctls_low,
2711 vmx->nested.msrs.pinbased_ctls_high)) ||
2712 CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
2713 vmx->nested.msrs.procbased_ctls_low,
2714 vmx->nested.msrs.procbased_ctls_high)))
461b4ba4 2715 return -EINVAL;
55d2375e 2716
461b4ba4 2717 if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
5497b955
SC
2718 CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control,
2719 vmx->nested.msrs.secondary_ctls_low,
2720 vmx->nested.msrs.secondary_ctls_high)))
461b4ba4
KS
2721 return -EINVAL;
2722
5497b955 2723 if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) ||
461b4ba4
KS
2724 nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
2725 nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
2726 nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
2727 nested_vmx_check_apic_access_controls(vcpu, vmcs12) ||
2728 nested_vmx_check_apicv_controls(vcpu, vmcs12) ||
2729 nested_vmx_check_nmi_controls(vmcs12) ||
2730 nested_vmx_check_pml_controls(vcpu, vmcs12) ||
2731 nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
2732 nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
2733 nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
5497b955 2734 CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
461b4ba4
KS
2735 return -EINVAL;
2736
bc441211
SC
2737 if (!nested_cpu_has_preemption_timer(vmcs12) &&
2738 nested_cpu_has_save_preemption_timer(vmcs12))
2739 return -EINVAL;
2740
461b4ba4 2741 if (nested_cpu_has_ept(vmcs12) &&
ac6389ab 2742 CC(!nested_vmx_check_eptp(vcpu, vmcs12->ept_pointer)))
461b4ba4 2743 return -EINVAL;
55d2375e
SC
2744
2745 if (nested_cpu_has_vmfunc(vmcs12)) {
5497b955
SC
2746 if (CC(vmcs12->vm_function_control &
2747 ~vmx->nested.msrs.vmfunc_controls))
461b4ba4 2748 return -EINVAL;
55d2375e
SC
2749
2750 if (nested_cpu_has_eptp_switching(vmcs12)) {
5497b955
SC
2751 if (CC(!nested_cpu_has_ept(vmcs12)) ||
2752 CC(!page_address_valid(vcpu, vmcs12->eptp_list_address)))
461b4ba4 2753 return -EINVAL;
55d2375e
SC
2754 }
2755 }
2756
461b4ba4
KS
2757 return 0;
2758}
2759
61446ba7
KS
2760/*
2761 * Checks related to VM-Exit Control Fields
2762 */
2763static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu,
2764 struct vmcs12 *vmcs12)
2765{
2766 struct vcpu_vmx *vmx = to_vmx(vcpu);
2767
5497b955
SC
2768 if (CC(!vmx_control_verify(vmcs12->vm_exit_controls,
2769 vmx->nested.msrs.exit_ctls_low,
2770 vmx->nested.msrs.exit_ctls_high)) ||
2771 CC(nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12)))
61446ba7
KS
2772 return -EINVAL;
2773
2774 return 0;
2775}
2776
5fbf9634
KS
2777/*
2778 * Checks related to VM-Entry Control Fields
2779 */
2780static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
2781 struct vmcs12 *vmcs12)
461b4ba4
KS
2782{
2783 struct vcpu_vmx *vmx = to_vmx(vcpu);
55d2375e 2784
5497b955
SC
2785 if (CC(!vmx_control_verify(vmcs12->vm_entry_controls,
2786 vmx->nested.msrs.entry_ctls_low,
2787 vmx->nested.msrs.entry_ctls_high)))
5fbf9634 2788 return -EINVAL;
55d2375e
SC
2789
2790 /*
2791 * From the Intel SDM, volume 3:
2792 * Fields relevant to VM-entry event injection must be set properly.
2793 * These fields are the VM-entry interruption-information field, the
2794 * VM-entry exception error code, and the VM-entry instruction length.
2795 */
2796 if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
2797 u32 intr_info = vmcs12->vm_entry_intr_info_field;
2798 u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
2799 u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
2800 bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
2801 bool should_have_error_code;
2802 bool urg = nested_cpu_has2(vmcs12,
2803 SECONDARY_EXEC_UNRESTRICTED_GUEST);
2804 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
2805
2806 /* VM-entry interruption-info field: interruption type */
5497b955
SC
2807 if (CC(intr_type == INTR_TYPE_RESERVED) ||
2808 CC(intr_type == INTR_TYPE_OTHER_EVENT &&
2809 !nested_cpu_supports_monitor_trap_flag(vcpu)))
5fbf9634 2810 return -EINVAL;
55d2375e
SC
2811
2812 /* VM-entry interruption-info field: vector */
5497b955
SC
2813 if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
2814 CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
2815 CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
5fbf9634 2816 return -EINVAL;
55d2375e
SC
2817
2818 /* VM-entry interruption-info field: deliver error code */
2819 should_have_error_code =
2820 intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
2821 x86_exception_has_error_code(vector);
5497b955 2822 if (CC(has_error_code != should_have_error_code))
5fbf9634 2823 return -EINVAL;
55d2375e
SC
2824
2825 /* VM-entry exception error code */
5497b955 2826 if (CC(has_error_code &&
567926cc 2827 vmcs12->vm_entry_exception_error_code & GENMASK(31, 16)))
5fbf9634 2828 return -EINVAL;
55d2375e
SC
2829
2830 /* VM-entry interruption-info field: reserved bits */
5497b955 2831 if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK))
5fbf9634 2832 return -EINVAL;
55d2375e
SC
2833
2834 /* VM-entry instruction length */
2835 switch (intr_type) {
2836 case INTR_TYPE_SOFT_EXCEPTION:
2837 case INTR_TYPE_SOFT_INTR:
2838 case INTR_TYPE_PRIV_SW_EXCEPTION:
5497b955
SC
2839 if (CC(vmcs12->vm_entry_instruction_len > 15) ||
2840 CC(vmcs12->vm_entry_instruction_len == 0 &&
2841 CC(!nested_cpu_has_zero_length_injection(vcpu))))
5fbf9634 2842 return -EINVAL;
55d2375e
SC
2843 }
2844 }
2845
5fbf9634
KS
2846 if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12))
2847 return -EINVAL;
2848
2849 return 0;
2850}
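
/*
 * An illustrative vmcs12 setup that satisfies the event injection checks
 * above: to inject #GP(0) into a protected-mode L2, L1 would program
 *
 *	vm_entry_intr_info_field = INTR_INFO_VALID_MASK |
 *				   INTR_TYPE_HARD_EXCEPTION |
 *				   INTR_INFO_DELIVER_CODE_MASK | GP_VECTOR;
 *	vm_entry_exception_error_code = 0;
 *
 * The vector (13) is <= 31, #GP delivers an error code in protected mode
 * so the deliver-error-code bit must be set, and no instruction length
 * is needed for a hardware exception.
 */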
2851
5478ba34
SC
2852static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
2853 struct vmcs12 *vmcs12)
2854{
2855 if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
2856 nested_check_vm_exit_controls(vcpu, vmcs12) ||
2857 nested_check_vm_entry_controls(vcpu, vmcs12))
98d9e858 2858 return -EINVAL;
5478ba34 2859
a8350231
VK
2860 if (to_vmx(vcpu)->nested.enlightened_vmcs_enabled)
2861 return nested_evmcs_check_controls(vmcs12);
2862
5478ba34
SC
2863 return 0;
2864}
2865
af957eeb
ML
2866static int nested_vmx_check_address_space_size(struct kvm_vcpu *vcpu,
2867 struct vmcs12 *vmcs12)
2868{
2869#ifdef CONFIG_X86_64
2870 if (CC(!!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) !=
2871 !!(vcpu->arch.efer & EFER_LMA)))
2872 return -EINVAL;
2873#endif
2874 return 0;
2875}
2876
98d9e858
PB
2877static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
2878 struct vmcs12 *vmcs12)
5fbf9634
KS
2879{
2880 bool ia32e;
2881
5497b955
SC
2882 if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) ||
2883 CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) ||
636e8b73 2884 CC(kvm_vcpu_is_illegal_gpa(vcpu, vmcs12->host_cr3)))
254b2f3b 2885 return -EINVAL;
711eff3a 2886
5497b955
SC
2887 if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
2888 CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu)))
711eff3a
KS
2889 return -EINVAL;
2890
f6b0db1f 2891 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) &&
5497b955 2892 CC(!kvm_pat_valid(vmcs12->host_ia32_pat)))
f6b0db1f
KS
2893 return -EINVAL;
2894
c547cb6f
OU
2895 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
2896 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu),
2897 vmcs12->host_ia32_perf_global_ctrl)))
2898 return -EINVAL;
2899
fd3edd4a 2900#ifdef CONFIG_X86_64
af957eeb 2901 ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE);
fd3edd4a
PB
2902#else
2903 ia32e = false;
2904#endif
2905
2906 if (ia32e) {
af957eeb 2907 if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
fd3edd4a
PB
2908 return -EINVAL;
2909 } else {
af957eeb 2910 if (CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
fd3edd4a
PB
2911 CC(vmcs12->host_cr4 & X86_CR4_PCIDE) ||
2912 CC((vmcs12->host_rip) >> 32))
2913 return -EINVAL;
2914 }
1ef23e1f 2915
5497b955
SC
2916 if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2917 CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2918 CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2919 CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2920 CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2921 CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2922 CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2923 CC(vmcs12->host_cs_selector == 0) ||
2924 CC(vmcs12->host_tr_selector == 0) ||
2925 CC(vmcs12->host_ss_selector == 0 && !ia32e))
1ef23e1f
KS
2926 return -EINVAL;
2927
5497b955
SC
2928 if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) ||
2929 CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) ||
2930 CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) ||
2931 CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) ||
fd3edd4a
PB
2932 CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)) ||
2933 CC(is_noncanonical_address(vmcs12->host_rip, vcpu)))
5845038c 2934 return -EINVAL;
1ef23e1f 2935
5fbf9634
KS
2936 /*
2937 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
2938 * IA32_EFER MSR must be 0 in the field for that register. In addition,
2939 * the values of the LMA and LME bits in the field must each be that of
2940 * the host address-space size VM-exit control.
2941 */
2942 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
5497b955
SC
2943 if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) ||
2944 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) ||
2945 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)))
254b2f3b 2946 return -EINVAL;
5fbf9634
KS
2947 }
2948
55d2375e
SC
2949 return 0;
2950}
2951
2952static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
2953 struct vmcs12 *vmcs12)
2954{
7d0172b3
DW
2955 struct vcpu_vmx *vmx = to_vmx(vcpu);
2956 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
2957 struct vmcs_hdr hdr;
55d2375e 2958
64c78508 2959 if (vmcs12->vmcs_link_pointer == INVALID_GPA)
55d2375e
SC
2960 return 0;
2961
5497b955 2962 if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer)))
55d2375e
SC
2963 return -EINVAL;
2964
7d0172b3
DW
2965 if (ghc->gpa != vmcs12->vmcs_link_pointer &&
2966 CC(kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
2967 vmcs12->vmcs_link_pointer, VMCS12_SIZE)))
2968 return -EINVAL;
55d2375e 2969
7d0172b3
DW
2970 if (CC(kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr,
2971 offsetof(struct vmcs12, hdr),
2972 sizeof(hdr))))
2973 return -EINVAL;
88925305 2974
7d0172b3
DW
2975 if (CC(hdr.revision_id != VMCS12_REVISION) ||
2976 CC(hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)))
2977 return -EINVAL;
88925305 2978
7d0172b3 2979 return 0;
55d2375e
SC
2980}
2981
9c3e922b
SC
2982/*
2983 * Checks related to Guest Non-register State
2984 */
2985static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
2986{
5497b955 2987 if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
bf0cd88c
YQ
2988 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT &&
2989 vmcs12->guest_activity_state != GUEST_ACTIVITY_WAIT_SIPI))
9c3e922b
SC
2990 return -EINVAL;
2991
2992 return 0;
2993}
2994
5478ba34
SC
2995static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
2996 struct vmcs12 *vmcs12,
68cda40d 2997 enum vm_entry_failure_code *entry_failure_code)
55d2375e
SC
2998{
2999 bool ia32e;
3000
68cda40d 3001 *entry_failure_code = ENTRY_FAIL_DEFAULT;
55d2375e 3002
5497b955
SC
3003 if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) ||
3004 CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)))
c80add0f 3005 return -EINVAL;
55d2375e 3006
b91991bf
KS
3007 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) &&
3008 CC(!kvm_dr7_valid(vmcs12->guest_dr7)))
3009 return -EINVAL;
3010
de2bc2bf 3011 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
5497b955 3012 CC(!kvm_pat_valid(vmcs12->guest_ia32_pat)))
c80add0f 3013 return -EINVAL;
55d2375e
SC
3014
3015 if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
68cda40d 3016 *entry_failure_code = ENTRY_FAIL_VMCS_LINK_PTR;
c80add0f 3017 return -EINVAL;
55d2375e
SC
3018 }
3019
bfc6ad6a
OU
3020 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
3021 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu),
3022 vmcs12->guest_ia32_perf_global_ctrl)))
3023 return -EINVAL;
3024
55d2375e
SC
3025 /*
3026 * If the load IA32_EFER VM-entry control is 1, the following checks
3027 * are performed on the field for the IA32_EFER MSR:
3028 * - Bits reserved in the IA32_EFER MSR must be 0.
3029 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
3030 * the IA-32e mode guest VM-exit control. It must also be identical
3031 * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
3032 * CR0.PG) is 1.
3033 */
3034 if (to_vmx(vcpu)->nested.nested_run_pending &&
3035 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
3036 ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
5497b955
SC
3037 if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) ||
3038 CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) ||
3039 CC(((vmcs12->guest_cr0 & X86_CR0_PG) &&
3040 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))))
c80add0f 3041 return -EINVAL;
55d2375e
SC
3042 }
3043
3044 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
5497b955
SC
3045 (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) ||
3046 CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))))
c80add0f 3047 return -EINVAL;
55d2375e 3048
9c3e922b 3049 if (nested_check_guest_non_reg_state(vmcs12))
c80add0f 3050 return -EINVAL;
55d2375e
SC
3051
3052 return 0;
3053}
3054
453eafbe 3055static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
55d2375e
SC
3056{
3057 struct vcpu_vmx *vmx = to_vmx(vcpu);
1a715810 3058 unsigned long cr3, cr4;
f1727b49 3059 bool vm_fail;
55d2375e
SC
3060
3061 if (!nested_early_check)
3062 return 0;
3063
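	/*
	 * Note (descriptive comment, not in the original source): zeroing the
	 * MSR-load counts below keeps this throw-away VMEnter from processing
	 * the autoload lists; the real counts are restored after
	 * __vmx_vcpu_run() returns.
	 */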
3064 if (vmx->msr_autoload.host.nr)
3065 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
3066 if (vmx->msr_autoload.guest.nr)
3067 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
3068
3069 preempt_disable();
3070
3071 vmx_prepare_switch_to_guest(vcpu);
3072
3073 /*
3074 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS,
3075 * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to
49f933d4 3076 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e.
55d2375e
SC
3077 * there is no need to preserve other bits or save/restore the field.
3078 */
3079 vmcs_writel(GUEST_RFLAGS, 0);
3080
1a715810
SC
3081 cr3 = __get_current_cr3_fast();
3082 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
3083 vmcs_writel(HOST_CR3, cr3);
3084 vmx->loaded_vmcs->host_state.cr3 = cr3;
3085 }
3086
55d2375e
SC
3087 cr4 = cr4_read_shadow();
3088 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
3089 vmcs_writel(HOST_CR4, cr4);
3090 vmx->loaded_vmcs->host_state.cr4 = cr4;
3091 }
3092
150f17bf
UB
3093 vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
3094 vmx->loaded_vmcs->launched);
55d2375e 3095
55d2375e
SC
3096 if (vmx->msr_autoload.host.nr)
3097 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
3098 if (vmx->msr_autoload.guest.nr)
3099 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
3100
f1727b49 3101 if (vm_fail) {
380e0055
SC
3102 u32 error = vmcs_read32(VM_INSTRUCTION_ERROR);
3103
541e886f 3104 preempt_enable();
380e0055
SC
3105
3106 trace_kvm_nested_vmenter_failed(
3107 "early hardware check VM-instruction error: ", error);
3108 WARN_ON_ONCE(error != VMXERR_ENTRY_INVALID_CONTROL_FIELD);
55d2375e
SC
3109 return 1;
3110 }
3111
3112 /*
3113 * VMExit clears RFLAGS.IF and DR7, even on a consistency check.
3114 */
55d2375e
SC
3115 if (hw_breakpoint_active())
3116 set_debugreg(__this_cpu_read(cpu_dr7), 7);
84b6a349 3117 local_irq_enable();
541e886f 3118 preempt_enable();
55d2375e
SC
3119
3120 /*
3121 * A non-failing VMEntry means we somehow entered guest mode with
3122 * an illegal RIP, and that's just the tip of the iceberg. There
3123 * is no telling what memory has been modified or what state has
3124 * been exposed to unknown code. Hitting this all but guarantees
3125 * a (very critical) hardware issue.
3126 */
3127 WARN_ON(!(vmcs_read32(VM_EXIT_REASON) &
3128 VMX_EXIT_REASONS_FAILED_VMENTRY));
3129
3130 return 0;
3131}
55d2375e 3132
9a78e158 3133static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
55d2375e 3134{
55d2375e 3135 struct vcpu_vmx *vmx = to_vmx(vcpu);
55d2375e 3136
e942dbf8
VK
3137 /*
 3138	 * hv_evmcs may end up not being mapped after migration (when
3139 * L2 was running), map it here to make sure vmcs12 changes are
3140 * properly reflected.
3141 */
1e9dfbd7 3142 if (vmx->nested.enlightened_vmcs_enabled &&
27849968 3143 vmx->nested.hv_evmcs_vmptr == EVMPTR_MAP_PENDING) {
b6a0653a
VK
3144 enum nested_evmptrld_status evmptrld_status =
3145 nested_vmx_handle_enlightened_vmptrld(vcpu, false);
3146
3147 if (evmptrld_status == EVMPTRLD_VMFAIL ||
f5c7e842 3148 evmptrld_status == EVMPTRLD_ERROR)
b6a0653a 3149 return false;
8629b625
VK
3150
3151 /*
 3152	 * Post migration, vmcs12 always provides the most up-to-date
 3153	 * information; copy it to the eVMCS upon entry.
3154 */
3155 vmx->nested.need_vmcs12_to_shadow_sync = true;
b6a0653a 3156 }
e942dbf8 3157
9a78e158
PB
3158 return true;
3159}
3160
3161static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
3162{
3163 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3164 struct vcpu_vmx *vmx = to_vmx(vcpu);
3165 struct kvm_host_map *map;
3166 struct page *page;
3167 u64 hpa;
3168
158a48ec
ML
3169 if (!vcpu->arch.pdptrs_from_userspace &&
3170 !nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
0f857223
ML
3171 /*
3172 * Reload the guest's PDPTRs since after a migration
3173 * the guest CR3 might be restored prior to setting the nested
 3174	 * state, which can lead to loading the wrong PDPTRs.
3175 */
2df4a5eb 3176 if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
0f857223
ML
3177 return false;
3178 }
3179
3180
55d2375e
SC
3181 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
3182 /*
3183 * Translate L1 physical address to host physical
3184 * address for vmcs02. Keep the page pinned, so this
3185 * physical address remains valid. We keep a reference
3186 * to it so we can release it later.
3187 */
3188 if (vmx->nested.apic_access_page) { /* shouldn't happen */
b11494bc 3189 kvm_release_page_clean(vmx->nested.apic_access_page);
55d2375e
SC
3190 vmx->nested.apic_access_page = NULL;
3191 }
3192 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
55d2375e
SC
3193 if (!is_error_page(page)) {
3194 vmx->nested.apic_access_page = page;
3195 hpa = page_to_phys(vmx->nested.apic_access_page);
3196 vmcs_write64(APIC_ACCESS_ADDR, hpa);
3197 } else {
671ddc70
JM
3198 pr_debug_ratelimited("%s: no backing 'struct page' for APIC-access address in vmcs12\n",
3199 __func__);
3200 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3201 vcpu->run->internal.suberror =
3202 KVM_INTERNAL_ERROR_EMULATION;
3203 vcpu->run->internal.ndata = 0;
3204 return false;
55d2375e
SC
3205 }
3206 }
3207
3208 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
96c66e87 3209 map = &vmx->nested.virtual_apic_map;
55d2375e 3210
96c66e87
KA
3211 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) {
3212 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn));
69090810
PB
3213 } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) &&
3214 nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) &&
3215 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
3216 /*
3217 * The processor will never use the TPR shadow, simply
3218 * clear the bit from the execution control. Such a
3219 * configuration is useless, but it happens in tests.
3220 * For any other configuration, failing the vm entry is
3221 * _not_ what the processor does but it's basically the
3222 * only possibility we have.
3223 */
2183f564 3224 exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW);
69090810 3225 } else {
ca2f5466
SC
3226 /*
3227 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to
3228 * force VM-Entry to fail.
3229 */
64c78508 3230 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, INVALID_GPA);
55d2375e
SC
3231 }
3232 }
3233
3234 if (nested_cpu_has_posted_intr(vmcs12)) {
3278e049
KA
3235 map = &vmx->nested.pi_desc_map;
3236
3237 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) {
3238 vmx->nested.pi_desc =
3239 (struct pi_desc *)(((void *)map->hva) +
3240 offset_in_page(vmcs12->posted_intr_desc_addr));
3241 vmcs_write64(POSTED_INTR_DESC_ADDR,
3242 pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr));
966eefb8
JM
3243 } else {
3244 /*
3245 * Defer the KVM_INTERNAL_EXIT until KVM tries to
3246 * access the contents of the VMCS12 posted interrupt
3247 * descriptor. (Note that KVM may do this when it
3248 * should not, per the architectural specification.)
3249 */
3250 vmx->nested.pi_desc = NULL;
3251 pin_controls_clearbit(vmx, PIN_BASED_POSTED_INTR);
55d2375e 3252 }
55d2375e
SC
3253 }
3254 if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
2183f564 3255 exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
55d2375e 3256 else
2183f564 3257 exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
9a78e158
PB
3258
3259 return true;
3260}
3261
3262static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
3263{
f5c7e842
VK
3264 if (!nested_get_evmcs_page(vcpu)) {
3265 pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
3266 __func__);
3267 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3268 vcpu->run->internal.suberror =
3269 KVM_INTERNAL_ERROR_EMULATION;
3270 vcpu->run->internal.ndata = 0;
3271
9a78e158 3272 return false;
f5c7e842 3273 }
9a78e158
PB
3274
3275 if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
3276 return false;
3277
671ddc70 3278 return true;
55d2375e
SC
3279}
3280
02f5fb2e
SC
3281static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
3282{
3283 struct vmcs12 *vmcs12;
3284 struct vcpu_vmx *vmx = to_vmx(vcpu);
3285 gpa_t dst;
3286
3287 if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
3288 return 0;
3289
3290 if (WARN_ON_ONCE(vmx->nested.pml_full))
3291 return 1;
3292
3293 /*
3294 * Check if PML is enabled for the nested guest. Whether eptp bit 6 is
3295 * set is already checked as part of A/D emulation.
3296 */
3297 vmcs12 = get_vmcs12(vcpu);
3298 if (!nested_cpu_has_pml(vmcs12))
3299 return 0;
3300
3301 if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
3302 vmx->nested.pml_full = true;
3303 return 1;
3304 }
3305
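	/* The PML buffer records 4KiB-aligned GPAs; clear the page-offset bits. */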
3306 gpa &= ~0xFFFull;
3307 dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
3308
3309 if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
3310 offset_in_page(dst), sizeof(gpa)))
3311 return 0;
3312
3313 vmcs12->guest_pml_index--;
3314
3315 return 0;
3316}
3317
55d2375e
SC
3318/*
3319 * Intel's VMX Instruction Reference specifies a common set of prerequisites
3320 * for running VMX instructions (except VMXON, whose prerequisites are
3321 * slightly different). It also specifies what exception to inject otherwise.
3322 * Note that many of these exceptions have priority over VM exits, so they
3323 * don't have to be checked again here.
3324 */
3325static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
3326{
3327 if (!to_vmx(vcpu)->nested.vmxon) {
3328 kvm_queue_exception(vcpu, UD_VECTOR);
3329 return 0;
3330 }
3331
3332 if (vmx_get_cpl(vcpu)) {
3333 kvm_inject_gp(vcpu, 0);
3334 return 0;
3335 }
3336
3337 return 1;
3338}
3339
3340static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
3341{
3342 u8 rvi = vmx_get_rvi();
3343 u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
3344
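	/*
	 * Compare only the priority classes (bits 7:4): a virtual interrupt
	 * is deliverable when RVI's priority class is above that of the
	 * virtual PPR, mirroring the hardware's virtual-interrupt evaluation.
	 */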
3345 return ((rvi & 0xf0) > (vppr & 0xf0));
3346}
3347
3348static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
3349 struct vmcs12 *vmcs12);
3350
3351/*
3352 * If from_vmentry is false, this is being called from state restore (either RSM
3353 * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
671ddc70
JM
3354 *
3355 * Returns:
463bfeee
ML
3356 * NVMX_VMENTRY_SUCCESS: Entered VMX non-root mode
3357 * NVMX_VMENTRY_VMFAIL: Consistency check VMFail
3358 * NVMX_VMENTRY_VMEXIT: Consistency check VMExit
3359 * NVMX_VMENTRY_KVM_INTERNAL_ERROR: KVM internal error
55d2375e 3360 */
671ddc70
JM
3361enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
3362 bool from_vmentry)
55d2375e
SC
3363{
3364 struct vcpu_vmx *vmx = to_vmx(vcpu);
3365 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
68cda40d 3366 enum vm_entry_failure_code entry_failure_code;
55d2375e 3367 bool evaluate_pending_interrupts;
8e533240
SC
3368 union vmx_exit_reason exit_reason = {
3369 .basic = EXIT_REASON_INVALID_STATE,
3370 .failed_vmentry = 1,
3371 };
3372 u32 failed_index;
55d2375e 3373
40e5f908 3374 kvm_service_local_tlb_flush_requests(vcpu);
eeeb4f67 3375
2183f564 3376 evaluate_pending_interrupts = exec_controls_get(vmx) &
4e2a0bc5 3377 (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING);
55d2375e
SC
3378 if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
3379 evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
3380
3381 if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
3382 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
3383 if (kvm_mpx_supported() &&
3384 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
3385 vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
3386
f087a029
SC
3387 /*
3388 * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and*
3389 * nested early checks are disabled. In the event of a "late" VM-Fail,
3390 * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its
3391 * software model to the pre-VMEntry host state. When EPT is disabled,
3392 * GUEST_CR3 holds KVM's shadow CR3, not L1's "real" CR3, which causes
3393 * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing
3394 * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to
3395 * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested
3396 * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is
3397 * guaranteed to be overwritten with a shadow CR3 prior to re-entering
3398 * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as
3399 * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks
3400 * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail
3401 * path would need to manually save/restore vmcs01.GUEST_CR3.
3402 */
3403 if (!enable_ept && !nested_early_check)
3404 vmcs_writel(GUEST_CR3, vcpu->arch.cr3);
3405
55d2375e
SC
3406 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
3407
389ab252 3408 prepare_vmcs02_early(vmx, &vmx->vmcs01, vmcs12);
55d2375e
SC
3409
3410 if (from_vmentry) {
b89d5ad0
SC
3411 if (unlikely(!nested_get_vmcs12_pages(vcpu))) {
3412 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
671ddc70 3413 return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
b89d5ad0 3414 }
55d2375e
SC
3415
3416 if (nested_vmx_check_vmentry_hw(vcpu)) {
3417 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
671ddc70 3418 return NVMX_VMENTRY_VMFAIL;
55d2375e
SC
3419 }
3420
68cda40d
SC
3421 if (nested_vmx_check_guest_state(vcpu, vmcs12,
3422 &entry_failure_code)) {
8e533240 3423 exit_reason.basic = EXIT_REASON_INVALID_STATE;
68cda40d 3424 vmcs12->exit_qualification = entry_failure_code;
55d2375e 3425 goto vmentry_fail_vmexit;
68cda40d 3426 }
55d2375e
SC
3427 }
3428
3429 enter_guest_mode(vcpu);
55d2375e 3430
0f857223 3431 if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &entry_failure_code)) {
8e533240 3432 exit_reason.basic = EXIT_REASON_INVALID_STATE;
68cda40d 3433 vmcs12->exit_qualification = entry_failure_code;
55d2375e 3434 goto vmentry_fail_vmexit_guest_mode;
68cda40d 3435 }
55d2375e
SC
3436
3437 if (from_vmentry) {
68cda40d
SC
3438 failed_index = nested_vmx_load_msr(vcpu,
3439 vmcs12->vm_entry_msr_load_addr,
3440 vmcs12->vm_entry_msr_load_count);
3441 if (failed_index) {
8e533240 3442 exit_reason.basic = EXIT_REASON_MSR_LOAD_FAIL;
68cda40d 3443 vmcs12->exit_qualification = failed_index;
55d2375e 3444 goto vmentry_fail_vmexit_guest_mode;
68cda40d 3445 }
55d2375e
SC
3446 } else {
3447 /*
3448 * The MMU is not initialized to point at the right entities yet and
3449 * "get pages" would need to read data from the guest (i.e. we will
3450 * need to perform gpa to hpa translation). Request a call
3451 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs
3452 * have already been set at vmentry time and should not be reset.
3453 */
729c15c2 3454 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
55d2375e
SC
3455 }
3456
3457 /*
3458 * If L1 had a pending IRQ/NMI until it executed
3459 * VMLAUNCH/VMRESUME which wasn't delivered because it was
3460 * disallowed (e.g. interrupts disabled), L0 needs to
3461 * evaluate if this pending event should cause an exit from L2
 3462	 * to L1 or be delivered directly to L2 (e.g. in case L1 doesn't
3463 * intercept EXTERNAL_INTERRUPT).
3464 *
3465 * Usually this would be handled by the processor noticing an
3466 * IRQ/NMI window request, or checking RVI during evaluation of
3467 * pending virtual interrupts. However, this setting was done
3468 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
3469 * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
3470 */
3471 if (unlikely(evaluate_pending_interrupts))
3472 kvm_make_request(KVM_REQ_EVENT, vcpu);
3473
359a6c3d
PB
3474 /*
3475 * Do not start the preemption timer hrtimer until after we know
3476 * we are successful, so that only nested_vmx_vmexit needs to cancel
3477 * the timer.
3478 */
3479 vmx->nested.preemption_timer_expired = false;
850448f3
PS
3480 if (nested_cpu_has_preemption_timer(vmcs12)) {
3481 u64 timer_value = vmx_calc_preemption_timer_value(vcpu);
3482 vmx_start_preemption_timer(vcpu, timer_value);
3483 }
359a6c3d 3484
55d2375e
SC
3485 /*
3486 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
3487 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
3488 * returned as far as L1 is concerned. It will only return (and set
3489 * the success flag) when L2 exits (see nested_vmx_vmexit()).
3490 */
671ddc70 3491 return NVMX_VMENTRY_SUCCESS;
55d2375e
SC
3492
3493 /*
3494 * A failed consistency check that leads to a VMExit during L1's
3495 * VMEnter to L2 is a variation of a normal VMexit, as explained in
3496 * 26.7 "VM-entry failures during or after loading guest state".
3497 */
3498vmentry_fail_vmexit_guest_mode:
5e3d394f 3499 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
55d2375e
SC
3500 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
3501 leave_guest_mode(vcpu);
3502
3503vmentry_fail_vmexit:
3504 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3505
3506 if (!from_vmentry)
671ddc70 3507 return NVMX_VMENTRY_VMEXIT;
55d2375e
SC
3508
3509 load_vmcs12_host_state(vcpu, vmcs12);
8e533240 3510 vmcs12->vm_exit_reason = exit_reason.full;
1e9dfbd7 3511 if (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
3731905e 3512 vmx->nested.need_vmcs12_to_shadow_sync = true;
671ddc70 3513 return NVMX_VMENTRY_VMEXIT;
55d2375e
SC
3514}
3515
3516/*
3517 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
3518 * for running an L2 nested guest.
3519 */
3520static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
3521{
3522 struct vmcs12 *vmcs12;
671ddc70 3523 enum nvmx_vmentry_status status;
55d2375e
SC
3524 struct vcpu_vmx *vmx = to_vmx(vcpu);
3525 u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
b6a0653a 3526 enum nested_evmptrld_status evmptrld_status;
55d2375e
SC
3527
3528 if (!nested_vmx_check_permission(vcpu))
3529 return 1;
3530
b6a0653a
VK
3531 evmptrld_status = nested_vmx_handle_enlightened_vmptrld(vcpu, launch);
3532 if (evmptrld_status == EVMPTRLD_ERROR) {
3533 kvm_queue_exception(vcpu, UD_VECTOR);
55d2375e 3534 return 1;
b6a0653a 3535 }
55d2375e 3536
018d70ff
EH
3537 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
3538
3539 if (CC(evmptrld_status == EVMPTRLD_VMFAIL))
3540 return nested_vmx_failInvalid(vcpu);
3541
1e9dfbd7 3542 if (CC(!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) &&
64c78508 3543 vmx->nested.current_vmptr == INVALID_GPA))
55d2375e
SC
3544 return nested_vmx_failInvalid(vcpu);
3545
3546 vmcs12 = get_vmcs12(vcpu);
3547
3548 /*
3549 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact
3550 * that there *is* a valid VMCS pointer, RFLAGS.CF is set
3551 * rather than RFLAGS.ZF, and no error number is stored to the
3552 * VM-instruction error field.
3553 */
fc595f35 3554 if (CC(vmcs12->hdr.shadow_vmcs))
55d2375e
SC
3555 return nested_vmx_failInvalid(vcpu);
3556
1e9dfbd7 3557 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
d6bf71a1 3558 copy_enlightened_to_vmcs12(vmx, vmx->nested.hv_evmcs->hv_clean_fields);
55d2375e
SC
3559 /* Enlightened VMCS doesn't have launch state */
3560 vmcs12->launch_state = !launch;
3561 } else if (enable_shadow_vmcs) {
3562 copy_shadow_to_vmcs12(vmx);
3563 }
3564
3565 /*
3566 * The nested entry process starts with enforcing various prerequisites
 3567	 * on vmcs12 as required by the Intel SDM, and acting appropriately when
3568 * they fail: As the SDM explains, some conditions should cause the
3569 * instruction to fail, while others will cause the instruction to seem
3570 * to succeed, but return an EXIT_REASON_INVALID_STATE.
3571 * To speed up the normal (success) code path, we should avoid checking
3572 * for misconfigurations which will anyway be caught by the processor
3573 * when using the merged vmcs02.
3574 */
fc595f35 3575 if (CC(interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS))
b2656e4d 3576 return nested_vmx_fail(vcpu, VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
55d2375e 3577
fc595f35 3578 if (CC(vmcs12->launch_state == launch))
b2656e4d 3579 return nested_vmx_fail(vcpu,
55d2375e
SC
3580 launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
3581 : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
3582
98d9e858 3583 if (nested_vmx_check_controls(vcpu, vmcs12))
b2656e4d 3584 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
5478ba34 3585
af957eeb
ML
3586 if (nested_vmx_check_address_space_size(vcpu, vmcs12))
3587 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
3588
98d9e858 3589 if (nested_vmx_check_host_state(vcpu, vmcs12))
b2656e4d 3590 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
55d2375e
SC
3591
3592 /*
3593 * We're finally done with prerequisite checking, and can start with
3594 * the nested entry.
3595 */
3596 vmx->nested.nested_run_pending = 1;
850448f3 3597 vmx->nested.has_preemption_timer_deadline = false;
671ddc70
JM
3598 status = nested_vmx_enter_non_root_mode(vcpu, true);
3599 if (unlikely(status != NVMX_VMENTRY_SUCCESS))
3600 goto vmentry_failed;
55d2375e 3601
25bb2cf9
SC
3602 /* Emulate processing of posted interrupts on VM-Enter. */
3603 if (nested_cpu_has_posted_intr(vmcs12) &&
3604 kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) {
3605 vmx->nested.pi_pending = true;
3606 kvm_make_request(KVM_REQ_EVENT, vcpu);
3607 kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv);
3608 }
3609
55d2375e
SC
3610 /* Hide L1D cache contents from the nested guest. */
3611 vmx->vcpu.arch.l1tf_flush_l1d = true;
3612
3613 /*
3614 * Must happen outside of nested_vmx_enter_non_root_mode() as it will
3615 * also be used as part of restoring nVMX state for
3616 * snapshot restore (migration).
3617 *
 3618	 * In this flow, it is assumed that the vmcs12 cache was
163b0991 3619 * transferred as part of captured nVMX state and should
55d2375e
SC
3620 * therefore not be read from guest memory (which may not
3621 * exist on destination host yet).
3622 */
3623 nested_cache_shadow_vmcs12(vcpu, vmcs12);
3624
bf0cd88c
YQ
3625 switch (vmcs12->guest_activity_state) {
3626 case GUEST_ACTIVITY_HLT:
3627 /*
3628 * If we're entering a halted L2 vcpu and the L2 vcpu won't be
3629 * awakened by event injection or by an NMI-window VM-exit or
3630 * by an interrupt-window VM-exit, halt the vcpu.
3631 */
3632 if (!(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
3633 !nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING) &&
3634 !(nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING) &&
3635 (vmcs12->guest_rflags & X86_EFLAGS_IF))) {
3636 vmx->nested.nested_run_pending = 0;
1460179d 3637 return kvm_emulate_halt_noskip(vcpu);
bf0cd88c
YQ
3638 }
3639 break;
3640 case GUEST_ACTIVITY_WAIT_SIPI:
55d2375e 3641 vmx->nested.nested_run_pending = 0;
bf0cd88c
YQ
3642 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
3643 break;
3644 default:
3645 break;
55d2375e 3646 }
bf0cd88c 3647
55d2375e 3648 return 1;
671ddc70
JM
3649
3650vmentry_failed:
3651 vmx->nested.nested_run_pending = 0;
3652 if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR)
3653 return 0;
3654 if (status == NVMX_VMENTRY_VMEXIT)
3655 return 1;
3656 WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL);
b2656e4d 3657 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
55d2375e
SC
3658}
3659
3660/*
3661 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
67b0ae43 3662 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
55d2375e
SC
3663 * This function returns the new value we should put in vmcs12.guest_cr0.
3664 * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
3665 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
3666 * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
3667 * didn't trap the bit, because if L1 did, so would L0).
3668 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have
3669 * been modified by L2, and L1 knows it. So just leave the old value of
3670 * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
3671 * isn't relevant, because if L0 traps this bit it can set it to anything.
3672 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
3673 * changed these bits, and therefore they need to be updated, but L0
3674 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather
3675 * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
3676 */
3677static inline unsigned long
3678vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3679{
3680 return
3681 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
3682 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
3683 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
3684 vcpu->arch.cr0_guest_owned_bits));
3685}
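
/*
 * Illustrative example of the merging above (hypothetical bit assignments,
 * not taken from the code): suppose L1 shadows CR0.TS (TS is set in
 * vmcs12->cr0_guest_host_mask) but not CR0.MP, while L0 itself intercepts
 * CR0.MP. Then the TS bit reported to L1 comes from vmcs12->guest_cr0
 * (case 2), the MP bit comes from vmcs02's CR0_READ_SHADOW (case 3), and a
 * bit neither L0 nor L1 intercepts is read straight from vmcs02's GUEST_CR0
 * (case 1).
 */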
3686
3687static inline unsigned long
3688vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3689{
3690 return
3691 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
3692 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
3693 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
3694 vcpu->arch.cr4_guest_owned_bits));
3695}
3696
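/*
 * Propagate an event that L0 or L1 wanted to inject into L2, but that was not
 * delivered because of the emulated VM-exit, into vmcs12's IDT-vectoring
 * information fields so that L1 can re-inject it.
 */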
3697static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
3698 struct vmcs12 *vmcs12)
3699{
3700 u32 idt_vectoring;
3701 unsigned int nr;
3702
3703 if (vcpu->arch.exception.injected) {
3704 nr = vcpu->arch.exception.nr;
3705 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3706
3707 if (kvm_exception_is_soft(nr)) {
3708 vmcs12->vm_exit_instruction_len =
3709 vcpu->arch.event_exit_inst_len;
3710 idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
3711 } else
3712 idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
3713
3714 if (vcpu->arch.exception.has_error_code) {
3715 idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
3716 vmcs12->idt_vectoring_error_code =
3717 vcpu->arch.exception.error_code;
3718 }
3719
3720 vmcs12->idt_vectoring_info_field = idt_vectoring;
3721 } else if (vcpu->arch.nmi_injected) {
3722 vmcs12->idt_vectoring_info_field =
3723 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
3724 } else if (vcpu->arch.interrupt.injected) {
3725 nr = vcpu->arch.interrupt.nr;
3726 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3727
3728 if (vcpu->arch.interrupt.soft) {
3729 idt_vectoring |= INTR_TYPE_SOFT_INTR;
3730 vmcs12->vm_entry_instruction_len =
3731 vcpu->arch.event_exit_inst_len;
3732 } else
3733 idt_vectoring |= INTR_TYPE_EXT_INTR;
3734
3735 vmcs12->idt_vectoring_info_field = idt_vectoring;
3736 }
3737}
3738
3739
96b100cd 3740void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
55d2375e
SC
3741{
3742 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3743 gfn_t gfn;
3744
3745 /*
3746 * Don't need to mark the APIC access page dirty; it is never
3747 * written to by the CPU during APIC virtualization.
3748 */
3749
3750 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
3751 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
3752 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3753 }
3754
3755 if (nested_cpu_has_posted_intr(vmcs12)) {
3756 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
3757 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3758 }
3759}
3760
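/*
 * Emulate delivery of a pending posted interrupt to L2: copy the bits set in
 * the posted-interrupt descriptor's PIR into L2's virtual-APIC page and raise
 * the guest interrupt status (RVI) so the interrupt is evaluated on VM-entry.
 */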
650293c3 3761static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
55d2375e
SC
3762{
3763 struct vcpu_vmx *vmx = to_vmx(vcpu);
3764 int max_irr;
3765 void *vapic_page;
3766 u16 status;
3767
966eefb8 3768 if (!vmx->nested.pi_pending)
650293c3 3769 return 0;
55d2375e 3770
966eefb8
JM
3771 if (!vmx->nested.pi_desc)
3772 goto mmio_needed;
3773
55d2375e 3774 vmx->nested.pi_pending = false;
966eefb8 3775
55d2375e 3776 if (!pi_test_and_clear_on(vmx->nested.pi_desc))
650293c3 3777 return 0;
55d2375e
SC
3778
3779 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
3780 if (max_irr != 256) {
96c66e87
KA
3781 vapic_page = vmx->nested.virtual_apic_map.hva;
3782 if (!vapic_page)
0fe998b2 3783 goto mmio_needed;
96c66e87 3784
55d2375e
SC
3785 __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
3786 vapic_page, &max_irr);
55d2375e
SC
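		/*
		 * RVI lives in the low byte of the guest interrupt status;
		 * raise it if the highest pending vector is larger than the
		 * current RVI.
		 */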
3787 status = vmcs_read16(GUEST_INTR_STATUS);
3788 if ((u8)max_irr > ((u8)status & 0xff)) {
3789 status &= ~0xff;
3790 status |= (u8)max_irr;
3791 vmcs_write16(GUEST_INTR_STATUS, status);
3792 }
3793 }
3794
3795 nested_mark_vmcs12_pages_dirty(vcpu);
650293c3 3796 return 0;
0fe998b2
JM
3797
3798mmio_needed:
3799 kvm_handle_memory_failure(vcpu, X86EMUL_IO_NEEDED, NULL);
3800 return -ENXIO;
55d2375e
SC
3801}
3802
3803static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
3804 unsigned long exit_qual)
3805{
3806 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3807 unsigned int nr = vcpu->arch.exception.nr;
3808 u32 intr_info = nr | INTR_INFO_VALID_MASK;
3809
3810 if (vcpu->arch.exception.has_error_code) {
3811 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
3812 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
3813 }
3814
3815 if (kvm_exception_is_soft(nr))
3816 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
3817 else
3818 intr_info |= INTR_TYPE_HARD_EXCEPTION;
3819
3820 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
3821 vmx_get_nmi_mask(vcpu))
3822 intr_info |= INTR_INFO_UNBLOCK_NMI;
3823
3824 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
3825}
3826
684c0422
OU
3827/*
3828 * Returns true if a debug trap is pending delivery.
3829 *
3830 * In KVM, debug traps bear an exception payload. As such, the class of a #DB
3831 * exception may be inferred from the presence of an exception payload.
3832 */
3833static inline bool vmx_pending_dbg_trap(struct kvm_vcpu *vcpu)
3834{
3835 return vcpu->arch.exception.pending &&
3836 vcpu->arch.exception.nr == DB_VECTOR &&
3837 vcpu->arch.exception.payload;
3838}
3839
3840/*
3841 * Certain VM-exits set the 'pending debug exceptions' field to indicate a
3842 * recognized #DB (data or single-step) that has yet to be delivered. Since KVM
3843 * represents these debug traps with a payload that is said to be compatible
3844 * with the 'pending debug exceptions' field, write the payload to the VMCS
3845 * field if a VM-exit is delivered before the debug trap.
3846 */
3847static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu)
3848{
3849 if (vmx_pending_dbg_trap(vcpu))
3850 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
3851 vcpu->arch.exception.payload);
3852}
3853
d2060bd4
SC
3854static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
3855{
3856 return nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
3857 to_vmx(vcpu)->nested.preemption_timer_expired;
3858}
3859
a1c77abb 3860static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
55d2375e
SC
3861{
3862 struct vcpu_vmx *vmx = to_vmx(vcpu);
3863 unsigned long exit_qual;
3864 bool block_nested_events =
3865 vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
5ef8acbd 3866 bool mtf_pending = vmx->nested.mtf_pending;
4b9852f4
LA
3867 struct kvm_lapic *apic = vcpu->arch.apic;
3868
5ef8acbd
OU
3869 /*
3870 * Clear the MTF state. If a higher priority VM-exit is delivered first,
3871 * this state is discarded.
3872 */
5c8beb47
OU
3873 if (!block_nested_events)
3874 vmx->nested.mtf_pending = false;
5ef8acbd 3875
4b9852f4
LA
3876 if (lapic_in_kernel(vcpu) &&
3877 test_bit(KVM_APIC_INIT, &apic->pending_events)) {
3878 if (block_nested_events)
3879 return -EBUSY;
684c0422 3880 nested_vmx_update_pending_dbg(vcpu);
e64a8508 3881 clear_bit(KVM_APIC_INIT, &apic->pending_events);
bf0cd88c
YQ
3882 if (vcpu->arch.mp_state != KVM_MP_STATE_INIT_RECEIVED)
3883 nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0);
3884 return 0;
3885 }
3886
3887 if (lapic_in_kernel(vcpu) &&
3888 test_bit(KVM_APIC_SIPI, &apic->pending_events)) {
3889 if (block_nested_events)
3890 return -EBUSY;
3891
3892 clear_bit(KVM_APIC_SIPI, &apic->pending_events);
3893 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
3894 nested_vmx_vmexit(vcpu, EXIT_REASON_SIPI_SIGNAL, 0,
3895 apic->sipi_vector & 0xFFUL);
4b9852f4
LA
3896 return 0;
3897 }
55d2375e 3898
5ef8acbd
OU
3899 /*
3900 * Process any exceptions that are not debug traps before MTF.
4020da3b
ML
3901 *
3902 * Note that only a pending nested run can block a pending exception.
3903 * Otherwise an injected NMI/interrupt should either be
3904 * lost or delivered to the nested hypervisor in the IDT_VECTORING_INFO,
3905 * while delivering the pending exception.
5ef8acbd 3906 */
4020da3b 3907
6ce347af 3908 if (vcpu->arch.exception.pending && !vmx_pending_dbg_trap(vcpu)) {
4020da3b 3909 if (vmx->nested.nested_run_pending)
5ef8acbd 3910 return -EBUSY;
6ce347af
SC
3911 if (!nested_vmx_check_exception(vcpu, &exit_qual))
3912 goto no_vmexit;
5ef8acbd
OU
3913 nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
3914 return 0;
3915 }
3916
3917 if (mtf_pending) {
3918 if (block_nested_events)
3919 return -EBUSY;
3920 nested_vmx_update_pending_dbg(vcpu);
3921 nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0);
3922 return 0;
3923 }
3924
6ce347af 3925 if (vcpu->arch.exception.pending) {
4020da3b 3926 if (vmx->nested.nested_run_pending)
55d2375e 3927 return -EBUSY;
6ce347af
SC
3928 if (!nested_vmx_check_exception(vcpu, &exit_qual))
3929 goto no_vmexit;
55d2375e
SC
3930 nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
3931 return 0;
3932 }
3933
d2060bd4 3934 if (nested_vmx_preemption_timer_pending(vcpu)) {
55d2375e
SC
3935 if (block_nested_events)
3936 return -EBUSY;
3937 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
3938 return 0;
3939 }
3940
1cd2f0b0
SC
3941 if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
3942 if (block_nested_events)
3943 return -EBUSY;
3944 goto no_vmexit;
3945 }
3946
15ff0b45 3947 if (vcpu->arch.nmi_pending && !vmx_nmi_blocked(vcpu)) {
55d2375e
SC
3948 if (block_nested_events)
3949 return -EBUSY;
15ff0b45
SC
3950 if (!nested_exit_on_nmi(vcpu))
3951 goto no_vmexit;
3952
55d2375e
SC
3953 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
3954 NMI_VECTOR | INTR_TYPE_NMI_INTR |
3955 INTR_INFO_VALID_MASK, 0);
3956 /*
3957 * The NMI-triggered VM exit counts as injection:
3958 * clear this one and block further NMIs.
3959 */
3960 vcpu->arch.nmi_pending = 0;
3961 vmx_set_nmi_mask(vcpu, true);
3962 return 0;
3963 }
3964
15ff0b45 3965 if (kvm_cpu_has_interrupt(vcpu) && !vmx_interrupt_blocked(vcpu)) {
55d2375e
SC
3966 if (block_nested_events)
3967 return -EBUSY;
15ff0b45
SC
3968 if (!nested_exit_on_intr(vcpu))
3969 goto no_vmexit;
55d2375e
SC
3970 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
3971 return 0;
3972 }
3973
6ce347af 3974no_vmexit:
650293c3 3975 return vmx_complete_nested_posted_interrupt(vcpu);
55d2375e
SC
3976}
3977
3978static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
3979{
3980 ktime_t remaining =
3981 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
3982 u64 value;
3983
3984 if (ktime_to_ns(remaining) <= 0)
3985 return 0;
3986
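	/*
	 * Convert the remaining time to TSC ticks (ns * kHz / 10^6), then to
	 * VMX-preemption-timer units, which KVM emulates as TSC >> 5. For
	 * example (hypothetical numbers): with a 2 GHz virtual TSC, 1 ms
	 * remaining is 2,000,000 ticks, i.e. 62,500 timer units.
	 */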
3987 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
3988 do_div(value, 1000000);
3989 return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
3990}
3991
7952d769 3992static bool is_vmcs12_ext_field(unsigned long field)
55d2375e 3993{
7952d769
SC
3994 switch (field) {
3995 case GUEST_ES_SELECTOR:
3996 case GUEST_CS_SELECTOR:
3997 case GUEST_SS_SELECTOR:
3998 case GUEST_DS_SELECTOR:
3999 case GUEST_FS_SELECTOR:
4000 case GUEST_GS_SELECTOR:
4001 case GUEST_LDTR_SELECTOR:
4002 case GUEST_TR_SELECTOR:
4003 case GUEST_ES_LIMIT:
4004 case GUEST_CS_LIMIT:
4005 case GUEST_SS_LIMIT:
4006 case GUEST_DS_LIMIT:
4007 case GUEST_FS_LIMIT:
4008 case GUEST_GS_LIMIT:
4009 case GUEST_LDTR_LIMIT:
4010 case GUEST_TR_LIMIT:
4011 case GUEST_GDTR_LIMIT:
4012 case GUEST_IDTR_LIMIT:
4013 case GUEST_ES_AR_BYTES:
4014 case GUEST_DS_AR_BYTES:
4015 case GUEST_FS_AR_BYTES:
4016 case GUEST_GS_AR_BYTES:
4017 case GUEST_LDTR_AR_BYTES:
4018 case GUEST_TR_AR_BYTES:
4019 case GUEST_ES_BASE:
4020 case GUEST_CS_BASE:
4021 case GUEST_SS_BASE:
4022 case GUEST_DS_BASE:
4023 case GUEST_FS_BASE:
4024 case GUEST_GS_BASE:
4025 case GUEST_LDTR_BASE:
4026 case GUEST_TR_BASE:
4027 case GUEST_GDTR_BASE:
4028 case GUEST_IDTR_BASE:
4029 case GUEST_PENDING_DBG_EXCEPTIONS:
4030 case GUEST_BNDCFGS:
4031 return true;
4032 default:
4033 break;
4034 }
55d2375e 4035
7952d769
SC
4036 return false;
4037}
4038
4039static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
4040 struct vmcs12 *vmcs12)
4041{
4042 struct vcpu_vmx *vmx = to_vmx(vcpu);
55d2375e
SC
4043
4044 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
4045 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
4046 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
4047 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
4048 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
4049 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
4050 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
4051 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
4052 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
4053 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
4054 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
4055 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
4056 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
4057 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
4058 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
4059 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
4060 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
4061 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
4062 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
55d2375e
SC
4063 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
4064 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
4065 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
4066 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
4067 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
4068 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
4069 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
4070 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
4071 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
4072 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
4073 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
4074 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
4075 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
4076 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
4077 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
7952d769
SC
4078 vmcs12->guest_pending_dbg_exceptions =
4079 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
4080 if (kvm_mpx_supported())
4081 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
4082
4083 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false;
4084}
4085
4086static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
4087 struct vmcs12 *vmcs12)
4088{
4089 struct vcpu_vmx *vmx = to_vmx(vcpu);
4090 int cpu;
4091
4092 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare)
4093 return;
4094
4095
4096 WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01);
4097
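	/*
	 * Temporarily make vmcs02 the current VMCS so the VMREADs in
	 * sync_vmcs02_to_vmcs12_rare() read L2's state, then switch back to
	 * vmcs01 before returning.
	 */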
4098 cpu = get_cpu();
4099 vmx->loaded_vmcs = &vmx->nested.vmcs02;
1af1bb05 4100 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->vmcs01);
7952d769
SC
4101
4102 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
4103
4104 vmx->loaded_vmcs = &vmx->vmcs01;
1af1bb05 4105 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->nested.vmcs02);
7952d769
SC
4106 put_cpu();
4107}
4108
4109/*
4110 * Update the guest state fields of vmcs12 to reflect changes that
4111 * occurred while L2 was running. (The "IA-32e mode guest" bit of the
4112 * VM-entry controls is also updated, since this is really a guest
4113 * state bit.)
4114 */
4115static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
4116{
4117 struct vcpu_vmx *vmx = to_vmx(vcpu);
4118
1e9dfbd7 4119 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
7952d769
SC
4120 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
4121
1e9dfbd7
VK
4122 vmx->nested.need_sync_vmcs02_to_vmcs12_rare =
4123 !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr);
7952d769
SC
4124
4125 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
4126 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
4127
4128 vmcs12->guest_rsp = kvm_rsp_read(vcpu);
4129 vmcs12->guest_rip = kvm_rip_read(vcpu);
4130 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
4131
4132 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
4133 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
55d2375e
SC
4134
4135 vmcs12->guest_interruptibility_info =
4136 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
7952d769 4137
55d2375e
SC
4138 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
4139 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
bf0cd88c
YQ
4140 else if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
4141 vmcs12->guest_activity_state = GUEST_ACTIVITY_WAIT_SIPI;
55d2375e
SC
4142 else
4143 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
4144
b4b65b56 4145 if (nested_cpu_has_preemption_timer(vmcs12) &&
850448f3
PS
4146 vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER &&
4147 !vmx->nested.nested_run_pending)
4148 vmcs12->vmx_preemption_timer_value =
4149 vmx_get_preemption_timer_value(vcpu);
55d2375e
SC
4150
4151 /*
4152 * In some cases (usually, nested EPT), L2 is allowed to change its
4153 * own CR3 without exiting. If it has changed it, we must keep it.
4154 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
4155 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
4156 *
4157 * Additionally, restore L2's PDPTR to vmcs12.
4158 */
4159 if (enable_ept) {
4160 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
c7554efc
SC
4161 if (nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
4162 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
4163 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
4164 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
4165 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
4166 }
55d2375e
SC
4167 }
4168
4169 vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
4170
4171 if (nested_cpu_has_vid(vmcs12))
4172 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
4173
4174 vmcs12->vm_entry_controls =
4175 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
4176 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
4177
699a1ac2 4178 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS)
55d2375e 4179 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
55d2375e 4180
55d2375e
SC
4181 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
4182 vmcs12->guest_ia32_efer = vcpu->arch.efer;
55d2375e
SC
4183}
4184
4185/*
4186 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
4187 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
4188 * and this function updates it to reflect the changes to the guest state while
4189 * L2 was running (and perhaps made some exits which were handled directly by L0
4190 * without going back to L1), and to reflect the exit reason.
 4191 * Note that we do not have to copy all VMCS fields here, just those that
 4192 * could have been changed by the L2 guest or the exit - i.e., the guest-state and
4193 * exit-information fields only. Other fields are modified by L1 with VMWRITE,
4194 * which already writes to vmcs12 directly.
4195 */
4196static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
4dcefa31 4197 u32 vm_exit_reason, u32 exit_intr_info,
55d2375e
SC
4198 unsigned long exit_qualification)
4199{
55d2375e 4200 /* update exit information fields: */
4dcefa31 4201 vmcs12->vm_exit_reason = vm_exit_reason;
3c0c2ad1
SC
4202 if (to_vmx(vcpu)->exit_reason.enclave_mode)
4203 vmcs12->vm_exit_reason |= VMX_EXIT_REASONS_SGX_ENCLAVE_MODE;
55d2375e
SC
4204 vmcs12->exit_qualification = exit_qualification;
4205 vmcs12->vm_exit_intr_info = exit_intr_info;
4206
4207 vmcs12->idt_vectoring_info_field = 0;
4208 vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
4209 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4210
4211 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
4212 vmcs12->launch_state = 1;
4213
4214 /* vm_entry_intr_info_field is cleared on exit. Emulate this
4215 * instead of reading the real value. */
4216 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
4217
4218 /*
 4219	 * Transfer the event that L0 or L1 may have wanted to inject into
4220 * L2 to IDT_VECTORING_INFO_FIELD.
4221 */
4222 vmcs12_save_pending_event(vcpu, vmcs12);
a0d4f803
KS
4223
4224 /*
4225 * According to spec, there's no need to store the guest's
4226 * MSRs if the exit is due to a VM-entry failure that occurs
4227 * during or after loading the guest state. Since this exit
4228 * does not fall in that category, we need to save the MSRs.
4229 */
4230 if (nested_vmx_store_msr(vcpu,
4231 vmcs12->vm_exit_msr_store_addr,
4232 vmcs12->vm_exit_msr_store_count))
4233 nested_vmx_abort(vcpu,
4234 VMX_ABORT_SAVE_GUEST_MSR_FAIL);
55d2375e
SC
4235 }
4236
4237 /*
4238 * Drop what we picked up for L2 via vmx_complete_interrupts. It is
4239 * preserved above and would only end up incorrectly in L1.
4240 */
4241 vcpu->arch.nmi_injected = false;
4242 kvm_clear_exception_queue(vcpu);
4243 kvm_clear_interrupt_queue(vcpu);
4244}
4245
4246/*
 4247 * A part of what we need to do when the nested L2 guest exits and we want to
 4248 * run its L1 parent is to reset L1's guest state to the host state specified
4249 * in vmcs12.
4250 * This function is to be called not only on normal nested exit, but also on
4251 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
4252 * Failures During or After Loading Guest State").
4253 * This function should be called when the active VMCS is L1's (vmcs01).
4254 */
4255static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
4256 struct vmcs12 *vmcs12)
4257{
68cda40d 4258 enum vm_entry_failure_code ignored;
55d2375e 4259 struct kvm_segment seg;
55d2375e
SC
4260
4261 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
4262 vcpu->arch.efer = vmcs12->host_ia32_efer;
4263 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
4264 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
4265 else
4266 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
4267 vmx_set_efer(vcpu, vcpu->arch.efer);
4268
e9c16c78
PB
4269 kvm_rsp_write(vcpu, vmcs12->host_rsp);
4270 kvm_rip_write(vcpu, vmcs12->host_rip);
55d2375e
SC
4271 vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
4272 vmx_set_interrupt_shadow(vcpu, 0);
4273
4274 /*
4275 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
4276 * actually changed, because vmx_set_cr0 refers to efer set above.
4277 *
4278 * CR0_GUEST_HOST_MASK is already set in the original vmcs01
4279 * (KVM doesn't change it);
4280 */
fa71e952 4281 vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
55d2375e
SC
4282 vmx_set_cr0(vcpu, vmcs12->host_cr0);
4283
4284 /* Same as above - no reason to call set_cr4_guest_host_mask(). */
4285 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
4286 vmx_set_cr4(vcpu, vmcs12->host_cr4);
4287
4288 nested_ept_uninit_mmu_context(vcpu);
4289
4290 /*
4291 * Only PDPTE load can fail as the value of cr3 was checked on entry and
4292 * couldn't have changed.
4293 */
0f857223 4294 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, true, &ignored))
55d2375e
SC
4295 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
4296
50b265a4 4297 nested_vmx_transition_tlb_flush(vcpu, vmcs12, false);
55d2375e
SC
4298
4299 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
4300 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
4301 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
4302 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
4303 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
4304 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
4305 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
4306
4307 /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
4308 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
4309 vmcs_write64(GUEST_BNDCFGS, 0);
4310
4311 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
4312 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
4313 vcpu->arch.pat = vmcs12->host_ia32_pat;
4314 }
4315 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
d1968421
OU
4316 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
4317 vmcs12->host_ia32_perf_global_ctrl));
55d2375e
SC
4318
4319 /* Set L1 segment info according to Intel SDM
4320 27.5.2 Loading Host Segment and Descriptor-Table Registers */
4321 seg = (struct kvm_segment) {
4322 .base = 0,
4323 .limit = 0xFFFFFFFF,
4324 .selector = vmcs12->host_cs_selector,
4325 .type = 11,
4326 .present = 1,
4327 .s = 1,
4328 .g = 1
4329 };
4330 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
4331 seg.l = 1;
4332 else
4333 seg.db = 1;
816be9e9 4334 __vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
55d2375e
SC
4335 seg = (struct kvm_segment) {
4336 .base = 0,
4337 .limit = 0xFFFFFFFF,
4338 .type = 3,
4339 .present = 1,
4340 .s = 1,
4341 .db = 1,
4342 .g = 1
4343 };
4344 seg.selector = vmcs12->host_ds_selector;
816be9e9 4345 __vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
55d2375e 4346 seg.selector = vmcs12->host_es_selector;
816be9e9 4347 __vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
55d2375e 4348 seg.selector = vmcs12->host_ss_selector;
816be9e9 4349 __vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
55d2375e
SC
4350 seg.selector = vmcs12->host_fs_selector;
4351 seg.base = vmcs12->host_fs_base;
816be9e9 4352 __vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
55d2375e
SC
4353 seg.selector = vmcs12->host_gs_selector;
4354 seg.base = vmcs12->host_gs_base;
816be9e9 4355 __vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
55d2375e
SC
4356 seg = (struct kvm_segment) {
4357 .base = vmcs12->host_tr_base,
4358 .limit = 0x67,
4359 .selector = vmcs12->host_tr_selector,
4360 .type = 11,
4361 .present = 1
4362 };
816be9e9 4363 __vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
55d2375e 4364
afc8de01
SC
4365 memset(&seg, 0, sizeof(seg));
4366 seg.unusable = 1;
816be9e9 4367 __vmx_set_segment(vcpu, &seg, VCPU_SREG_LDTR);
55d2375e
SC
4368
4369 kvm_set_dr(vcpu, 7, 0x400);
4370 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
4371
55d2375e
SC
4372 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
4373 vmcs12->vm_exit_msr_load_count))
4374 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
dbab610a
ML
4375
4376 to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
55d2375e
SC
4377}
4378
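/*
 * Recover the EFER value L1 was using, as tracked by vmcs01: use
 * GUEST_IA32_EFER if vmcs01 loads EFER on VM-entry, otherwise derive it from
 * the host EFER, the MSR autoload list, or the user-return MSR slot.
 */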
4379static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
4380{
eb3db1b1 4381 struct vmx_uret_msr *efer_msr;
55d2375e
SC
4382 unsigned int i;
4383
4384 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
4385 return vmcs_read64(GUEST_IA32_EFER);
4386
4387 if (cpu_has_load_ia32_efer())
4388 return host_efer;
4389
4390 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
4391 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
4392 return vmx->msr_autoload.guest.val[i].value;
4393 }
4394
d85a8034 4395 efer_msr = vmx_find_uret_msr(vmx, MSR_EFER);
55d2375e
SC
4396 if (efer_msr)
4397 return efer_msr->data;
4398
4399 return host_efer;
4400}
4401
4402static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
4403{
4404 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4405 struct vcpu_vmx *vmx = to_vmx(vcpu);
4406 struct vmx_msr_entry g, h;
55d2375e
SC
4407 gpa_t gpa;
4408 u32 i, j;
4409
4410 vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
4411
4412 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
4413 /*
4414 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
4415 * as vmcs01.GUEST_DR7 contains a userspace defined value
4416 * and vcpu->arch.dr7 is not squirreled away before the
4417 * nested VMENTER (not worth adding a variable in nested_vmx).
4418 */
4419 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
4420 kvm_set_dr(vcpu, 7, DR7_FIXED_1);
4421 else
4422 WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
4423 }
4424
4425 /*
4426 * Note that calling vmx_set_{efer,cr0,cr4} is important as they
4427 * handle a variety of side effects to KVM's software model.
4428 */
4429 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
4430
fa71e952 4431 vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
55d2375e
SC
4432 vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
4433
4434 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
4435 vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
4436
4437 nested_ept_uninit_mmu_context(vcpu);
f087a029 4438 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
cb3c1e2f 4439 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
55d2375e
SC
4440
4441 /*
4442 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
4443 * from vmcs01 (if necessary). The PDPTRs are not loaded on
4444 * VMFail, like everything else we just need to ensure our
4445 * software model is up-to-date.
4446 */
9932b49e 4447 if (enable_ept && is_pae_paging(vcpu))
f087a029 4448 ept_save_pdptrs(vcpu);
55d2375e
SC
4449
4450 kvm_mmu_reset_context(vcpu);
4451
55d2375e
SC
4452 /*
4453 * This nasty bit of open coding is a compromise between blindly
4454 * loading L1's MSRs using the exit load lists (incorrect emulation
4455 * of VMFail), leaving the nested VM's MSRs in the software model
4456 * (incorrect behavior) and snapshotting the modified MSRs (too
 4457	 * expensive since the lists are unbounded by hardware). For each
4458 * MSR that was (prematurely) loaded from the nested VMEntry load
4459 * list, reload it from the exit load list if it exists and differs
4460 * from the guest value. The intent is to stuff host state as
4461 * silently as possible, not to fully process the exit load list.
4462 */
55d2375e
SC
4463 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
4464 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
4465 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
4466 pr_debug_ratelimited(
4467 "%s read MSR index failed (%u, 0x%08llx)\n",
4468 __func__, i, gpa);
4469 goto vmabort;
4470 }
4471
4472 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
4473 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
4474 if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
4475 pr_debug_ratelimited(
4476 "%s read MSR failed (%u, 0x%08llx)\n",
4477 __func__, j, gpa);
4478 goto vmabort;
4479 }
4480 if (h.index != g.index)
4481 continue;
4482 if (h.value == g.value)
4483 break;
4484
4485 if (nested_vmx_load_msr_check(vcpu, &h)) {
4486 pr_debug_ratelimited(
4487 "%s check failed (%u, 0x%x, 0x%x)\n",
4488 __func__, j, h.index, h.reserved);
4489 goto vmabort;
4490 }
4491
f20935d8 4492 if (kvm_set_msr(vcpu, h.index, h.value)) {
55d2375e
SC
4493 pr_debug_ratelimited(
4494 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
4495 __func__, j, h.index, h.value);
4496 goto vmabort;
4497 }
4498 }
4499 }
4500
4501 return;
4502
4503vmabort:
4504 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
4505}
4506
4507/*
4508 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
4509 * and modify vmcs12 to make it see what it would expect to see there if
4510 * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
4511 */
4dcefa31 4512void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
55d2375e
SC
4513 u32 exit_intr_info, unsigned long exit_qualification)
4514{
4515 struct vcpu_vmx *vmx = to_vmx(vcpu);
4516 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4517
4518 /* trying to cancel vmlaunch/vmresume is a bug */
4519 WARN_ON_ONCE(vmx->nested.nested_run_pending);
4520
cb6a32c2
SC
4521 /* Similarly, triple faults in L2 should never escape. */
4522 WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu));
4523
f5c7e842
VK
4524 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
4525 /*
4526 * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
4527 * Enlightened VMCS after migration and we still need to
4528 * do that when something is forcing L2->L1 exit prior to
4529 * the first L2 run.
4530 */
4531 (void)nested_get_evmcs_page(vcpu);
4532 }
f2c7ef3b 4533
40e5f908
SC
4534 /* Service pending TLB flush requests for L2 before switching to L1. */
4535 kvm_service_local_tlb_flush_requests(vcpu);
eeeb4f67 4536
43fea4e4
PS
4537 /*
4538 * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
4539 * now and the new vmentry. Ensure that the VMCS02 PDPTR fields are
4540 * up-to-date before switching to L1.
4541 */
4542 if (enable_ept && is_pae_paging(vcpu))
4543 vmx_ept_load_pdptrs(vcpu);
4544
55d2375e
SC
4545 leave_guest_mode(vcpu);
4546
b4b65b56
PB
4547 if (nested_cpu_has_preemption_timer(vmcs12))
4548 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
4549
d041b5ea
IS
4550 if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING)) {
4551 vcpu->arch.tsc_offset = vcpu->arch.l1_tsc_offset;
4552 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING))
4553 vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
4554 }
55d2375e
SC
4555
4556 if (likely(!vmx->fail)) {
3731905e 4557 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
f4f8316d 4558
4dcefa31
SC
4559 if (vm_exit_reason != -1)
4560 prepare_vmcs12(vcpu, vmcs12, vm_exit_reason,
4561 exit_intr_info, exit_qualification);
55d2375e
SC
4562
4563 /*
3731905e 4564 * Must happen outside of sync_vmcs02_to_vmcs12() as it will
55d2375e
SC
4565 * also be used to capture vmcs12 cache as part of
4566 * capturing nVMX state for snapshot (migration).
4567 *
4568 * Otherwise, this flush will dirty guest memory at a
4569 * point it is already assumed by user-space to be
4570 * immutable.
4571 */
4572 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
55d2375e
SC
4573 } else {
4574 /*
4575 * The only expected VM-instruction error is "VM entry with
4576 * invalid control field(s)." Anything else indicates a
4577 * problem with L0. And we should never get here with a
4578 * VMFail of any type if early consistency checks are enabled.
4579 */
4580 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
4581 VMXERR_ENTRY_INVALID_CONTROL_FIELD);
4582 WARN_ON_ONCE(nested_early_check);
4583 }
4584
4585 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
4586
4587 /* Update any VMCS fields that might have changed while L2 ran */
4588 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
4589 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
4590 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
1ab9287a
IS
4591 if (kvm_has_tsc_control)
4592 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
4593
02d496cf
LA
4594 if (vmx->nested.l1_tpr_threshold != -1)
4595 vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold);
55d2375e 4596
55d2375e
SC
4597 if (vmx->nested.change_vmcs01_virtual_apic_mode) {
4598 vmx->nested.change_vmcs01_virtual_apic_mode = false;
4599 vmx_set_virtual_apic_mode(vcpu);
55d2375e
SC
4600 }
4601
a85863c2
MS
4602 if (vmx->nested.update_vmcs01_cpu_dirty_logging) {
4603 vmx->nested.update_vmcs01_cpu_dirty_logging = false;
4604 vmx_update_cpu_dirty_logging(vcpu);
4605 }
4606
55d2375e
SC
4607 /* Unpin physical memory we referred to in vmcs02 */
4608 if (vmx->nested.apic_access_page) {
b11494bc 4609 kvm_release_page_clean(vmx->nested.apic_access_page);
55d2375e
SC
4610 vmx->nested.apic_access_page = NULL;
4611 }
96c66e87 4612 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
3278e049
KA
4613 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
4614 vmx->nested.pi_desc = NULL;
55d2375e 4615
1196cb97
SC
4616 if (vmx->nested.reload_vmcs01_apic_access_page) {
4617 vmx->nested.reload_vmcs01_apic_access_page = false;
4618 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
4619 }
55d2375e 4620
7c69661e
SC
4621 if (vmx->nested.update_vmcs01_apicv_status) {
4622 vmx->nested.update_vmcs01_apicv_status = false;
4623 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
4624 }
4625
4dcefa31 4626 if ((vm_exit_reason != -1) &&
1e9dfbd7 4627 (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)))
3731905e 4628 vmx->nested.need_vmcs12_to_shadow_sync = true;
55d2375e
SC
4629
4630 /* in case we halted in L2 */
4631 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4632
4633 if (likely(!vmx->fail)) {
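 /*
  * Emulate "acknowledge interrupt on exit": pull the pending vector
  * from the local APIC and record it in vmcs12's VM-exit
  * interruption-information field.
  */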
4dcefa31 4634 if ((u16)vm_exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
a1c77abb 4635 nested_exit_intr_ack_set(vcpu)) {
55d2375e
SC
4636 int irq = kvm_cpu_get_interrupt(vcpu);
4637 WARN_ON(irq < 0);
4638 vmcs12->vm_exit_intr_info = irq |
4639 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
4640 }
4641
4dcefa31 4642 if (vm_exit_reason != -1)
55d2375e
SC
4643 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
4644 vmcs12->exit_qualification,
4645 vmcs12->idt_vectoring_info_field,
4646 vmcs12->vm_exit_intr_info,
4647 vmcs12->vm_exit_intr_error_code,
4648 KVM_ISA_VMX);
4649
4650 load_vmcs12_host_state(vcpu, vmcs12);
4651
4652 return;
4653 }
4654
4655 /*
4656 * After an early L2 VM-entry failure, we're now back
4657 * in L1 which thinks it just finished a VMLAUNCH or
4658 * VMRESUME instruction, so we need to set the failure
4659 * flag and the VM-instruction error field of the VMCS
4660 * accordingly, and skip the emulated instruction.
4661 */
b2656e4d 4662 (void)nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
55d2375e
SC
4663
4664 /*
4665 * Restore L1's host state to KVM's software model. We're here
4666 * because a consistency check was caught by hardware, which
4667 * means some amount of guest state has been propagated to KVM's
4668 * model and needs to be unwound to the host's state.
4669 */
4670 nested_vmx_restore_host_state(vcpu);
4671
4672 vmx->fail = 0;
4673}
4674
cb6a32c2
SC
4675static void nested_vmx_triple_fault(struct kvm_vcpu *vcpu)
4676{
4677 nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0);
4678}
4679
55d2375e
SC
4680/*
4681 * Decode the memory-address operand of a vmx instruction, as recorded on an
4682 * exit caused by such an instruction (run by a guest hypervisor).
4683 * On success, returns 0. When the operand is invalid, returns 1 and throws
49f933d4 4684 * #UD, #GP, or #SS.
55d2375e
SC
4685 */
4686int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
fdb28619 4687 u32 vmx_instruction_info, bool wr, int len, gva_t *ret)
55d2375e
SC
4688{
4689 gva_t off;
4690 bool exn;
4691 struct kvm_segment s;
4692
4693 /*
4694 * According to Vol. 3B, "Information for VM Exits Due to Instruction
4695 * Execution", on an exit, vmx_instruction_info holds most of the
4696 * addressing components of the operand. Only the displacement part
4697 * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
4698 * For how an actual address is calculated from all these components,
4699 * refer to Vol. 1, "Operand Addressing".
4700 */
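 /*
  * Bit layout of vmx_instruction_info as decoded below: [1:0] scaling,
  * [9:7] address size, [10] register operand, [17:15] segment register,
  * [21:18] index register, [22] index invalid, [26:23] base register,
  * [27] base invalid, [31:28] reg2 (used by VMREAD/VMWRITE callers).
  */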
4701 int scaling = vmx_instruction_info & 3;
4702 int addr_size = (vmx_instruction_info >> 7) & 7;
4703 bool is_reg = vmx_instruction_info & (1u << 10);
4704 int seg_reg = (vmx_instruction_info >> 15) & 7;
4705 int index_reg = (vmx_instruction_info >> 18) & 0xf;
4706 bool index_is_valid = !(vmx_instruction_info & (1u << 22));
4707 int base_reg = (vmx_instruction_info >> 23) & 0xf;
4708 bool base_is_valid = !(vmx_instruction_info & (1u << 27));
4709
4710 if (is_reg) {
4711 kvm_queue_exception(vcpu, UD_VECTOR);
4712 return 1;
4713 }
4714
4715 /* Addr = segment_base + offset */
4716 /* offset = base + [index * scale] + displacement */
4717 off = exit_qualification; /* holds the displacement */
946c522b
SC
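 /*
  * Address-size encoding (per the SDM): 0 = 16-bit, 1 = 32-bit,
  * 2 = 64-bit. Sign-extend the displacement from the operand size.
  */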
4718 if (addr_size == 1)
4719 off = (gva_t)sign_extend64(off, 31);
4720 else if (addr_size == 0)
4721 off = (gva_t)sign_extend64(off, 15);
55d2375e
SC
4722 if (base_is_valid)
4723 off += kvm_register_read(vcpu, base_reg);
4724 if (index_is_valid)
e6302698 4725 off += kvm_register_read(vcpu, index_reg) << scaling;
55d2375e 4726 vmx_get_segment(vcpu, &s, seg_reg);
55d2375e 4727
8570f9e8
SC
4728 /*
4729 * The effective address, i.e. @off, of a memory operand is truncated
4730 * based on the address size of the instruction. Note that this is
4731 * the *effective address*, i.e. the address prior to accounting for
4732 * the segment's base.
4733 */
55d2375e 4734 if (addr_size == 1) /* 32 bit */
8570f9e8
SC
4735 off &= 0xffffffff;
4736 else if (addr_size == 0) /* 16 bit */
4737 off &= 0xffff;
55d2375e
SC
4738
4739 /* Checks for #GP/#SS exceptions. */
4740 exn = false;
4741 if (is_long_mode(vcpu)) {
8570f9e8
SC
4742 /*
4743 * The virtual/linear address is never truncated in 64-bit
4744 * mode, e.g. a 32-bit address size can yield a 64-bit virtual
4745 * address when using FS/GS with a non-zero base.
4746 */
6694e480
LA
4747 if (seg_reg == VCPU_SREG_FS || seg_reg == VCPU_SREG_GS)
4748 *ret = s.base + off;
4749 else
4750 *ret = off;
8570f9e8 4751
55d2375e
SC
4752 /* Long mode: #GP(0)/#SS(0) if the memory address is in a
4753 * non-canonical form. This is the only check on the memory
4754 * destination for long mode!
4755 */
4756 exn = is_noncanonical_address(*ret, vcpu);
e0dfacbf 4757 } else {
8570f9e8
SC
4758 /*
4759 * When not in long mode, the virtual/linear address is
4760 * unconditionally truncated to 32 bits regardless of the
4761 * address size.
4762 */
4763 *ret = (s.base + off) & 0xffffffff;
4764
55d2375e
SC
4765 /* Protected mode: apply checks for segment validity in the
4766 * following order:
4767 * - segment type check (#GP(0) may be thrown)
4768 * - usability check (#GP(0)/#SS(0))
4769 * - limit check (#GP(0)/#SS(0))
4770 */
4771 if (wr)
4772 /* #GP(0) if the destination operand is located in a
4773 * read-only data segment or any code segment.
4774 */
4775 exn = ((s.type & 0xa) == 0 || (s.type & 8));
4776 else
4777 /* #GP(0) if the source operand is located in an
4778 * execute-only code segment
4779 */
4780 exn = ((s.type & 0xa) == 8);
4781 if (exn) {
4782 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
4783 return 1;
4784 }
4785 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
4786 */
4787 exn = (s.unusable != 0);
34333cc6
SC
4788
4789 /*
4790 * Protected mode: #GP(0)/#SS(0) if the memory operand is
4791 * outside the segment limit. All CPUs that support VMX ignore
4792 * limit checks for flat segments, i.e. segments with base==0,
4793 * limit==0xffffffff and of type expand-up data or code.
55d2375e 4794 */
34333cc6
SC
4795 if (!(s.base == 0 && s.limit == 0xffffffff &&
4796 ((s.type & 8) || !(s.type & 4))))
fdb28619 4797 exn = exn || ((u64)off + len - 1 > s.limit);
55d2375e
SC
4798 }
4799 if (exn) {
4800 kvm_queue_exception_e(vcpu,
4801 seg_reg == VCPU_SREG_SS ?
4802 SS_VECTOR : GP_VECTOR,
4803 0);
4804 return 1;
4805 }
4806
4807 return 0;
4808}
4809
0bcd556e
SC
4810void nested_vmx_pmu_refresh(struct kvm_vcpu *vcpu,
4811 bool vcpu_has_perf_global_ctrl)
03a8871a
OU
4812{
4813 struct vcpu_vmx *vmx;
4814
4815 if (!nested_vmx_allowed(vcpu))
4816 return;
4817
4818 vmx = to_vmx(vcpu);
0bcd556e 4819 if (vcpu_has_perf_global_ctrl) {
03a8871a
OU
4820 vmx->nested.msrs.entry_ctls_high |=
4821 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
4822 vmx->nested.msrs.exit_ctls_high |=
4823 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
4824 } else {
4825 vmx->nested.msrs.entry_ctls_high &=
4826 ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
4827 vmx->nested.msrs.exit_ctls_high &=
c6b177a3 4828 ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
03a8871a
OU
4829 }
4830}
4831
7a35e515
VK
4832static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer,
4833 int *ret)
55d2375e
SC
4834{
4835 gva_t gva;
4836 struct x86_exception e;
7a35e515 4837 int r;
55d2375e 4838
5addc235 4839 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
fdb28619 4840 vmcs_read32(VMX_INSTRUCTION_INFO), false,
7a35e515
VK
4841 sizeof(*vmpointer), &gva)) {
4842 *ret = 1;
4843 return -EINVAL;
4844 }
55d2375e 4845
7a35e515
VK
4846 r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e);
4847 if (r != X86EMUL_CONTINUE) {
3f3393b3 4848 *ret = kvm_handle_memory_failure(vcpu, r, &e);
7a35e515 4849 return -EINVAL;
55d2375e
SC
4850 }
4851
4852 return 0;
4853}
4854
4855/*
4856 * Allocate a shadow VMCS and associate it with the currently loaded
4857 * VMCS, unless such a shadow VMCS already exists. The newly allocated
4858 * VMCS is also VMCLEARed, so that it is ready for use.
4859 */
4860static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
4861{
4862 struct vcpu_vmx *vmx = to_vmx(vcpu);
4863 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
4864
4865 /*
d6e656cd
SC
4866 * KVM allocates a shadow VMCS only when L1 executes VMXON and frees it
4867 * when L1 executes VMXOFF or the vCPU is forced out of nested
4868 * operation. VMXON faults if the CPU is already post-VMXON, so it
4869 * should be impossible to already have an allocated shadow VMCS. KVM
4870 * doesn't support virtualization of VMCS shadowing, so vmcs01 should
4871 * always be the loaded VMCS.
55d2375e 4872 */
d6e656cd
SC
4873 if (WARN_ON(loaded_vmcs != &vmx->vmcs01 || loaded_vmcs->shadow_vmcs))
4874 return loaded_vmcs->shadow_vmcs;
4875
4876 loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
4877 if (loaded_vmcs->shadow_vmcs)
4878 vmcs_clear(loaded_vmcs->shadow_vmcs);
55d2375e 4879
55d2375e
SC
4880 return loaded_vmcs->shadow_vmcs;
4881}
4882
4883static int enter_vmx_operation(struct kvm_vcpu *vcpu)
4884{
4885 struct vcpu_vmx *vmx = to_vmx(vcpu);
4886 int r;
4887
4888 r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
4889 if (r < 0)
4890 goto out_vmcs02;
4891
41836839 4892 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
55d2375e
SC
4893 if (!vmx->nested.cached_vmcs12)
4894 goto out_cached_vmcs12;
4895
8503fea6 4896 vmx->nested.shadow_vmcs12_cache.gpa = INVALID_GPA;
41836839 4897 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
55d2375e
SC
4898 if (!vmx->nested.cached_shadow_vmcs12)
4899 goto out_cached_shadow_vmcs12;
4900
4901 if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
4902 goto out_shadow_vmcs;
4903
4904 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
ada0098d 4905 HRTIMER_MODE_ABS_PINNED);
55d2375e
SC
4906 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
4907
4908 vmx->nested.vpid02 = allocate_vpid();
4909
4910 vmx->nested.vmcs02_initialized = false;
4911 vmx->nested.vmxon = true;
ee85dec2 4912
2ef7619d 4913 if (vmx_pt_mode_is_host_guest()) {
ee85dec2 4914 vmx->pt_desc.guest.ctl = 0;
476c9bd8 4915 pt_update_intercept_for_msr(vcpu);
ee85dec2
LK
4916 }
4917
55d2375e
SC
4918 return 0;
4919
4920out_shadow_vmcs:
4921 kfree(vmx->nested.cached_shadow_vmcs12);
4922
4923out_cached_shadow_vmcs12:
4924 kfree(vmx->nested.cached_vmcs12);
4925
4926out_cached_vmcs12:
4927 free_loaded_vmcs(&vmx->nested.vmcs02);
4928
4929out_vmcs02:
4930 return -ENOMEM;
4931}
4932
ed7023a1 4933/* Emulate the VMXON instruction. */
55d2375e
SC
4934static int handle_vmon(struct kvm_vcpu *vcpu)
4935{
4936 int ret;
4937 gpa_t vmptr;
2e408936 4938 uint32_t revision;
55d2375e 4939 struct vcpu_vmx *vmx = to_vmx(vcpu);
32ad73db
SC
4940 const u64 VMXON_NEEDED_FEATURES = FEAT_CTL_LOCKED
4941 | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
55d2375e
SC
4942
4943 /*
4944 * The Intel VMX Instruction Reference lists a bunch of bits that are
4945 * prerequisite to running VMXON, most notably cr4.VMXE must be set to
c2fe3cd4 4946 * 1 (see vmx_is_valid_cr4() for when we allow the guest to set this).
55d2375e
SC
4947 * Otherwise, we should fail with #UD. But most faulting conditions
4948 * have already been checked by hardware, prior to the VM-exit for
4949 * VMXON. We do test guest cr4.VMXE because processor CR4 always has
4950 * that bit set to 1 in non-root mode.
4951 */
4952 if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
4953 kvm_queue_exception(vcpu, UD_VECTOR);
4954 return 1;
4955 }
4956
4957 /* CPL=0 must be checked manually. */
4958 if (vmx_get_cpl(vcpu)) {
4959 kvm_inject_gp(vcpu, 0);
4960 return 1;
4961 }
4962
4963 if (vmx->nested.vmxon)
b2656e4d 4964 return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
55d2375e
SC
4965
4966 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
4967 != VMXON_NEEDED_FEATURES) {
4968 kvm_inject_gp(vcpu, 0);
4969 return 1;
4970 }
4971
7a35e515
VK
4972 if (nested_vmx_get_vmptr(vcpu, &vmptr, &ret))
4973 return ret;
55d2375e
SC
4974
4975 /*
4976 * SDM 3: 24.11.5
 4977 * The first 4 bytes of the VMXON region contain the supported
 4978 * VMCS revision identifier.
 4979 *
 4980 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case,
 4981 * i.e. the VMXON region is never limited to a 32-bit physical address.
4982 */
e0bf2665 4983 if (!page_address_valid(vcpu, vmptr))
55d2375e
SC
4984 return nested_vmx_failInvalid(vcpu);
4985
2e408936
KA
4986 if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
4987 revision != VMCS12_REVISION)
55d2375e 4988 return nested_vmx_failInvalid(vcpu);
55d2375e
SC
4989
4990 vmx->nested.vmxon_ptr = vmptr;
4991 ret = enter_vmx_operation(vcpu);
4992 if (ret)
4993 return ret;
4994
4995 return nested_vmx_succeed(vcpu);
4996}
4997
4998static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
4999{
5000 struct vcpu_vmx *vmx = to_vmx(vcpu);
5001
64c78508 5002 if (vmx->nested.current_vmptr == INVALID_GPA)
55d2375e
SC
5003 return;
5004
7952d769
SC
5005 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
5006
55d2375e
SC
5007 if (enable_shadow_vmcs) {
5008 /* copy to memory all shadowed fields in case
5009 they were modified */
5010 copy_shadow_to_vmcs12(vmx);
55d2375e
SC
5011 vmx_disable_shadow_vmcs(vmx);
5012 }
5013 vmx->nested.posted_intr_nv = -1;
5014
5015 /* Flush VMCS12 to guest memory */
5016 kvm_vcpu_write_guest_page(vcpu,
5017 vmx->nested.current_vmptr >> PAGE_SHIFT,
5018 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
5019
0c1c92f1 5020 kvm_mmu_free_roots(vcpu->kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
55d2375e 5021
64c78508 5022 vmx->nested.current_vmptr = INVALID_GPA;
55d2375e
SC
5023}
5024
5025/* Emulate the VMXOFF instruction */
5026static int handle_vmoff(struct kvm_vcpu *vcpu)
5027{
5028 if (!nested_vmx_check_permission(vcpu))
5029 return 1;
4b9852f4 5030
55d2375e 5031 free_nested(vcpu);
4b9852f4
LA
5032
5033 /* Process a latched INIT during time CPU was in VMX operation */
5034 kvm_make_request(KVM_REQ_EVENT, vcpu);
5035
55d2375e
SC
5036 return nested_vmx_succeed(vcpu);
5037}
5038
5039/* Emulate the VMCLEAR instruction */
5040static int handle_vmclear(struct kvm_vcpu *vcpu)
5041{
5042 struct vcpu_vmx *vmx = to_vmx(vcpu);
5043 u32 zero = 0;
5044 gpa_t vmptr;
11e34914 5045 u64 evmcs_gpa;
7a35e515 5046 int r;
55d2375e
SC
5047
5048 if (!nested_vmx_check_permission(vcpu))
5049 return 1;
5050
7a35e515
VK
5051 if (nested_vmx_get_vmptr(vcpu, &vmptr, &r))
5052 return r;
55d2375e 5053
e0bf2665 5054 if (!page_address_valid(vcpu, vmptr))
b2656e4d 5055 return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
55d2375e
SC
5056
5057 if (vmptr == vmx->nested.vmxon_ptr)
b2656e4d 5058 return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
55d2375e 5059
11e34914
VK
5060 /*
5061 * When Enlightened VMEntry is enabled on the calling CPU we treat
5062 * memory area pointer by vmptr as Enlightened VMCS (as there's no good
5063 * way to distinguish it from VMCS12) and we must not corrupt it by
5064 * writing to the non-existent 'launch_state' field. The area doesn't
5065 * have to be the currently active EVMCS on the calling CPU and there's
5066 * nothing KVM has to do to transition it from 'active' to 'non-active'
5067 * state. It is possible that the area will stay mapped as
5068 * vmx->nested.hv_evmcs but this shouldn't be a problem.
5069 */
5070 if (likely(!vmx->nested.enlightened_vmcs_enabled ||
5071 !nested_enlightened_vmentry(vcpu, &evmcs_gpa))) {
55d2375e
SC
5072 if (vmptr == vmx->nested.current_vmptr)
5073 nested_release_vmcs12(vcpu);
5074
5075 kvm_vcpu_write_guest(vcpu,
5076 vmptr + offsetof(struct vmcs12,
5077 launch_state),
5078 &zero, sizeof(zero));
3b19b81a
VK
5079 } else if (vmx->nested.hv_evmcs && vmptr == vmx->nested.hv_evmcs_vmptr) {
5080 nested_release_evmcs(vcpu);
55d2375e
SC
5081 }
5082
5083 return nested_vmx_succeed(vcpu);
5084}
5085
55d2375e
SC
5086/* Emulate the VMLAUNCH instruction */
5087static int handle_vmlaunch(struct kvm_vcpu *vcpu)
5088{
5089 return nested_vmx_run(vcpu, true);
5090}
5091
5092/* Emulate the VMRESUME instruction */
5093static int handle_vmresume(struct kvm_vcpu *vcpu)
5094{
5095
5096 return nested_vmx_run(vcpu, false);
5097}
5098
5099static int handle_vmread(struct kvm_vcpu *vcpu)
5100{
dd2d6042
JM
5101 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
5102 : get_vmcs12(vcpu);
5addc235 5103 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
c90f4d03
JM
5104 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5105 struct vcpu_vmx *vmx = to_vmx(vcpu);
f7eea636 5106 struct x86_exception e;
c90f4d03
JM
5107 unsigned long field;
5108 u64 value;
5109 gva_t gva = 0;
1c6f0b47 5110 short offset;
7a35e515 5111 int len, r;
55d2375e
SC
5112
5113 if (!nested_vmx_check_permission(vcpu))
5114 return 1;
5115
55d2375e 5116 /* Decode instruction info and find the field to read */
27b4a9c4 5117 field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
1c6f0b47 5118
6cbbaab6
VK
5119 if (!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
5120 /*
5121 * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA,
5122 * any VMREAD sets the ALU flags for VMfailInvalid.
5123 */
5124 if (vmx->nested.current_vmptr == INVALID_GPA ||
5125 (is_guest_mode(vcpu) &&
5126 get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA))
5127 return nested_vmx_failInvalid(vcpu);
5128
5129 offset = get_vmcs12_field_offset(field);
5130 if (offset < 0)
5131 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
55d2375e 5132
6cbbaab6
VK
5133 if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
5134 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
7952d769 5135
6cbbaab6
VK
5136 /* Read the field, zero-extended to a u64 value */
5137 value = vmcs12_read_any(vmcs12, field, offset);
5138 } else {
5139 /*
5140 * Hyper-V TLFS (as of 6.0b) explicitly states, that while an
5141 * enlightened VMCS is active VMREAD/VMWRITE instructions are
5142 * unsupported. Unfortunately, certain versions of Windows 11
5143 * don't comply with this requirement which is not enforced in
5144 * genuine Hyper-V. Allow VMREAD from an enlightened VMCS as a
5145 * workaround, as misbehaving guests will panic on VM-Fail.
5146 * Note, enlightened VMCS is incompatible with shadow VMCS so
5147 * all VMREADs from L2 should go to L1.
5148 */
5149 if (WARN_ON_ONCE(is_guest_mode(vcpu)))
5150 return nested_vmx_failInvalid(vcpu);
5151
5152 offset = evmcs_field_offset(field, NULL);
5153 if (offset < 0)
5154 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
5155
5156 /* Read the field, zero-extended to a u64 value */
5157 value = evmcs_read_any(vmx->nested.hv_evmcs, field, offset);
5158 }
1c6f0b47 5159
55d2375e
SC
5160 /*
5161 * Now copy part of this value to register or memory, as requested.
5162 * Note that the number of bits actually copied is 32 or 64 depending
5163 * on the guest's mode (32 or 64 bit), not on the given field's length.
5164 */
c90f4d03 5165 if (instr_info & BIT(10)) {
27b4a9c4 5166 kvm_register_write(vcpu, (((instr_info) >> 3) & 0xf), value);
55d2375e 5167 } else {
fdb28619 5168 len = is_64_bit_mode(vcpu) ? 8 : 4;
55d2375e 5169 if (get_vmx_mem_address(vcpu, exit_qualification,
c90f4d03 5170 instr_info, true, len, &gva))
55d2375e
SC
5171 return 1;
5172 /* _system ok, nested_vmx_check_permission has verified cpl=0 */
7a35e515
VK
5173 r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e);
5174 if (r != X86EMUL_CONTINUE)
3f3393b3 5175 return kvm_handle_memory_failure(vcpu, r, &e);
55d2375e
SC
5176 }
5177
5178 return nested_vmx_succeed(vcpu);
5179}
5180
e2174295
SC
5181static bool is_shadow_field_rw(unsigned long field)
5182{
5183 switch (field) {
5184#define SHADOW_FIELD_RW(x, y) case x:
5185#include "vmcs_shadow_fields.h"
5186 return true;
5187 default:
5188 break;
5189 }
5190 return false;
5191}
5192
5193static bool is_shadow_field_ro(unsigned long field)
5194{
5195 switch (field) {
5196#define SHADOW_FIELD_RO(x, y) case x:
5197#include "vmcs_shadow_fields.h"
5198 return true;
5199 default:
5200 break;
5201 }
5202 return false;
5203}
55d2375e
SC
5204
5205static int handle_vmwrite(struct kvm_vcpu *vcpu)
5206{
c90f4d03
JM
5207 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
5208 : get_vmcs12(vcpu);
5addc235 5209 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
c90f4d03
JM
5210 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5211 struct vcpu_vmx *vmx = to_vmx(vcpu);
5212 struct x86_exception e;
55d2375e 5213 unsigned long field;
c90f4d03 5214 short offset;
55d2375e 5215 gva_t gva;
7a35e515 5216 int len, r;
55d2375e 5217
c90f4d03
JM
5218 /*
5219 * The value to write might be 32 or 64 bits, depending on L1's long
55d2375e
SC
5220 * mode, and eventually we need to write that into a field of several
5221 * possible lengths. The code below first zero-extends the value to 64
c90f4d03 5222 * bits (value), and then copies only the appropriate number of
55d2375e
SC
5223 * bits into the vmcs12 field.
5224 */
c90f4d03 5225 u64 value = 0;
55d2375e
SC
5226
5227 if (!nested_vmx_check_permission(vcpu))
5228 return 1;
5229
dd2d6042 5230 /*
64c78508 5231 * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA,
dd2d6042
JM
5232 * any VMWRITE sets the ALU flags for VMfailInvalid.
5233 */
64c78508 5234 if (vmx->nested.current_vmptr == INVALID_GPA ||
dd2d6042 5235 (is_guest_mode(vcpu) &&
64c78508 5236 get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA))
55d2375e
SC
5237 return nested_vmx_failInvalid(vcpu);
5238
c90f4d03 5239 if (instr_info & BIT(10))
27b4a9c4 5240 value = kvm_register_read(vcpu, (((instr_info) >> 3) & 0xf));
55d2375e 5241 else {
fdb28619 5242 len = is_64_bit_mode(vcpu) ? 8 : 4;
55d2375e 5243 if (get_vmx_mem_address(vcpu, exit_qualification,
c90f4d03 5244 instr_info, false, len, &gva))
55d2375e 5245 return 1;
7a35e515
VK
5246 r = kvm_read_guest_virt(vcpu, gva, &value, len, &e);
5247 if (r != X86EMUL_CONTINUE)
3f3393b3 5248 return kvm_handle_memory_failure(vcpu, r, &e);
55d2375e
SC
5249 }
5250
27b4a9c4 5251 field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
693e02cc 5252
2423a4c0 5253 offset = get_vmcs12_field_offset(field);
693e02cc 5254 if (offset < 0)
b2656e4d 5255 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
55d2375e 5256
55d2375e
SC
5257 /*
5258 * If the vCPU supports "VMWRITE to any supported field in the
5259 * VMCS," then the "read-only" fields are actually read/write.
5260 */
5261 if (vmcs_field_readonly(field) &&
5262 !nested_cpu_has_vmwrite_any_field(vcpu))
b2656e4d 5263 return nested_vmx_fail(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
55d2375e 5264
dd2d6042
JM
5265 /*
5266 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
5267 * vmcs12, else we may crush a field or consume a stale value.
5268 */
5269 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field))
5270 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
55d2375e
SC
5271
5272 /*
b6437805
SC
5273 * Some Intel CPUs intentionally drop the reserved bits of the AR byte
5274 * fields on VMWRITE. Emulate this behavior to ensure consistent KVM
5275 * behavior regardless of the underlying hardware, e.g. if an AR_BYTE
5276 * field is intercepted for VMWRITE but not VMREAD (in L1), then VMREAD
5277 * from L1 will return a different value than VMREAD from L2 (L1 sees
5278 * the stripped down value, L2 sees the full value as stored by KVM).
55d2375e 5279 */
b6437805 5280 if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES)
c90f4d03 5281 value &= 0x1f0ff;
b6437805 5282
c90f4d03 5283 vmcs12_write_any(vmcs12, field, offset, value);
55d2375e
SC
5284
5285 /*
e2174295
SC
5286 * Do not track vmcs12 dirty-state if in guest-mode as we actually
5287 * dirty shadow vmcs12 instead of vmcs12. Fields that can be updated
5288 * by L1 without a vmexit are always updated in the vmcs02, i.e. don't
5289 * "dirty" vmcs12, all others go down the prepare_vmcs02() slow path.
55d2375e 5290 */
e2174295
SC
5291 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) {
5292 /*
5293 * L1 can read these fields without exiting, ensure the
5294 * shadow VMCS is up-to-date.
5295 */
5296 if (enable_shadow_vmcs && is_shadow_field_ro(field)) {
5297 preempt_disable();
5298 vmcs_load(vmx->vmcs01.shadow_vmcs);
fadcead0 5299
c90f4d03 5300 __vmcs_writel(field, value);
fadcead0 5301
e2174295
SC
5302 vmcs_clear(vmx->vmcs01.shadow_vmcs);
5303 vmcs_load(vmx->loaded_vmcs->vmcs);
5304 preempt_enable();
55d2375e 5305 }
e2174295 5306 vmx->nested.dirty_vmcs12 = true;
55d2375e
SC
5307 }
5308
5309 return nested_vmx_succeed(vcpu);
5310}
5311
5312static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
5313{
5314 vmx->nested.current_vmptr = vmptr;
5315 if (enable_shadow_vmcs) {
fe7f895d 5316 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
55d2375e
SC
5317 vmcs_write64(VMCS_LINK_POINTER,
5318 __pa(vmx->vmcs01.shadow_vmcs));
3731905e 5319 vmx->nested.need_vmcs12_to_shadow_sync = true;
55d2375e
SC
5320 }
5321 vmx->nested.dirty_vmcs12 = true;
ed2a4800 5322 vmx->nested.force_msr_bitmap_recalc = true;
55d2375e
SC
5323}
5324
5325/* Emulate the VMPTRLD instruction */
5326static int handle_vmptrld(struct kvm_vcpu *vcpu)
5327{
5328 struct vcpu_vmx *vmx = to_vmx(vcpu);
5329 gpa_t vmptr;
7a35e515 5330 int r;
55d2375e
SC
5331
5332 if (!nested_vmx_check_permission(vcpu))
5333 return 1;
5334
7a35e515
VK
5335 if (nested_vmx_get_vmptr(vcpu, &vmptr, &r))
5336 return r;
55d2375e 5337
e0bf2665 5338 if (!page_address_valid(vcpu, vmptr))
b2656e4d 5339 return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
55d2375e
SC
5340
5341 if (vmptr == vmx->nested.vmxon_ptr)
b2656e4d 5342 return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
55d2375e
SC
5343
5344 /* Forbid normal VMPTRLD if Enlightened version was used */
1e9dfbd7 5345 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
55d2375e
SC
5346 return 1;
5347
5348 if (vmx->nested.current_vmptr != vmptr) {
cee66664
DW
5349 struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache;
5350 struct vmcs_hdr hdr;
55d2375e 5351
8503fea6 5352 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) {
55d2375e
SC
5353 /*
5354 * Reads from an unbacked page return all 1s,
5355 * which means that the 32 bits located at the
5356 * given physical address won't match the required
5357 * VMCS12_REVISION identifier.
5358 */
b2656e4d 5359 return nested_vmx_fail(vcpu,
55d2375e 5360 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
55d2375e 5361 }
b146b839 5362
cee66664
DW
5363 if (kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr,
5364 offsetof(struct vmcs12, hdr),
5365 sizeof(hdr))) {
5366 return nested_vmx_fail(vcpu,
5367 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5368 }
b146b839 5369
cee66664
DW
5370 if (hdr.revision_id != VMCS12_REVISION ||
5371 (hdr.shadow_vmcs &&
55d2375e 5372 !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
b2656e4d 5373 return nested_vmx_fail(vcpu,
55d2375e
SC
5374 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5375 }
5376
5377 nested_release_vmcs12(vcpu);
5378
5379 /*
5380 * Load VMCS12 from guest memory since it is not already
5381 * cached.
5382 */
cee66664
DW
5383 if (kvm_read_guest_cached(vcpu->kvm, ghc, vmx->nested.cached_vmcs12,
5384 VMCS12_SIZE)) {
5385 return nested_vmx_fail(vcpu,
5386 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5387 }
55d2375e
SC
5388
5389 set_current_vmptr(vmx, vmptr);
5390 }
5391
5392 return nested_vmx_succeed(vcpu);
5393}
5394
5395/* Emulate the VMPTRST instruction */
5396static int handle_vmptrst(struct kvm_vcpu *vcpu)
5397{
5addc235 5398 unsigned long exit_qual = vmx_get_exit_qual(vcpu);
55d2375e
SC
5399 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5400 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
5401 struct x86_exception e;
5402 gva_t gva;
7a35e515 5403 int r;
55d2375e
SC
5404
5405 if (!nested_vmx_check_permission(vcpu))
5406 return 1;
5407
1e9dfbd7 5408 if (unlikely(evmptr_is_valid(to_vmx(vcpu)->nested.hv_evmcs_vmptr)))
55d2375e
SC
5409 return 1;
5410
fdb28619
EK
5411 if (get_vmx_mem_address(vcpu, exit_qual, instr_info,
5412 true, sizeof(gpa_t), &gva))
55d2375e
SC
5413 return 1;
5414 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
7a35e515
VK
5415 r = kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
5416 sizeof(gpa_t), &e);
5417 if (r != X86EMUL_CONTINUE)
3f3393b3 5418 return kvm_handle_memory_failure(vcpu, r, &e);
7a35e515 5419
55d2375e
SC
5420 return nested_vmx_succeed(vcpu);
5421}
5422
5423/* Emulate the INVEPT instruction */
5424static int handle_invept(struct kvm_vcpu *vcpu)
5425{
5426 struct vcpu_vmx *vmx = to_vmx(vcpu);
5427 u32 vmx_instruction_info, types;
ce8fe7b7
SC
5428 unsigned long type, roots_to_free;
5429 struct kvm_mmu *mmu;
55d2375e
SC
5430 gva_t gva;
5431 struct x86_exception e;
5432 struct {
5433 u64 eptp, gpa;
5434 } operand;
329bd56c 5435 int i, r, gpr_index;
55d2375e
SC
5436
5437 if (!(vmx->nested.msrs.secondary_ctls_high &
5438 SECONDARY_EXEC_ENABLE_EPT) ||
5439 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
5440 kvm_queue_exception(vcpu, UD_VECTOR);
5441 return 1;
5442 }
5443
5444 if (!nested_vmx_check_permission(vcpu))
5445 return 1;
5446
5447 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
329bd56c
VS
5448 gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info);
5449 type = kvm_register_read(vcpu, gpr_index);
55d2375e
SC
5450
5451 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
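 /*
  * Bits 1 and 2 of 'types' advertise the single-context and global
  * INVEPT types (VMX_EPT_EXTENT_CONTEXT and VMX_EPT_EXTENT_GLOBAL).
  */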
5452
5453 if (type >= 32 || !(types & (1 << type)))
b2656e4d 5454 return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
55d2375e
SC
5455
5456 /* According to the Intel VMX instruction reference, the memory
5457 * operand is read even if it isn't needed (e.g., for type==global)
5458 */
5addc235 5459 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
fdb28619 5460 vmx_instruction_info, false, sizeof(operand), &gva))
55d2375e 5461 return 1;
7a35e515
VK
5462 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
5463 if (r != X86EMUL_CONTINUE)
3f3393b3 5464 return kvm_handle_memory_failure(vcpu, r, &e);
55d2375e 5465
ce8fe7b7
SC
5466 /*
5467 * Nested EPT roots are always held through guest_mmu,
5468 * not root_mmu.
5469 */
5470 mmu = &vcpu->arch.guest_mmu;
5471
55d2375e 5472 switch (type) {
b1190198 5473 case VMX_EPT_EXTENT_CONTEXT:
eed0030e 5474 if (!nested_vmx_check_eptp(vcpu, operand.eptp))
b2656e4d 5475 return nested_vmx_fail(vcpu,
eed0030e 5476 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
f8aa7e39 5477
ce8fe7b7 5478 roots_to_free = 0;
b9e5603c 5479 if (nested_ept_root_matches(mmu->root.hpa, mmu->root.pgd,
ce8fe7b7
SC
5480 operand.eptp))
5481 roots_to_free |= KVM_MMU_ROOT_CURRENT;
5482
5483 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5484 if (nested_ept_root_matches(mmu->prev_roots[i].hpa,
be01e8e2 5485 mmu->prev_roots[i].pgd,
ce8fe7b7
SC
5486 operand.eptp))
5487 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
5488 }
5489 break;
eed0030e 5490 case VMX_EPT_EXTENT_GLOBAL:
ce8fe7b7 5491 roots_to_free = KVM_MMU_ROOTS_ALL;
55d2375e
SC
5492 break;
5493 default:
f9336e32 5494 BUG();
55d2375e
SC
5495 break;
5496 }
5497
ce8fe7b7 5498 if (roots_to_free)
0c1c92f1 5499 kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free);
ce8fe7b7 5500
55d2375e
SC
5501 return nested_vmx_succeed(vcpu);
5502}
5503
5504static int handle_invvpid(struct kvm_vcpu *vcpu)
5505{
5506 struct vcpu_vmx *vmx = to_vmx(vcpu);
5507 u32 vmx_instruction_info;
5508 unsigned long type, types;
5509 gva_t gva;
5510 struct x86_exception e;
5511 struct {
5512 u64 vpid;
5513 u64 gla;
5514 } operand;
5515 u16 vpid02;
329bd56c 5516 int r, gpr_index;
55d2375e
SC
5517
5518 if (!(vmx->nested.msrs.secondary_ctls_high &
5519 SECONDARY_EXEC_ENABLE_VPID) ||
5520 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
5521 kvm_queue_exception(vcpu, UD_VECTOR);
5522 return 1;
5523 }
5524
5525 if (!nested_vmx_check_permission(vcpu))
5526 return 1;
5527
5528 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
329bd56c
VS
5529 gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info);
5530 type = kvm_register_read(vcpu, gpr_index);
55d2375e
SC
5531
5532 types = (vmx->nested.msrs.vpid_caps &
5533 VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
5534
5535 if (type >= 32 || !(types & (1 << type)))
b2656e4d 5536 return nested_vmx_fail(vcpu,
55d2375e
SC
5537 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5538
5539 /* according to the intel vmx instruction reference, the memory
5540 * operand is read even if it isn't needed (e.g., for type==global)
5541 */
5addc235 5542 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
fdb28619 5543 vmx_instruction_info, false, sizeof(operand), &gva))
55d2375e 5544 return 1;
7a35e515
VK
5545 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
5546 if (r != X86EMUL_CONTINUE)
3f3393b3 5547 return kvm_handle_memory_failure(vcpu, r, &e);
7a35e515 5548
55d2375e 5549 if (operand.vpid >> 16)
b2656e4d 5550 return nested_vmx_fail(vcpu,
55d2375e
SC
5551 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5552
5553 vpid02 = nested_get_vpid02(vcpu);
5554 switch (type) {
5555 case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
5556 if (!operand.vpid ||
5557 is_noncanonical_address(operand.gla, vcpu))
b2656e4d 5558 return nested_vmx_fail(vcpu,
55d2375e 5559 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
bc41d0c4 5560 vpid_sync_vcpu_addr(vpid02, operand.gla);
55d2375e
SC
5561 break;
5562 case VMX_VPID_EXTENT_SINGLE_CONTEXT:
5563 case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
5564 if (!operand.vpid)
b2656e4d 5565 return nested_vmx_fail(vcpu,
55d2375e 5566 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
446ace4b 5567 vpid_sync_context(vpid02);
55d2375e
SC
5568 break;
5569 case VMX_VPID_EXTENT_ALL_CONTEXT:
446ace4b 5570 vpid_sync_context(vpid02);
55d2375e
SC
5571 break;
5572 default:
5573 WARN_ON_ONCE(1);
5574 return kvm_skip_emulated_instruction(vcpu);
5575 }
5576
d6e3f838
JS
5577 /*
5578 * Sync the shadow page tables if EPT is disabled, L1 is invalidating
25b62c62
SC
5579 * linear mappings for L2 (tagged with L2's VPID). Free all guest
5580 * roots as VPIDs are not tracked in the MMU role.
d6e3f838
JS
5581 *
5582 * Note, this operates on root_mmu, not guest_mmu, as L1 and L2 share
5583 * an MMU when EPT is disabled.
5584 *
 5586 * TODO: sync only the affected SPTEs for INDIVIDUAL_ADDR.
5586 */
5587 if (!enable_ept)
0c1c92f1 5588 kvm_mmu_free_guest_mode_roots(vcpu->kvm, &vcpu->arch.root_mmu);
d6e3f838 5589
55d2375e
SC
5590 return nested_vmx_succeed(vcpu);
5591}
5592
5593static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
5594 struct vmcs12 *vmcs12)
5595{
2b3eaf81 5596 u32 index = kvm_rcx_read(vcpu);
ac6389ab 5597 u64 new_eptp;
55d2375e 5598
c5ffd408 5599 if (WARN_ON_ONCE(!nested_cpu_has_ept(vmcs12)))
55d2375e 5600 return 1;
55d2375e
SC
5601 if (index >= VMFUNC_EPTP_ENTRIES)
5602 return 1;
5603
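 /* Read the candidate EPTP: 8-byte entry 'index' of the EPTP list page. */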
55d2375e 5604 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
ac6389ab 5605 &new_eptp, index * 8, 8))
55d2375e
SC
5606 return 1;
5607
55d2375e
SC
5608 /*
5609 * If the (L2) guest does a vmfunc to the currently
5610 * active ept pointer, we don't have to do anything else
5611 */
ac6389ab
SC
5612 if (vmcs12->ept_pointer != new_eptp) {
5613 if (!nested_vmx_check_eptp(vcpu, new_eptp))
55d2375e
SC
5614 return 1;
5615
ac6389ab 5616 vmcs12->ept_pointer = new_eptp;
39353ab5 5617 nested_ept_new_eptp(vcpu);
c805f5d5 5618
39353ab5
SC
5619 if (!nested_cpu_has_vpid(vmcs12))
5620 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
55d2375e
SC
5621 }
5622
5623 return 0;
5624}
5625
5626static int handle_vmfunc(struct kvm_vcpu *vcpu)
5627{
5628 struct vcpu_vmx *vmx = to_vmx(vcpu);
5629 struct vmcs12 *vmcs12;
2b3eaf81 5630 u32 function = kvm_rax_read(vcpu);
55d2375e
SC
5631
5632 /*
5633 * VMFUNC is only supported for nested guests, but we always enable the
5634 * secondary control for simplicity; for non-nested mode, fake that we
5635 * didn't by injecting #UD.
5636 */
5637 if (!is_guest_mode(vcpu)) {
5638 kvm_queue_exception(vcpu, UD_VECTOR);
5639 return 1;
5640 }
5641
5642 vmcs12 = get_vmcs12(vcpu);
546e8398
SC
5643
5644 /*
5645 * #UD on out-of-bounds function has priority over VM-Exit, and VMFUNC
5646 * is enabled in vmcs02 if and only if it's enabled in vmcs12.
5647 */
5648 if (WARN_ON_ONCE((function > 63) || !nested_cpu_has_vmfunc(vmcs12))) {
5649 kvm_queue_exception(vcpu, UD_VECTOR);
5650 return 1;
5651 }
5652
0e75225d 5653 if (!(vmcs12->vm_function_control & BIT_ULL(function)))
55d2375e
SC
5654 goto fail;
5655
5656 switch (function) {
5657 case 0:
5658 if (nested_vmx_eptp_switching(vcpu, vmcs12))
5659 goto fail;
5660 break;
5661 default:
5662 goto fail;
5663 }
5664 return kvm_skip_emulated_instruction(vcpu);
5665
5666fail:
8e533240
SC
5667 /*
5668 * This is effectively a reflected VM-Exit, as opposed to a synthesized
5669 * nested VM-Exit. Pass the original exit reason, i.e. don't hardcode
5670 * EXIT_REASON_VMFUNC as the exit reason.
5671 */
5672 nested_vmx_vmexit(vcpu, vmx->exit_reason.full,
87915858 5673 vmx_get_intr_info(vcpu),
5addc235 5674 vmx_get_exit_qual(vcpu));
55d2375e
SC
5675 return 1;
5676}
5677
e71237d3
OU
5678/*
5679 * Return true if an IO instruction with the specified port and size should cause
5680 * a VM-exit into L1.
5681 */
5682bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
5683 int size)
55d2375e 5684{
e71237d3 5685 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
55d2375e 5686 gpa_t bitmap, last_bitmap;
55d2375e
SC
5687 u8 b;
5688
64c78508 5689 last_bitmap = INVALID_GPA;
55d2375e
SC
5690 b = -1;
5691
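 /*
  * io_bitmap_a covers ports 0x0000-0x7fff and io_bitmap_b covers
  * 0x8000-0xffff, one bit per port; a set bit requests a VM-exit.
  */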
5692 while (size > 0) {
5693 if (port < 0x8000)
5694 bitmap = vmcs12->io_bitmap_a;
5695 else if (port < 0x10000)
5696 bitmap = vmcs12->io_bitmap_b;
5697 else
5698 return true;
5699 bitmap += (port & 0x7fff) / 8;
5700
5701 if (last_bitmap != bitmap)
5702 if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
5703 return true;
5704 if (b & (1 << (port & 7)))
5705 return true;
5706
5707 port++;
5708 size--;
5709 last_bitmap = bitmap;
5710 }
5711
5712 return false;
5713}
5714
e71237d3
OU
5715static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
5716 struct vmcs12 *vmcs12)
5717{
5718 unsigned long exit_qualification;
35a57134 5719 unsigned short port;
e71237d3
OU
5720 int size;
5721
5722 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
5723 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
5724
5addc235 5725 exit_qualification = vmx_get_exit_qual(vcpu);
e71237d3
OU
5726
5727 port = exit_qualification >> 16;
5728 size = (exit_qualification & 7) + 1;
5729
5730 return nested_vmx_check_io_bitmaps(vcpu, port, size);
5731}
5732
55d2375e 5733/*
463bfeee 5734 * Return true if we should exit from L2 to L1 to handle an MSR access,
55d2375e
SC
5735 * rather than handle it ourselves in L0. I.e., check whether L1 expressed
5736 * disinterest in the current event (read or write a specific MSR) by using an
5737 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
5738 */
5739static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
8e533240
SC
5740 struct vmcs12 *vmcs12,
5741 union vmx_exit_reason exit_reason)
55d2375e 5742{
2b3eaf81 5743 u32 msr_index = kvm_rcx_read(vcpu);
55d2375e
SC
5744 gpa_t bitmap;
5745
5746 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
5747 return true;
5748
5749 /*
5750 * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
5751 * for the four combinations of read/write and low/high MSR numbers.
5752 * First we need to figure out which of the four to use:
5753 */
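 /*
  * For example, a write to an MSR in the 0xc0000000 range lands in the
  * "write high" bitmap: base + 2048 (write) + 1024 (high), then one bit
  * per MSR within that 1024-byte bitmap.
  */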
5754 bitmap = vmcs12->msr_bitmap;
8e533240 5755 if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
55d2375e
SC
5756 bitmap += 2048;
5757 if (msr_index >= 0xc0000000) {
5758 msr_index -= 0xc0000000;
5759 bitmap += 1024;
5760 }
5761
5762 /* Then read the msr_index'th bit from this bitmap: */
5763 if (msr_index < 1024*8) {
5764 unsigned char b;
5765 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
5766 return true;
5767 return 1 & (b >> (msr_index & 7));
5768 } else
5769 return true; /* let L1 handle the wrong parameter */
5770}
5771
5772/*
 5773 * Return true if we should exit from L2 to L1 to handle a CR access exit,
5774 * rather than handle it ourselves in L0. I.e., check if L1 wanted to
5775 * intercept (via guest_host_mask etc.) the current event.
5776 */
5777static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
5778 struct vmcs12 *vmcs12)
5779{
5addc235 5780 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
55d2375e
SC
5781 int cr = exit_qualification & 15;
5782 int reg;
5783 unsigned long val;
5784
5785 switch ((exit_qualification >> 4) & 3) {
5786 case 0: /* mov to cr */
5787 reg = (exit_qualification >> 8) & 15;
27b4a9c4 5788 val = kvm_register_read(vcpu, reg);
55d2375e
SC
5789 switch (cr) {
5790 case 0:
5791 if (vmcs12->cr0_guest_host_mask &
5792 (val ^ vmcs12->cr0_read_shadow))
5793 return true;
5794 break;
5795 case 3:
55d2375e
SC
5796 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
5797 return true;
5798 break;
5799 case 4:
5800 if (vmcs12->cr4_guest_host_mask &
5801 (vmcs12->cr4_read_shadow ^ val))
5802 return true;
5803 break;
5804 case 8:
5805 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
5806 return true;
5807 break;
5808 }
5809 break;
5810 case 2: /* clts */
5811 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
5812 (vmcs12->cr0_read_shadow & X86_CR0_TS))
5813 return true;
5814 break;
5815 case 1: /* mov from cr */
5816 switch (cr) {
5817 case 3:
5818 if (vmcs12->cpu_based_vm_exec_control &
5819 CPU_BASED_CR3_STORE_EXITING)
5820 return true;
5821 break;
5822 case 8:
5823 if (vmcs12->cpu_based_vm_exec_control &
5824 CPU_BASED_CR8_STORE_EXITING)
5825 return true;
5826 break;
5827 }
5828 break;
5829 case 3: /* lmsw */
5830 /*
5831 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
5832 * cr0. Other attempted changes are ignored, with no exit.
5833 */
5834 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
5835 if (vmcs12->cr0_guest_host_mask & 0xe &
5836 (val ^ vmcs12->cr0_read_shadow))
5837 return true;
5838 if ((vmcs12->cr0_guest_host_mask & 0x1) &&
5839 !(vmcs12->cr0_read_shadow & 0x1) &&
5840 (val & 0x1))
5841 return true;
5842 break;
5843 }
5844 return false;
5845}
5846
72add915
SC
5847static bool nested_vmx_exit_handled_encls(struct kvm_vcpu *vcpu,
5848 struct vmcs12 *vmcs12)
5849{
5850 u32 encls_leaf;
5851
5852 if (!guest_cpuid_has(vcpu, X86_FEATURE_SGX) ||
5853 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING))
5854 return false;
5855
5856 encls_leaf = kvm_rax_read(vcpu);
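 /* Leaves above 62 are all represented by bit 63 of the exiting bitmap. */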
5857 if (encls_leaf > 62)
5858 encls_leaf = 63;
5859 return vmcs12->encls_exiting_bitmap & BIT_ULL(encls_leaf);
5860}
5861
55d2375e
SC
5862static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
5863 struct vmcs12 *vmcs12, gpa_t bitmap)
5864{
5865 u32 vmx_instruction_info;
5866 unsigned long field;
5867 u8 b;
5868
5869 if (!nested_cpu_has_shadow_vmcs(vmcs12))
5870 return true;
5871
5872 /* Decode instruction info and find the field to access */
5873 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5874 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
5875
5876 /* Out-of-range fields always cause a VM exit from L2 to L1 */
5877 if (field >> 15)
5878 return true;
5879
5880 if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1))
5881 return true;
5882
5883 return 1 & (b >> (field & 7));
5884}
5885
b045ae90
OU
5886static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
5887{
5888 u32 entry_intr_info = vmcs12->vm_entry_intr_info_field;
5889
5890 if (nested_cpu_has_mtf(vmcs12))
5891 return true;
5892
5893 /*
5894 * An MTF VM-exit may be injected into the guest by setting the
5895 * interruption-type to 7 (other event) and the vector field to 0. Such
5896 * is the case regardless of the 'monitor trap flag' VM-execution
5897 * control.
5898 */
5899 return entry_intr_info == (INTR_INFO_VALID_MASK
5900 | INTR_TYPE_OTHER_EVENT);
5901}
5902
55d2375e 5903/*
2c1f3323
SC
5904 * Return true if L0 wants to handle an exit from L2 regardless of whether or not
5905 * L1 wants the exit. Only call this when in is_guest_mode (L2).
55d2375e 5906 */
8e533240
SC
5907static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
5908 union vmx_exit_reason exit_reason)
55d2375e 5909{
236871b6 5910 u32 intr_info;
55d2375e 5911
8e533240 5912 switch ((u16)exit_reason.basic) {
55d2375e 5913 case EXIT_REASON_EXCEPTION_NMI:
87915858 5914 intr_info = vmx_get_intr_info(vcpu);
55d2375e 5915 if (is_nmi(intr_info))
2c1f3323 5916 return true;
55d2375e 5917 else if (is_page_fault(intr_info))
18712c13
SC
5918 return vcpu->arch.apf.host_apf_flags ||
5919 vmx_need_pf_intercept(vcpu);
55d2375e
SC
5920 else if (is_debug(intr_info) &&
5921 vcpu->guest_debug &
5922 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
2c1f3323 5923 return true;
55d2375e
SC
5924 else if (is_breakpoint(intr_info) &&
5925 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
2c1f3323 5926 return true;
b33bb78a
SC
5927 else if (is_alignment_check(intr_info) &&
5928 !vmx_guest_inject_ac(vcpu))
5929 return true;
2c1f3323
SC
5930 return false;
5931 case EXIT_REASON_EXTERNAL_INTERRUPT:
5932 return true;
5933 case EXIT_REASON_MCE_DURING_VMENTRY:
5934 return true;
5935 case EXIT_REASON_EPT_VIOLATION:
5936 /*
5937 * L0 always deals with the EPT violation. If nested EPT is
5938 * used, and the nested mmu code discovers that the address is
5939 * missing in the guest EPT table (EPT12), the EPT violation
5940 * will be injected with nested_ept_inject_page_fault()
5941 */
5942 return true;
5943 case EXIT_REASON_EPT_MISCONFIG:
5944 /*
5945 * L2 never uses directly L1's EPT, but rather L0's own EPT
5946 * table (shadow on EPT) or a merged EPT table that L0 built
5947 * (EPT on EPT). So any problems with the structure of the
5948 * table is L0's fault.
5949 */
5950 return true;
5951 case EXIT_REASON_PREEMPTION_TIMER:
5952 return true;
5953 case EXIT_REASON_PML_FULL:
c3bb9a20
SC
5954 /*
5955 * PML is emulated for an L1 VMM and should never be enabled in
5956 * vmcs02, always "handle" PML_FULL by exiting to userspace.
5957 */
2c1f3323
SC
5958 return true;
5959 case EXIT_REASON_VMFUNC:
5960 /* VM functions are emulated through L2->L0 vmexits. */
5961 return true;
24a996ad
CQ
5962 case EXIT_REASON_BUS_LOCK:
5963 /*
5964 * At present, bus lock VM exit is never exposed to L1.
5965 * Handle L2's bus locks in L0 directly.
5966 */
5967 return true;
2c1f3323
SC
5968 default:
5969 break;
5970 }
5971 return false;
5972}
5973
5974/*
 5975 * Return true if L1 wants to intercept an exit from L2. Only call this when in
5976 * is_guest_mode (L2).
5977 */
8e533240
SC
5978static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu,
5979 union vmx_exit_reason exit_reason)
2c1f3323
SC
5980{
5981 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
9bd4af24 5982 u32 intr_info;
2c1f3323 5983
8e533240 5984 switch ((u16)exit_reason.basic) {
2c1f3323 5985 case EXIT_REASON_EXCEPTION_NMI:
87915858 5986 intr_info = vmx_get_intr_info(vcpu);
2c1f3323
SC
5987 if (is_nmi(intr_info))
5988 return true;
5989 else if (is_page_fault(intr_info))
5990 return true;
55d2375e
SC
5991 return vmcs12->exception_bitmap &
5992 (1u << (intr_info & INTR_INFO_VECTOR_MASK));
5993 case EXIT_REASON_EXTERNAL_INTERRUPT:
2c1f3323 5994 return nested_exit_on_intr(vcpu);
55d2375e
SC
5995 case EXIT_REASON_TRIPLE_FAULT:
5996 return true;
9dadc2f9
XL
5997 case EXIT_REASON_INTERRUPT_WINDOW:
5998 return nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING);
55d2375e 5999 case EXIT_REASON_NMI_WINDOW:
4e2a0bc5 6000 return nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING);
55d2375e
SC
6001 case EXIT_REASON_TASK_SWITCH:
6002 return true;
6003 case EXIT_REASON_CPUID:
6004 return true;
6005 case EXIT_REASON_HLT:
6006 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
6007 case EXIT_REASON_INVD:
6008 return true;
6009 case EXIT_REASON_INVLPG:
6010 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
6011 case EXIT_REASON_RDPMC:
6012 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
6013 case EXIT_REASON_RDRAND:
6014 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
6015 case EXIT_REASON_RDSEED:
6016 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
6017 case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
6018 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
6019 case EXIT_REASON_VMREAD:
6020 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
6021 vmcs12->vmread_bitmap);
6022 case EXIT_REASON_VMWRITE:
6023 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
6024 vmcs12->vmwrite_bitmap);
6025 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
6026 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
6027 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME:
6028 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
6029 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
6030 /*
6031 * VMX instructions trap unconditionally. This allows L1 to
6032 * emulate them for its L2 guest, i.e., allows 3-level nesting!
6033 */
6034 return true;
6035 case EXIT_REASON_CR_ACCESS:
6036 return nested_vmx_exit_handled_cr(vcpu, vmcs12);
6037 case EXIT_REASON_DR_ACCESS:
6038 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
6039 case EXIT_REASON_IO_INSTRUCTION:
6040 return nested_vmx_exit_handled_io(vcpu, vmcs12);
6041 case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
6042 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
6043 case EXIT_REASON_MSR_READ:
6044 case EXIT_REASON_MSR_WRITE:
6045 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
6046 case EXIT_REASON_INVALID_STATE:
6047 return true;
6048 case EXIT_REASON_MWAIT_INSTRUCTION:
6049 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
6050 case EXIT_REASON_MONITOR_TRAP_FLAG:
b045ae90 6051 return nested_vmx_exit_handled_mtf(vmcs12);
55d2375e
SC
6052 case EXIT_REASON_MONITOR_INSTRUCTION:
6053 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
6054 case EXIT_REASON_PAUSE_INSTRUCTION:
6055 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
6056 nested_cpu_has2(vmcs12,
6057 SECONDARY_EXEC_PAUSE_LOOP_EXITING);
6058 case EXIT_REASON_MCE_DURING_VMENTRY:
2c1f3323 6059 return true;
55d2375e
SC
6060 case EXIT_REASON_TPR_BELOW_THRESHOLD:
6061 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
6062 case EXIT_REASON_APIC_ACCESS:
6063 case EXIT_REASON_APIC_WRITE:
6064 case EXIT_REASON_EOI_INDUCED:
6065 /*
6066 * The controls for "virtualize APIC accesses," "APIC-
6067 * register virtualization," and "virtual-interrupt
6068 * delivery" only come from vmcs12.
6069 */
6070 return true;
55d2375e
SC
6071 case EXIT_REASON_INVPCID:
6072 return
6073 nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
6074 nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
6075 case EXIT_REASON_WBINVD:
6076 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
6077 case EXIT_REASON_XSETBV:
6078 return true;
6079 case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
6080 /*
6081 * This should never happen, since it is not possible to
6082 * set XSS to a non-zero value---neither in L1 nor in L2.
 6083 * If it were, XSS would have to be checked against
6084 * the XSS exit bitmap in vmcs12.
6085 */
6086 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
bf653b78
TX
6087 case EXIT_REASON_UMWAIT:
6088 case EXIT_REASON_TPAUSE:
6089 return nested_cpu_has2(vmcs12,
6090 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE);
72add915
SC
6091 case EXIT_REASON_ENCLS:
6092 return nested_vmx_exit_handled_encls(vcpu, vmcs12);
55d2375e
SC
6093 default:
6094 return true;
6095 }
6096}
6097
7b7bd87d
SC
6098/*
6099 * Conditionally reflect a VM-Exit into L1. Returns %true if the VM-Exit was
6100 * reflected into L1.
6101 */
f47baaed 6102bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
7b7bd87d 6103{
fbdd5025 6104 struct vcpu_vmx *vmx = to_vmx(vcpu);
8e533240 6105 union vmx_exit_reason exit_reason = vmx->exit_reason;
87796555
SC
6106 unsigned long exit_qual;
6107 u32 exit_intr_info;
fbdd5025
SC
6108
6109 WARN_ON_ONCE(vmx->nested.nested_run_pending);
6110
6111 /*
6112 * Late nested VM-Fail shares the same flow as nested VM-Exit since KVM
6113 * has already loaded L2's state.
6114 */
6115 if (unlikely(vmx->fail)) {
6116 trace_kvm_nested_vmenter_failed(
6117 "hardware VM-instruction error: ",
6118 vmcs_read32(VM_INSTRUCTION_ERROR));
6119 exit_intr_info = 0;
6120 exit_qual = 0;
6121 goto reflect_vmexit;
6122 }
7b7bd87d 6123
0a62a031 6124 trace_kvm_nested_vmexit(vcpu, KVM_ISA_VMX);
236871b6 6125
2c1f3323
SC
6126 /* If L0 (KVM) wants the exit, it trumps L1's desires. */
6127 if (nested_vmx_l0_wants_exit(vcpu, exit_reason))
6128 return false;
6129
6130 /* If L1 doesn't want the exit, handle it in L0. */
6131 if (!nested_vmx_l1_wants_exit(vcpu, exit_reason))
7b7bd87d
SC
6132 return false;
6133
6134 /*
1d283062
SC
6135 * vmcs.VM_EXIT_INTR_INFO is only valid for EXCEPTION_NMI exits. For
6136 * EXTERNAL_INTERRUPT, the value for vmcs12->vm_exit_intr_info would
6137 * need to be synthesized by querying the in-kernel LAPIC, but external
6138 * interrupts are never reflected to L1 so it's a non-issue.
7b7bd87d 6139 */
02f1965f 6140 exit_intr_info = vmx_get_intr_info(vcpu);
f315f2b1 6141 if (is_exception_with_error_code(exit_intr_info)) {
7b7bd87d
SC
6142 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6143
6144 vmcs12->vm_exit_intr_error_code =
6145 vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
6146 }
02f1965f 6147 exit_qual = vmx_get_exit_qual(vcpu);
7b7bd87d 6148
fbdd5025 6149reflect_vmexit:
8e533240 6150 nested_vmx_vmexit(vcpu, exit_reason.full, exit_intr_info, exit_qual);
7b7bd87d
SC
6151 return true;
6152}
55d2375e
SC
6153
6154static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
6155 struct kvm_nested_state __user *user_kvm_nested_state,
6156 u32 user_data_size)
6157{
6158 struct vcpu_vmx *vmx;
6159 struct vmcs12 *vmcs12;
6160 struct kvm_nested_state kvm_state = {
6161 .flags = 0,
6ca00dfa 6162 .format = KVM_STATE_NESTED_FORMAT_VMX,
55d2375e 6163 .size = sizeof(kvm_state),
850448f3 6164 .hdr.vmx.flags = 0,
64c78508
YZ
6165 .hdr.vmx.vmxon_pa = INVALID_GPA,
6166 .hdr.vmx.vmcs12_pa = INVALID_GPA,
850448f3 6167 .hdr.vmx.preemption_timer_deadline = 0,
55d2375e 6168 };
6ca00dfa
LA
6169 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
6170 &user_kvm_nested_state->data.vmx[0];
55d2375e
SC
6171
6172 if (!vcpu)
6ca00dfa 6173 return kvm_state.size + sizeof(*user_vmx_nested_state);
55d2375e
SC
6174
6175 vmx = to_vmx(vcpu);
6176 vmcs12 = get_vmcs12(vcpu);
6177
55d2375e
SC
6178 if (nested_vmx_allowed(vcpu) &&
6179 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
6ca00dfa
LA
6180 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
6181 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;
55d2375e
SC
6182
6183 if (vmx_has_valid_vmcs12(vcpu)) {
6ca00dfa 6184 kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);
55d2375e 6185
27849968
VK
6186 /* 'hv_evmcs_vmptr' can also be EVMPTR_MAP_PENDING here */
6187 if (vmx->nested.hv_evmcs_vmptr != EVMPTR_INVALID)
323d73a8
LA
6188 kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
6189
55d2375e
SC
6190 if (is_guest_mode(vcpu) &&
6191 nested_cpu_has_shadow_vmcs(vmcs12) &&
64c78508 6192 vmcs12->vmcs_link_pointer != INVALID_GPA)
6ca00dfa 6193 kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12);
55d2375e
SC
6194 }
6195
6196 if (vmx->nested.smm.vmxon)
6ca00dfa 6197 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
55d2375e
SC
6198
6199 if (vmx->nested.smm.guest_mode)
6ca00dfa 6200 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
55d2375e
SC
6201
6202 if (is_guest_mode(vcpu)) {
6203 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
6204
6205 if (vmx->nested.nested_run_pending)
6206 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
5ef8acbd
OU
6207
6208 if (vmx->nested.mtf_pending)
6209 kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING;
850448f3
PS
6210
6211 if (nested_cpu_has_preemption_timer(vmcs12) &&
6212 vmx->nested.has_preemption_timer_deadline) {
6213 kvm_state.hdr.vmx.flags |=
6214 KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE;
6215 kvm_state.hdr.vmx.preemption_timer_deadline =
6216 vmx->nested.preemption_timer_deadline;
6217 }
55d2375e
SC
6218 }
6219 }
6220
6221 if (user_data_size < kvm_state.size)
6222 goto out;
6223
6224 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
6225 return -EFAULT;
6226
6227 if (!vmx_has_valid_vmcs12(vcpu))
6228 goto out;
6229
6230 /*
6231 * When running L2, the authoritative vmcs12 state is in the
6232 * vmcs02. When running L1, the authoritative vmcs12 state is
6233 * in the shadow or enlightened vmcs linked to vmcs01, unless
3731905e 6234 * need_vmcs12_to_shadow_sync is set, in which case, the authoritative
55d2375e
SC
6235 * vmcs12 state is in the vmcs12 already.
6236 */
6237 if (is_guest_mode(vcpu)) {
3731905e 6238 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
7952d769 6239 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
d51e1d3f
ML
6240 } else {
6241 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
6242 if (!vmx->nested.need_vmcs12_to_shadow_sync) {
1e9dfbd7 6243 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
d6bf71a1
VK
6244 /*
6245 * The L1 hypervisor is not obliged to keep the
6246 * eVMCS clean-fields data up-to-date while not
6247 * in guest mode; 'hv_clean_fields' is only
6248 * guaranteed to be accurate at VM-entry, so
6249 * ignore it here and do a full copy.
6250 */
6251 copy_enlightened_to_vmcs12(vmx, 0);
d51e1d3f
ML
6252 else if (enable_shadow_vmcs)
6253 copy_shadow_to_vmcs12(vmx);
6254 }
55d2375e
SC
6255 }
6256
6ca00dfa
LA
6257 BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
6258 BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE);
6259
3a33d030
TR
6260 /*
6261 * Copy over the full allocated size of vmcs12 rather than just the size
6262 * of the struct.
6263 */
6ca00dfa 6264 if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE))
55d2375e
SC
6265 return -EFAULT;
6266
6267 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
64c78508 6268 vmcs12->vmcs_link_pointer != INVALID_GPA) {
6ca00dfa 6269 if (copy_to_user(user_vmx_nested_state->shadow_vmcs12,
3a33d030 6270 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
55d2375e
SC
6271 return -EFAULT;
6272 }
55d2375e
SC
6273out:
6274 return kvm_state.size;
6275}
6276
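/*
 * Userspace-side sketch of driving the ioctl implemented above (VMM
 * code, e.g. during live migration; not part of this file). It assumes
 * 'kvm_fd' is the /dev/kvm fd and 'vcpu_fd' a vCPU fd; error handling
 * is abbreviated.
 */
#if 0	/* illustration only, not compiled */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdlib.h>

static struct kvm_nested_state *example_save_nested_state(int kvm_fd, int vcpu_fd)
{
	/* KVM_CHECK_EXTENSION(KVM_CAP_NESTED_STATE) reports the maximum size. */
	int max_size = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NESTED_STATE);
	struct kvm_nested_state *state;

	if (max_size <= 0)
		return NULL;	/* nested state not supported */

	state = calloc(1, max_size);
	if (!state)
		return NULL;

	/* Tell KVM how big the buffer is; it fails with E2BIG if too small. */
	state->size = max_size;
	if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) < 0) {
		free(state);
		return NULL;
	}
	return state;	/* state->size now holds the bytes actually used */
}
#endif
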
6277/*
6278 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
6279 */
6280void vmx_leave_nested(struct kvm_vcpu *vcpu)
6281{
6282 if (is_guest_mode(vcpu)) {
6283 to_vmx(vcpu)->nested.nested_run_pending = 0;
6284 nested_vmx_vmexit(vcpu, -1, 0, 0);
6285 }
6286 free_nested(vcpu);
6287}
6288
6289static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
6290 struct kvm_nested_state __user *user_kvm_nested_state,
6291 struct kvm_nested_state *kvm_state)
6292{
6293 struct vcpu_vmx *vmx = to_vmx(vcpu);
6294 struct vmcs12 *vmcs12;
68cda40d 6295 enum vm_entry_failure_code ignored;
6ca00dfa
LA
6296 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
6297 &user_kvm_nested_state->data.vmx[0];
55d2375e
SC
6298 int ret;
6299
6ca00dfa 6300 if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX)
55d2375e
SC
6301 return -EINVAL;
6302
64c78508 6303 if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA) {
6ca00dfa 6304 if (kvm_state->hdr.vmx.smm.flags)
55d2375e
SC
6305 return -EINVAL;
6306
64c78508 6307 if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA)
55d2375e
SC
6308 return -EINVAL;
6309
323d73a8
LA
6310 /*
6311 * KVM_STATE_NESTED_EVMCS used to signal that KVM should
6312 * enable the eVMCS capability on the vCPU. Since then, the
6313 * code has changed so that the flag signals that vmcs12
6314 * should be copied into the eVMCS in guest memory.
6315 *
6316 * To preserve backwards compatibility, allow userspace
6317 * to set this flag even when there is no VMXON region.
6318 */
9fd58877
PB
6319 if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
6320 return -EINVAL;
6321 } else {
6322 if (!nested_vmx_allowed(vcpu))
6323 return -EINVAL;
55d2375e 6324
9fd58877
PB
6325 if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
6326 return -EINVAL;
323d73a8 6327 }
55d2375e 6328
6ca00dfa 6329 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
55d2375e
SC
6330 (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
6331 return -EINVAL;
6332
6ca00dfa 6333 if (kvm_state->hdr.vmx.smm.flags &
55d2375e
SC
6334 ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
6335 return -EINVAL;
6336
5e105c88
PB
6337 if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE)
6338 return -EINVAL;
6339
55d2375e
SC
6340 /*
6341 * SMM temporarily disables VMX, so we cannot be in guest mode,
6342 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags
6343 * must be zero.
6344 */
65b712f1
LA
6345 if (is_smm(vcpu) ?
6346 (kvm_state->flags &
6347 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING))
6348 : kvm_state->hdr.vmx.smm.flags)
55d2375e
SC
6349 return -EINVAL;
6350
6ca00dfa
LA
6351 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
6352 !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
55d2375e
SC
6353 return -EINVAL;
6354
323d73a8
LA
6355 if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) &&
6356 (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled))
9fd58877 6357 return -EINVAL;
55d2375e 6358
323d73a8 6359 vmx_leave_nested(vcpu);
9fd58877 6360
64c78508 6361 if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA)
9fd58877 6362 return 0;
332d0797 6363
6ca00dfa 6364 vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa;
55d2375e
SC
6365 ret = enter_vmx_operation(vcpu);
6366 if (ret)
6367 return ret;
6368
0f02bd0a
PB
6369 /* Empty 'VMXON' state is permitted if no VMCS is loaded */
6370 if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) {
6371 /* See vmx_has_valid_vmcs12. */
6372 if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) ||
6373 (kvm_state->flags & KVM_STATE_NESTED_EVMCS) ||
64c78508 6374 (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA))
0f02bd0a
PB
6375 return -EINVAL;
6376 else
6377 return 0;
6378 }
55d2375e 6379
64c78508 6380 if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA) {
6ca00dfa
LA
6381 if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
6382 !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa))
55d2375e
SC
6383 return -EINVAL;
6384
6ca00dfa 6385 set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa);
55d2375e
SC
6386 } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
6387 /*
e942dbf8
VK
6388 * nested_vmx_handle_enlightened_vmptrld() cannot be called
6389 * directly from here as HV_X64_MSR_VP_ASSIST_PAGE may not be
6390 * restored yet. EVMCS will be mapped from
6391 * nested_get_vmcs12_pages().
55d2375e 6392 */
27849968 6393 vmx->nested.hv_evmcs_vmptr = EVMPTR_MAP_PENDING;
729c15c2 6394 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
55d2375e
SC
6395 } else {
6396 return -EINVAL;
6397 }
6398
6ca00dfa 6399 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
55d2375e
SC
6400 vmx->nested.smm.vmxon = true;
6401 vmx->nested.vmxon = false;
6402
6ca00dfa 6403 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
55d2375e
SC
6404 vmx->nested.smm.guest_mode = true;
6405 }
6406
6407 vmcs12 = get_vmcs12(vcpu);
6ca00dfa 6408 if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12)))
55d2375e
SC
6409 return -EFAULT;
6410
6411 if (vmcs12->hdr.revision_id != VMCS12_REVISION)
6412 return -EINVAL;
6413
6414 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
6415 return 0;
6416
21be4ca1
SC
6417 vmx->nested.nested_run_pending =
6418 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
6419
5ef8acbd
OU
6420 vmx->nested.mtf_pending =
6421 !!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING);
6422
21be4ca1 6423 ret = -EINVAL;
55d2375e 6424 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
64c78508 6425 vmcs12->vmcs_link_pointer != INVALID_GPA) {
55d2375e
SC
6426 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
6427
6ca00dfa
LA
6428 if (kvm_state->size <
6429 sizeof(*kvm_state) +
6430 sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12))
21be4ca1 6431 goto error_guest_mode;
55d2375e
SC
6432
6433 if (copy_from_user(shadow_vmcs12,
6ca00dfa
LA
6434 user_vmx_nested_state->shadow_vmcs12,
6435 sizeof(*shadow_vmcs12))) {
21be4ca1
SC
6436 ret = -EFAULT;
6437 goto error_guest_mode;
6438 }
55d2375e
SC
6439
6440 if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
6441 !shadow_vmcs12->hdr.shadow_vmcs)
21be4ca1 6442 goto error_guest_mode;
55d2375e
SC
6443 }
6444
83d31e52 6445 vmx->nested.has_preemption_timer_deadline = false;
850448f3
PS
6446 if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) {
6447 vmx->nested.has_preemption_timer_deadline = true;
6448 vmx->nested.preemption_timer_deadline =
6449 kvm_state->hdr.vmx.preemption_timer_deadline;
6450 }
6451
5478ba34
SC
6452 if (nested_vmx_check_controls(vcpu, vmcs12) ||
6453 nested_vmx_check_host_state(vcpu, vmcs12) ||
68cda40d 6454 nested_vmx_check_guest_state(vcpu, vmcs12, &ignored))
21be4ca1 6455 goto error_guest_mode;
55d2375e
SC
6456
6457 vmx->nested.dirty_vmcs12 = true;
ed2a4800 6458 vmx->nested.force_msr_bitmap_recalc = true;
55d2375e 6459 ret = nested_vmx_enter_non_root_mode(vcpu, false);
21be4ca1
SC
6460 if (ret)
6461 goto error_guest_mode;
55d2375e
SC
6462
6463 return 0;
21be4ca1
SC
6464
6465error_guest_mode:
6466 vmx->nested.nested_run_pending = 0;
6467 return ret;
55d2375e
SC
6468}
6469
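/*
 * Matching userspace-side sketch for the restore path above: the blob
 * captured with KVM_GET_NESTED_STATE is handed back through
 * KVM_SET_NESTED_STATE on the vCPU fd. Per the checks above, the vCPU's
 * CPUID must already expose VMX or the ioctl fails with -EINVAL. VMM
 * code, not part of this file.
 */
#if 0	/* illustration only, not compiled */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int example_restore_nested_state(int vcpu_fd,
					 struct kvm_nested_state *state)
{
	/* 'state' was previously filled in by KVM_GET_NESTED_STATE. */
	return ioctl(vcpu_fd, KVM_SET_NESTED_STATE, state);
}
#endif
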
1b84292b 6470void nested_vmx_set_vmcs_shadowing_bitmap(void)
55d2375e
SC
6471{
6472 if (enable_shadow_vmcs) {
55d2375e 6473 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
fadcead0 6474 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
55d2375e
SC
6475 }
6476}
6477
ba1f8245
SC
6478/*
6479 * Indexing into the vmcs12 uses the VMCS encoding rotated left by 6. Undo
6480 * that madness to get the encoding for comparison.
6481 */
6482#define VMCS12_IDX_TO_ENC(idx) ((u16)(((u16)(idx) >> 6) | ((u16)(idx) << 10)))
6483
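/*
 * Worked example of the rotation being undone above (illustration only):
 * the vmcs12 offset table is indexed by the 16-bit field encoding rotated
 * left by 6, so rotating an index right by 6, as VMCS12_IDX_TO_ENC()
 * does, recovers the original encoding.
 */
#if 0	/* illustration only, not compiled */
static inline u16 example_enc_to_idx(u16 enc)
{
	return (u16)((enc << 6) | (enc >> 10));	/* rotate left by 6 */
}
/* For any 16-bit encoding: VMCS12_IDX_TO_ENC(example_enc_to_idx(enc)) == enc */
#endif
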
6484static u64 nested_vmx_calc_vmcs_enum_msr(void)
6485{
6486 /*
6487 * Note these are the so called "index" of the VMCS field encoding, not
6488 * the index into vmcs12.
6489 */
6490 unsigned int max_idx, idx;
6491 int i;
6492
6493 /*
6494 * For better or worse, KVM allows VMREAD/VMWRITE to all fields in
6495 * vmcs12, regardless of whether or not the associated feature is
6496 * exposed to L1. Simply find the field with the highest index.
6497 */
6498 max_idx = 0;
6499 for (i = 0; i < nr_vmcs12_fields; i++) {
6500 /* The vmcs12 table is very, very sparsely populated. */
2423a4c0 6501 if (!vmcs12_field_offsets[i])
ba1f8245
SC
6502 continue;
6503
6504 idx = vmcs_field_index(VMCS12_IDX_TO_ENC(i));
6505 if (idx > max_idx)
6506 max_idx = idx;
6507 }
6508
6509 return (u64)max_idx << VMCS_FIELD_INDEX_SHIFT;
6510}
6511
55d2375e
SC
6512/*
6513 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
6514 * returned for the various VMX controls MSRs when nested VMX is enabled.
6515 * The same values should also be used to verify that vmcs12 control fields are
6516 * valid during nested entry from L1 to L2.
6517 * Each of these control msrs has a low and high 32-bit half: A low bit is on
6518 * if the corresponding bit in the (32-bit) control field *must* be on, and a
6519 * bit in the high half is on if the corresponding bit in the control field
6520 * may be on. See also vmx_control_verify().
6521 */
a4443267 6522void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
55d2375e
SC
6523{
6524 /*
6525 * Note that as a general rule, the high half of the MSRs (bits in
6526 * the control fields which may be 1) should be initialized by the
6527 * intersection of the underlying hardware's MSR (i.e., features which
6528 * can be supported) and the list of features we want to expose -
6529 * because they are known to be properly supported in our code.
6530 * Also, usually, the low half of the MSRs (bits which must be 1) can
6531 * be set to 0, meaning that L1 may turn off any of these bits. The
6532 * reason is that if one of these bits is necessary, it will appear
6533 * in vmcs01, and prepare_vmcs02, which bitwise-ORs the control
6534 * fields of vmcs01 and vmcs12, will keep these bits set - and
2c1f3323 6535 * nested_vmx_l1_wants_exit() will not reflect the related exits to L1.
55d2375e
SC
6536 * These rules have exceptions below.
6537 */
6538
6539 /* pin-based controls */
6540 rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
6541 msrs->pinbased_ctls_low,
6542 msrs->pinbased_ctls_high);
6543 msrs->pinbased_ctls_low |=
6544 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
6545 msrs->pinbased_ctls_high &=
6546 PIN_BASED_EXT_INTR_MASK |
6547 PIN_BASED_NMI_EXITING |
6548 PIN_BASED_VIRTUAL_NMIS |
a4443267 6549 (enable_apicv ? PIN_BASED_POSTED_INTR : 0);
55d2375e
SC
6550 msrs->pinbased_ctls_high |=
6551 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
6552 PIN_BASED_VMX_PREEMPTION_TIMER;
6553
6554 /* exit controls */
6555 rdmsr(MSR_IA32_VMX_EXIT_CTLS,
6556 msrs->exit_ctls_low,
6557 msrs->exit_ctls_high);
6558 msrs->exit_ctls_low =
6559 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
6560
6561 msrs->exit_ctls_high &=
6562#ifdef CONFIG_X86_64
6563 VM_EXIT_HOST_ADDR_SPACE_SIZE |
6564#endif
efc83133
CQ
6565 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT |
6566 VM_EXIT_CLEAR_BNDCFGS | VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
55d2375e
SC
6567 msrs->exit_ctls_high |=
6568 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
6569 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
6570 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
6571
6572 /* We support free control of debug control saving. */
6573 msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
6574
6575 /* entry controls */
6576 rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
6577 msrs->entry_ctls_low,
6578 msrs->entry_ctls_high);
6579 msrs->entry_ctls_low =
6580 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
6581 msrs->entry_ctls_high &=
6582#ifdef CONFIG_X86_64
6583 VM_ENTRY_IA32E_MODE |
6584#endif
efc83133
CQ
6585 VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS |
6586 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
55d2375e
SC
6587 msrs->entry_ctls_high |=
6588 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
6589
6590 /* We support free control of debug control loading. */
6591 msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
6592
6593 /* cpu-based controls */
6594 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
6595 msrs->procbased_ctls_low,
6596 msrs->procbased_ctls_high);
6597 msrs->procbased_ctls_low =
6598 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
6599 msrs->procbased_ctls_high &=
9dadc2f9 6600 CPU_BASED_INTR_WINDOW_EXITING |
5e3d394f 6601 CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETTING |
55d2375e
SC
6602 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
6603 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
6604 CPU_BASED_CR3_STORE_EXITING |
6605#ifdef CONFIG_X86_64
6606 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
6607#endif
6608 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
6609 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
6610 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
6611 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
6612 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
6613 /*
6614 * We can allow some features even when not supported by the
6615 * hardware. For example, L1 can specify an MSR bitmap - and we
6616 * can use it to avoid exits to L1 - even when L0 runs L2
6617 * without MSR bitmaps.
6618 */
6619 msrs->procbased_ctls_high |=
6620 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
6621 CPU_BASED_USE_MSR_BITMAPS;
6622
6623 /* We support free control of CR3 access interception. */
6624 msrs->procbased_ctls_low &=
6625 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
6626
6627 /*
6628 * secondary cpu-based controls. Do not include those that
7c1b761b
XL
6629 * depend on CPUID bits, they are added later by
6630 * vmx_vcpu_after_set_cpuid.
55d2375e 6631 */
6b1971c6
VK
6632 if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
6633 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
6634 msrs->secondary_ctls_low,
6635 msrs->secondary_ctls_high);
6636
55d2375e
SC
6637 msrs->secondary_ctls_low = 0;
6638 msrs->secondary_ctls_high &=
6639 SECONDARY_EXEC_DESC |
7f3603b6 6640 SECONDARY_EXEC_ENABLE_RDTSCP |
55d2375e 6641 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
6defc591 6642 SECONDARY_EXEC_WBINVD_EXITING |
55d2375e
SC
6643 SECONDARY_EXEC_APIC_REGISTER_VIRT |
6644 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
6defc591
PB
6645 SECONDARY_EXEC_RDRAND_EXITING |
6646 SECONDARY_EXEC_ENABLE_INVPCID |
6647 SECONDARY_EXEC_RDSEED_EXITING |
d041b5ea
IS
6648 SECONDARY_EXEC_XSAVES |
6649 SECONDARY_EXEC_TSC_SCALING;
55d2375e
SC
6650
6651 /*
6652 * We can emulate "VMCS shadowing," even if the hardware
6653 * doesn't support it.
6654 */
6655 msrs->secondary_ctls_high |=
6656 SECONDARY_EXEC_SHADOW_VMCS;
6657
6658 if (enable_ept) {
6659 /* nested EPT: emulate EPT also to L1 */
6660 msrs->secondary_ctls_high |=
6661 SECONDARY_EXEC_ENABLE_EPT;
bb1fcc70
SC
6662 msrs->ept_caps =
6663 VMX_EPT_PAGE_WALK_4_BIT |
6664 VMX_EPT_PAGE_WALK_5_BIT |
6665 VMX_EPTP_WB_BIT |
96d47010
SC
6666 VMX_EPT_INVEPT_BIT |
6667 VMX_EPT_EXECUTE_ONLY_BIT;
6668
55d2375e
SC
6669 msrs->ept_caps &= ept_caps;
6670 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
6671 VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
6672 VMX_EPT_1GB_PAGE_BIT;
6673 if (enable_ept_ad_bits) {
6674 msrs->secondary_ctls_high |=
6675 SECONDARY_EXEC_ENABLE_PML;
6676 msrs->ept_caps |= VMX_EPT_AD_BIT;
6677 }
6678 }
6679
6680 if (cpu_has_vmx_vmfunc()) {
6681 msrs->secondary_ctls_high |=
6682 SECONDARY_EXEC_ENABLE_VMFUNC;
6683 /*
6684 * Advertise EPTP switching unconditionally
6685 * since we emulate it
6686 */
6687 if (enable_ept)
6688 msrs->vmfunc_controls =
6689 VMX_VMFUNC_EPTP_SWITCHING;
6690 }
6691
6692 /*
6693 * Old versions of KVM use the single-context version without
6694 * checking for support, so declare that it is supported even
6695 * though it is treated as global context. The alternative would
6696 * be to accept an unadvertised single-context invvpid, which is worse.
6697 */
6698 if (enable_vpid) {
6699 msrs->secondary_ctls_high |=
6700 SECONDARY_EXEC_ENABLE_VPID;
6701 msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
6702 VMX_VPID_EXTENT_SUPPORTED_MASK;
6703 }
6704
6705 if (enable_unrestricted_guest)
6706 msrs->secondary_ctls_high |=
6707 SECONDARY_EXEC_UNRESTRICTED_GUEST;
6708
6709 if (flexpriority_enabled)
6710 msrs->secondary_ctls_high |=
6711 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
6712
72add915
SC
6713 if (enable_sgx)
6714 msrs->secondary_ctls_high |= SECONDARY_EXEC_ENCLS_EXITING;
6715
55d2375e
SC
6716 /* miscellaneous data */
6717 rdmsr(MSR_IA32_VMX_MISC,
6718 msrs->misc_low,
6719 msrs->misc_high);
6720 msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
6721 msrs->misc_low |=
6722 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
6723 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
bf0cd88c
YQ
6724 VMX_MISC_ACTIVITY_HLT |
6725 VMX_MISC_ACTIVITY_WAIT_SIPI;
55d2375e
SC
6726 msrs->misc_high = 0;
6727
6728 /*
6729 * This MSR reports some information about VMX support. We
6730 * should return information about the VMX we emulate for the
6731 * guest, and the VMCS structure we give it - not about the
6732 * VMX support of the underlying hardware.
6733 */
6734 msrs->basic =
6735 VMCS12_REVISION |
6736 VMX_BASIC_TRUE_CTLS |
6737 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
6738 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
6739
6740 if (cpu_has_vmx_basic_inout())
6741 msrs->basic |= VMX_BASIC_INOUT;
6742
6743 /*
6744 * These MSRs specify bits which the guest must keep fixed on
6745 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
6746 * We picked the standard core2 setting.
6747 */
6748#define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
6749#define VMXON_CR4_ALWAYSON X86_CR4_VMXE
6750 msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
6751 msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;
6752
6753 /* These MSRs specify bits which the guest must keep fixed off. */
6754 rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
6755 rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
6756
ba1f8245 6757 msrs->vmcs_enum = nested_vmx_calc_vmcs_enum_msr();
55d2375e
SC
6758}
6759
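/*
 * Sketch of the check implied by the low/high convention documented above
 * nested_vmx_setup_ctls_msrs() (see vmx_control_verify() for the real
 * helper): a 32-bit control value from vmcs12 is acceptable iff every
 * must-be-1 bit (low half) is set and nothing outside the may-be-1 mask
 * (high half) is set. Illustration only.
 */
#if 0	/* illustration only, not compiled */
static inline bool example_control_valid(u32 control, u32 low, u32 high)
{
	return (control & low) == low && (control & ~high) == 0;
}
#endif
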
6760void nested_vmx_hardware_unsetup(void)
6761{
6762 int i;
6763
6764 if (enable_shadow_vmcs) {
6765 for (i = 0; i < VMX_BITMAP_NR; i++)
6766 free_page((unsigned long)vmx_bitmap[i]);
6767 }
6768}
6769
6c1c6e58 6770__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
55d2375e
SC
6771{
6772 int i;
6773
6774 if (!cpu_has_vmx_shadow_vmcs())
6775 enable_shadow_vmcs = 0;
6776 if (enable_shadow_vmcs) {
6777 for (i = 0; i < VMX_BITMAP_NR; i++) {
41836839
BG
6778 /*
6779 * The vmx_bitmap is not tied to a VM and so should
6780 * not be charged to a memcg.
6781 */
55d2375e
SC
6782 vmx_bitmap[i] = (unsigned long *)
6783 __get_free_page(GFP_KERNEL);
6784 if (!vmx_bitmap[i]) {
6785 nested_vmx_hardware_unsetup();
6786 return -ENOMEM;
6787 }
6788 }
6789
6790 init_vmcs_shadow_fields();
6791 }
6792
cc877670
LA
6793 exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear;
6794 exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch;
6795 exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld;
6796 exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst;
6797 exit_handlers[EXIT_REASON_VMREAD] = handle_vmread;
6798 exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume;
6799 exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite;
6800 exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff;
6801 exit_handlers[EXIT_REASON_VMON] = handle_vmon;
6802 exit_handlers[EXIT_REASON_INVEPT] = handle_invept;
6803 exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid;
6804 exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc;
55d2375e 6805
55d2375e
SC
6806 return 0;
6807}
33b22172
PB
6808
6809struct kvm_x86_nested_ops vmx_nested_ops = {
f7e57078 6810 .leave_nested = vmx_leave_nested,
33b22172 6811 .check_events = vmx_check_nested_events,
d2060bd4 6812 .hv_timer_pending = nested_vmx_preemption_timer_pending,
cb6a32c2 6813 .triple_fault = nested_vmx_triple_fault,
33b22172
PB
6814 .get_state = vmx_get_nested_state,
6815 .set_state = vmx_set_nested_state,
9a78e158 6816 .get_nested_state_pages = vmx_get_nested_state_pages,
02f5fb2e 6817 .write_log_dirty = nested_vmx_write_pml_buffer,
33b22172
PB
6818 .enable_evmcs = nested_enable_evmcs,
6819 .get_evmcs_version = nested_get_evmcs_version,
6820};
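
/*
 * How this table is consumed (abbreviated illustration, not part of this
 * file): vmx.c is expected to point kvm_x86_ops.nested_ops at
 * vmx_nested_ops, and the common x86 code dispatches through it, e.g.:
 */
#if 0	/* illustration only, not compiled */
	r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state,
					      user_data_size);
#endif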