/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_NESTED_H
#define __KVM_X86_VMX_NESTED_H

#include "kvm_cache_regs.h"
#include "vmcs12.h"
#include "vmx.h"

/*
 * Status returned by nested_vmx_enter_non_root_mode():
 */
enum nvmx_vmentry_status {
	NVMX_VMENTRY_SUCCESS,		/* Entered VMX non-root mode */
	NVMX_VMENTRY_VMFAIL,		/* Consistency check VMFail */
	NVMX_VMENTRY_VMEXIT,		/* Consistency check VMExit */
	NVMX_VMENTRY_KVM_INTERNAL_ERROR,/* KVM internal error */
};
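
/*
 * Usage sketch (illustrative, not a definition in this header): the
 * VMLAUNCH/VMRESUME emulation is expected to dispatch on this status,
 * roughly:
 *
 *	status = nested_vmx_enter_non_root_mode(vcpu, true);
 *	if (status == NVMX_VMENTRY_SUCCESS)
 *		... L2 runs on the next VM entry ...
 *	else if (status == NVMX_VMENTRY_VMFAIL)
 *		... signal VMfail to L1 ...
 *	else if (status == NVMX_VMENTRY_VMEXIT)
 *		... a consistency-check VM exit was reflected into L1 ...
 *	else
 *		... report KVM_EXIT_INTERNAL_ERROR to userspace ...
 */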

void vmx_leave_nested(struct kvm_vcpu *vcpu);
void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
				bool apicv);
void nested_vmx_hardware_unsetup(void);
__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
void nested_vmx_set_vmcs_shadowing_bitmap(void);
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
							bool from_vmentry);
bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason);
void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
		       u32 exit_intr_info, unsigned long exit_qualification);
void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu);
int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
			u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu);

static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.cached_vmcs12;
}

static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
}

static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * If we do two consecutive get/set_nested_state()s while L2 is
	 * running, hv_evmcs may end up not being mapped (we map it from
	 * nested_vmx_run()/vmx_vcpu_run()). Check is_guest_mode() as we
	 * always have a valid vmcs12 when it is true.
	 */
	return is_guest_mode(vcpu) || vmx->nested.current_vmptr != -1ull ||
		vmx->nested.hv_evmcs;
}

static inline unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
{
	/* return the page table to be shadowed - in our case, EPT12 */
	return get_vmcs12(vcpu)->ept_pointer;
}

static inline bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
{
	return nested_ept_get_cr3(vcpu) & VMX_EPTP_AD_ENABLE_BIT;
}

/*
 * Reflect a VM Exit into L1.
 */
static inline int nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu,
					    u32 exit_reason)
{
	u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

	/*
	 * At this point, the exit interruption info in exit_intr_info
	 * is only valid for EXCEPTION_NMI exits. For EXTERNAL_INTERRUPT
	 * we need to query the in-kernel LAPIC.
	 */
	WARN_ON(exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT);
	if ((exit_intr_info &
	     (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) ==
	    (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) {
		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

		vmcs12->vm_exit_intr_error_code =
			vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
	}

	nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info,
			  vmcs_readl(EXIT_QUALIFICATION));
	return 1;
}

/*
 * Return the cr0 value that a nested guest would read. This is a combination
 * of the real cr0 used to run the guest (guest_cr0) and the bits shadowed by
 * its L1 hypervisor (cr0_read_shadow).
 */
static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
{
	return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
		(fields->cr0_read_shadow & fields->cr0_guest_host_mask);
}
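
/*
 * Worked example with illustrative values: if L1 owns only CR0.TS
 * (cr0_guest_host_mask = X86_CR0_TS), guest_cr0 = 0x80000039 (TS set so
 * the hypervisor can trap FPU use) and cr0_read_shadow = 0x80000031
 * (TS clear), L2 reads (0x80000039 & ~0x8) | (0x80000031 & 0x8) =
 * 0x80000031, i.e. the shadowed TS value rather than the one actually
 * in effect while L2 runs.
 */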

static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
{
	return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
		(fields->cr4_read_shadow & fields->cr4_guest_host_mask);
}

static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
{
	return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);
}

/*
 * Do the virtual VMX capability MSRs specify that L1 can use VMWRITE
 * to modify any valid field of the VMCS, or are the VM-exit
 * information fields read-only?
 */
static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.misc_low &
		MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
}
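
/*
 * When this bit is set in the virtual MSR_IA32_VMX_MISC, L1 may VMWRITE
 * any valid VMCS field, including VM-exit information fields such as
 * EXIT_QUALIFICATION; when clear, a VMWRITE to those fields by L1 must
 * fail, as they are read-only.
 */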

static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
}

static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
		CPU_BASED_MONITOR_TRAP_FLAG;
}

static inline bool nested_cpu_has_vmx_shadow_vmcs(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
		SECONDARY_EXEC_SHADOW_VMCS;
}

static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
{
	return vmcs12->cpu_based_vm_exec_control & bit;
}

static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
{
	return (vmcs12->cpu_based_vm_exec_control &
		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
		(vmcs12->secondary_vm_exec_control & bit);
}
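
/*
 * Note the two-step check above: e.g. nested_cpu_has2(vmcs12,
 * SECONDARY_EXEC_ENABLE_EPT) is false if L1 set the EPT bit but left
 * CPU_BASED_ACTIVATE_SECONDARY_CONTROLS clear, because the CPU ignores
 * the secondary controls entirely in that case.
 */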

static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control &
		PIN_BASED_VMX_PREEMPTION_TIMER;
}

static inline bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING;
}

static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
}

static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
}

static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
}

static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
}

static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
}

static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
}

static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
}

static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
}

static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
}

static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
}

static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
{
	return nested_cpu_has_vmfunc(vmcs12) &&
		(vmcs12->vm_function_control &
		 VMX_VMFUNC_EPTP_SWITCHING);
}

static inline bool nested_cpu_has_shadow_vmcs(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_SHADOW_VMCS);
}

static inline bool nested_cpu_has_save_preemption_timer(struct vmcs12 *vmcs12)
{
	return vmcs12->vm_exit_controls &
		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
}

/*
 * In nested virtualization, check if L1 asked to exit on external interrupts.
 * For most existing hypervisors, this will always return true.
 */
static inline bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->pin_based_vm_exec_control &
		PIN_BASED_EXT_INTR_MASK;
}

/*
 * if fixed0[i] == 1: val[i] must be 1
 * if fixed1[i] == 0: val[i] must be 0
 */
static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
{
	return ((val & fixed1) | fixed0) == val;
}
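
/*
 * Worked example with illustrative values: with fixed0 = 0x20 (bit 5
 * must be 1) and fixed1 = ~0x8ULL (bit 3 must be 0), val = 0x21 passes
 * since ((0x21 & ~0x8) | 0x20) == 0x21; val = 0x29 fails because the
 * fixed1 mask clears bit 3, and val = 0x01 fails because fixed0 forces
 * bit 5 on.
 */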

static inline bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	if (to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
	    SECONDARY_EXEC_UNRESTRICTED_GUEST &&
	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
		fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);

	return fixed_bits_valid(val, fixed0, fixed1);
}
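
/*
 * E.g. with unrestricted guest available and enabled, a val with both
 * X86_CR0_PE and X86_CR0_PG clear is accepted, even though the CR0
 * fixed-0 bits would otherwise force L2 into protected, paged mode.
 */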

static inline bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;

	return fixed_bits_valid(val, fixed0, fixed1);
}

static inline bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0;
	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr4_fixed1;

	return fixed_bits_valid(val, fixed0, fixed1);
}

/* No difference in the restrictions on guest and host CR4 in VMX operation. */
#define nested_guest_cr4_valid	nested_cr4_valid
#define nested_host_cr4_valid	nested_cr4_valid

#endif /* __KVM_X86_VMX_NESTED_H */