Commit | Line | Data |
---|---|---|
55d2375e SC |
1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef __KVM_X86_VMX_NESTED_H | |
3 | #define __KVM_X86_VMX_NESTED_H | |
4 | ||
5 | #include "kvm_cache_regs.h" | |
6 | #include "vmcs12.h" | |
7 | #include "vmx.h" | |
8 | ||
9 | void vmx_leave_nested(struct kvm_vcpu *vcpu); | |
10 | void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps, | |
11 | bool apicv); | |
12 | void nested_vmx_hardware_unsetup(void); | |
13 | __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *)); | |
1b84292b | 14 | void nested_vmx_set_vmcs_shadowing_bitmap(void); |
55d2375e SC |
15 | void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu); |
16 | int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry); | |
17 | bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason); | |
18 | void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, | |
19 | u32 exit_intr_info, unsigned long exit_qualification); | |
3731905e | 20 | void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu); |
55d2375e SC |
21 | int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); |
22 | int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata); | |
23 | int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, | |
fdb28619 | 24 | u32 vmx_instruction_info, bool wr, int len, gva_t *ret); |
03a8871a | 25 | void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu); |
55d2375e SC |
26 | |
27 | static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu) | |
28 | { | |
29 | return to_vmx(vcpu)->nested.cached_vmcs12; | |
30 | } | |
31 | ||
32 | static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu) | |
33 | { | |
34 | return to_vmx(vcpu)->nested.cached_shadow_vmcs12; | |
35 | } | |
36 | ||
37 | static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu) | |
38 | { | |
39 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
40 | ||
41 | /* | |
42 | * In case we do two consecutive get/set_nested_state()s while L2 was | |
43 | * running hv_evmcs may end up not being mapped (we map it from | |
44 | * nested_vmx_run()/vmx_vcpu_run()). Check is_guest_mode() as we always | |
45 | * have vmcs12 if it is true. | |
46 | */ | |
47 | return is_guest_mode(vcpu) || vmx->nested.current_vmptr != -1ull || | |
48 | vmx->nested.hv_evmcs; | |
49 | } | |
50 | ||
51 | static inline unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu) | |
52 | { | |
53 | /* return the page table to be shadowed - in our case, EPT12 */ | |
54 | return get_vmcs12(vcpu)->ept_pointer; | |
55 | } | |
56 | ||
57 | static inline bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu) | |
58 | { | |
59 | return nested_ept_get_cr3(vcpu) & VMX_EPTP_AD_ENABLE_BIT; | |
60 | } | |
61 | ||
/*
 * Reflect a VM Exit into L1: read the exit information from the current
 * (hardware) VMCS, stash the error code into vmcs12 when one was
 * delivered, and emulate the VM-exit to L1.  Returns 1 ("handled,
 * resume the guest" in KVM's exit-handler convention).
 */
static inline int nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu,
					    u32 exit_reason)
{
	u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

	/*
	 * At this point, the exit interruption info in exit_intr_info
	 * is only valid for EXCEPTION_NMI exits. For EXTERNAL_INTERRUPT
	 * we need to query the in-kernel LAPIC.
	 */
	WARN_ON(exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT);
	if ((exit_intr_info &
	     (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) ==
	    (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) {
		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

		/* The event pushed an error code; propagate it to L1. */
		vmcs12->vm_exit_intr_error_code =
			vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
	}

	nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info,
			  vmcs_readl(EXIT_QUALIFICATION));
	return 1;
}
89 | ||
90 | /* | |
91 | * Return the cr0 value that a nested guest would read. This is a combination | |
92 | * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by | |
93 | * its hypervisor (cr0_read_shadow). | |
94 | */ | |
95 | static inline unsigned long nested_read_cr0(struct vmcs12 *fields) | |
96 | { | |
97 | return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) | | |
98 | (fields->cr0_read_shadow & fields->cr0_guest_host_mask); | |
99 | } | |
100 | static inline unsigned long nested_read_cr4(struct vmcs12 *fields) | |
101 | { | |
102 | return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) | | |
103 | (fields->cr4_read_shadow & fields->cr4_guest_host_mask); | |
104 | } | |
105 | ||
106 | static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu) | |
107 | { | |
108 | return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low); | |
109 | } | |
110 | ||
111 | /* | |
112 | * Do the virtual VMX capability MSRs specify that L1 can use VMWRITE | |
113 | * to modify any valid field of the VMCS, or are the VM-exit | |
114 | * information fields read-only? | |
115 | */ | |
116 | static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu) | |
117 | { | |
118 | return to_vmx(vcpu)->nested.msrs.misc_low & | |
119 | MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS; | |
120 | } | |
121 | ||
122 | static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu) | |
123 | { | |
124 | return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS; | |
125 | } | |
126 | ||
127 | static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu) | |
128 | { | |
129 | return to_vmx(vcpu)->nested.msrs.procbased_ctls_high & | |
130 | CPU_BASED_MONITOR_TRAP_FLAG; | |
131 | } | |
132 | ||
133 | static inline bool nested_cpu_has_vmx_shadow_vmcs(struct kvm_vcpu *vcpu) | |
134 | { | |
135 | return to_vmx(vcpu)->nested.msrs.secondary_ctls_high & | |
136 | SECONDARY_EXEC_SHADOW_VMCS; | |
137 | } | |
138 | ||
139 | static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit) | |
140 | { | |
141 | return vmcs12->cpu_based_vm_exec_control & bit; | |
142 | } | |
143 | ||
144 | static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit) | |
145 | { | |
146 | return (vmcs12->cpu_based_vm_exec_control & | |
147 | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) && | |
148 | (vmcs12->secondary_vm_exec_control & bit); | |
149 | } | |
150 | ||
151 | static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12) | |
152 | { | |
153 | return vmcs12->pin_based_vm_exec_control & | |
154 | PIN_BASED_VMX_PREEMPTION_TIMER; | |
155 | } | |
156 | ||
157 | static inline bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12) | |
158 | { | |
159 | return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING; | |
160 | } | |
161 | ||
162 | static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12) | |
163 | { | |
164 | return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS; | |
165 | } | |
166 | ||
167 | static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12) | |
168 | { | |
169 | return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT); | |
170 | } | |
171 | ||
/* Did L1 enable XSAVES/XRSTORS for L2 (secondary exec control)? */
static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
}
176 | ||
/* Did L1 enable Page-Modification Logging for L2? */
static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
}
181 | ||
/* Did L1 enable virtualize-x2APIC-mode for L2? */
static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
}
186 | ||
/* Did L1 enable VPID for L2? */
static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
}
191 | ||
/* Did L1 enable APIC-register virtualization for L2? */
static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
}
196 | ||
/* Did L1 enable virtual-interrupt delivery for L2? */
static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
}
201 | ||
202 | static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12) | |
203 | { | |
204 | return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR; | |
205 | } | |
206 | ||
/* Did L1 enable VM functions for L2? */
static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
}
211 | ||
212 | static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12) | |
213 | { | |
214 | return nested_cpu_has_vmfunc(vmcs12) && | |
215 | (vmcs12->vm_function_control & | |
216 | VMX_VMFUNC_EPTP_SWITCHING); | |
217 | } | |
218 | ||
/* Did L1 enable shadow VMCS for L2 (secondary exec control in vmcs12)? */
static inline bool nested_cpu_has_shadow_vmcs(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_SHADOW_VMCS);
}
223 | ||
224 | static inline bool nested_cpu_has_save_preemption_timer(struct vmcs12 *vmcs12) | |
225 | { | |
226 | return vmcs12->vm_exit_controls & | |
227 | VM_EXIT_SAVE_VMX_PREEMPTION_TIMER; | |
228 | } | |
229 | ||
230 | /* | |
231 | * In nested virtualization, check if L1 asked to exit on external interrupts. | |
232 | * For most existing hypervisors, this will always return true. | |
233 | */ | |
234 | static inline bool nested_exit_on_intr(struct kvm_vcpu *vcpu) | |
235 | { | |
236 | return get_vmcs12(vcpu)->pin_based_vm_exec_control & | |
237 | PIN_BASED_EXT_INTR_MASK; | |
238 | } | |
239 | ||
240 | /* | |
241 | * if fixed0[i] == 1: val[i] must be 1 | |
242 | * if fixed1[i] == 0: val[i] must be 0 | |
243 | */ | |
244 | static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1) | |
245 | { | |
246 | return ((val & fixed1) | fixed0) == val; | |
247 | } | |
248 | ||
249 | static bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val) | |
250 | { | |
251 | u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0; | |
252 | u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1; | |
253 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); | |
254 | ||
255 | if (to_vmx(vcpu)->nested.msrs.secondary_ctls_high & | |
256 | SECONDARY_EXEC_UNRESTRICTED_GUEST && | |
257 | nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST)) | |
258 | fixed0 &= ~(X86_CR0_PE | X86_CR0_PG); | |
259 | ||
260 | return fixed_bits_valid(val, fixed0, fixed1); | |
261 | } | |
262 | ||
263 | static bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val) | |
264 | { | |
265 | u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0; | |
266 | u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1; | |
267 | ||
268 | return fixed_bits_valid(val, fixed0, fixed1); | |
269 | } | |
270 | ||
271 | static bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val) | |
272 | { | |
273 | u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0; | |
274 | u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr4_fixed1; | |
275 | ||
276 | return fixed_bits_valid(val, fixed0, fixed1); | |
277 | } | |
278 | ||
279 | /* No difference in the restrictions on guest and host CR4 in VMX operation. */ | |
280 | #define nested_guest_cr4_valid nested_cr4_valid | |
281 | #define nested_host_cr4_valid nested_cr4_valid | |
282 | ||
283 | #endif /* __KVM_X86_VMX_NESTED_H */ |