/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>

#include "capabilities.h"
#include "../kvm_cache_regs.h"
#include "posted_intr.h"
#include "vmcs.h"
#include "vmx_ops.h"
#include "../cpuid.h"
#include "run_flags.h"

#define MSR_TYPE_R	1
#define MSR_TYPE_W	2
#define MSR_TYPE_RW	3

#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
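
/*
 * Worked example (illustrative, not part of the original header): x2APIC MSRs
 * start at APIC_BASE_MSR (0x800) and each 16-byte MMIO offset maps to one MSR,
 * hence the ">> 4". For the TPR at MMIO offset 0x80:
 *
 *	X2APIC_MSR(APIC_TASKPRI) == 0x800 + (0x80 >> 4) == 0x808
 */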

#ifdef CONFIG_X86_64
#define MAX_NR_USER_RETURN_MSRS	7
#else
#define MAX_NR_USER_RETURN_MSRS	4
#endif

#define MAX_NR_LOADSTORE_MSRS	8

struct vmx_msrs {
	unsigned int		nr;
	struct vmx_msr_entry	val[MAX_NR_LOADSTORE_MSRS];
};

struct vmx_uret_msr {
	bool load_into_hardware;
	u64 data;
	u64 mask;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

#define RTIT_ADDR_RANGE	4

struct pt_ctx {
	u64 ctl;
	u64 status;
	u64 output_base;
	u64 output_mask;
	u64 cr3_match;
	u64 addr_a[RTIT_ADDR_RANGE];
	u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
	u64 ctl_bitmask;
	u32 num_address_ranges;
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
	struct pt_ctx host;
	struct pt_ctx guest;
};

union vmx_exit_reason {
	struct {
		u32	basic			: 16;
		u32	reserved16		: 1;
		u32	reserved17		: 1;
		u32	reserved18		: 1;
		u32	reserved19		: 1;
		u32	reserved20		: 1;
		u32	reserved21		: 1;
		u32	reserved22		: 1;
		u32	reserved23		: 1;
		u32	reserved24		: 1;
		u32	reserved25		: 1;
		u32	bus_lock_detected	: 1;
		u32	enclave_mode		: 1;
		u32	smi_pending_mtf		: 1;
		u32	smi_from_vmx_root	: 1;
		u32	reserved30		: 1;
		u32	failed_vmentry		: 1;
	};
	u32 full;
};
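
/*
 * Usage sketch (illustrative, not part of the original header): handlers can
 * read the raw 32-bit exit reason once and then test modifier bits by name:
 *
 *	union vmx_exit_reason reason;
 *
 *	reason.full = vmcs_read32(VM_EXIT_REASON);
 *	if (reason.failed_vmentry)
 *		... reason.basic still holds the basic exit reason ...
 */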

#define vcpu_to_lbr_desc(vcpu) (&to_vmx(vcpu)->lbr_desc)
#define vcpu_to_lbr_records(vcpu) (&to_vmx(vcpu)->lbr_desc.records)

bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu);
bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu);

int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);

struct lbr_desc {
	/* Basic info about guest LBR records. */
	struct x86_pmu_lbr records;

	/*
	 * Emulate LBR feature via passthrough LBR registers when the
	 * per-vcpu guest LBR event is scheduled on the current pcpu.
	 *
	 * The records may be inaccurate if the host reclaims the LBR.
	 */
	struct perf_event *event;

	/* True if LBRs are marked as not intercepted in the MSR bitmap */
	bool msr_passthrough;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level 1 guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;

	/*
	 * GPA to HVA cache for accessing vmcs12->vmcs_link_pointer
	 */
	struct gfn_to_hva_cache shadow_vmcs12_cache;

	/*
	 * GPA to HVA cache for VMCS12
	 */
	struct gfn_to_hva_cache vmcs12_cache;

	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_to_shadow_sync;
	bool dirty_vmcs12;

	/*
	 * Indicates whether the MSR bitmap for L2 needs to be rebuilt due to
	 * changes in the MSR bitmap for L1 or switching to a different L2.
	 * Note, this flag can only be used reliably in conjunction with a
	 * paravirt L1 which informs L0 whether any changes to the MSR bitmap
	 * for L2 were done on its side.
	 */
	bool force_msr_bitmap_recalc;

	/*
	 * Indicates lazily loaded guest state has not yet been decached from
	 * vmcs02.
	 */
	bool need_sync_vmcs02_to_vmcs12_rare;

	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS. Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;

	bool change_vmcs01_virtual_apic_mode;
	bool reload_vmcs01_apic_access_page;
	bool update_vmcs01_cpu_dirty_logging;
	bool update_vmcs01_apicv_status;

	/*
	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
	 * use it. However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;

	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;

	/* Pending MTF VM-exit into L1. */
	bool mtf_pending;

	struct loaded_vmcs vmcs02;

	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
	struct kvm_host_map virtual_apic_map;
	struct kvm_host_map pi_desc_map;

	struct kvm_host_map msr_bitmap_map;

	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;

	struct hrtimer preemption_timer;
	u64 preemption_timer_deadline;
	bool has_preemption_timer_deadline;
	bool preemption_timer_expired;

	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
	u64 vmcs01_debugctl;
	u64 vmcs01_guest_bndcfgs;

	/* to migrate it to L1 if L2 writes to L1's CR8 directly */
	int l1_tpr_threshold;

	u16 vpid02;
	u16 last_vpid;

	struct nested_vmx_msrs msrs;

	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;

	gpa_t hv_evmcs_vmptr;
	struct kvm_host_map hv_evmcs_map;
	struct hv_enlightened_vmcs *hv_evmcs;
};

struct vcpu_vmx {
	struct kvm_vcpu vcpu;
	u8 fail;
	u8 x2apic_msr_bitmap_mode;

	/*
	 * If true, host state has been stored in vmx->loaded_vmcs for
	 * the CPU registers that only need to be switched when transitioning
	 * to/from the kernel, and the registers have been loaded with guest
	 * values. If false, host state is loaded in the CPU registers
	 * and vmx->loaded_vmcs->host_state is invalid.
	 */
	bool guest_state_loaded;

	unsigned long exit_qualification;
	u32 exit_intr_info;
	u32 idt_vectoring_info;
	ulong rflags;

	/*
	 * User return MSRs are always emulated when enabled in the guest, but
	 * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
	 * of 64-bit mode or if EFER.SCE=0, thus the SYSCALL MSRs don't need to
	 * be loaded into hardware unless the guest is in 64-bit mode with
	 * EFER.SCE=1.
	 */
	struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
	bool guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64
	u64 msr_host_kernel_gs_base;
	u64 msr_guest_kernel_gs_base;
#endif

	u64 spec_ctrl;
	u32 msr_ia32_umwait_control;

	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs vmcs01;
	struct loaded_vmcs *loaded_vmcs;

	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct msr_autostore {
		struct vmx_msrs guest;
	} msr_autostore;

	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	union vmx_exit_reason exit_reason;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Used if this vCPU is waiting for PI notification wakeup. */
	struct list_head pi_wakeup_list;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	unsigned int ple_window;
	bool ple_window_dirty;

	bool req_immediate_exit;

	/* Support for PML */
#define PML_ENTITY_NUM	512
	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	unsigned long host_debugctlmsr;

	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;
	/* SGX Launch Control public key hash */
	u64 msr_ia32_sgxlepubkeyhash[4];
	u64 msr_ia32_mcu_opt_ctrl;
	bool disable_fb_clear;

	struct pt_desc pt_desc;
	struct lbr_desc lbr_desc;

	/* Save desired MSR intercept (read: pass-through) state */
#define MAX_POSSIBLE_PASSTHROUGH_MSRS	15
	struct {
		DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
		DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
	} shadow_msr_intercept;
};

struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;
};

bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
			struct loaded_vmcs *buddy);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
bool vmx_emulation_required(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);

bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
void vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, unsigned int flags);
unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx);
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
		    unsigned int flags);
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);

void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);

u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);

static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
					     int type, bool value)
{
	if (value)
		vmx_enable_intercept_for_msr(vcpu, msr, type);
	else
		vmx_disable_intercept_for_msr(vcpu, msr, type);
}
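
/*
 * Example (illustrative; "msr" is a hypothetical pass-through candidate):
 * disabling read interception lets the guest RDMSR directly while WRMSR
 * still VM-Exits:
 *
 *	vmx_set_intercept_for_msr(vcpu, msr, MSR_TYPE_R, false);
 *	vmx_set_intercept_for_msr(vcpu, msr, MSR_TYPE_W, true);
 */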

void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);

/*
 * Note, early Intel manuals have the write-low and read-high bitmap offsets
 * the wrong way round. The bitmaps control MSRs 0x00000000-0x00001fff and
 * 0xc0000000-0xc0001fff. The former (low) uses bytes 0-0x3ff for reads and
 * 0x800-0xbff for writes. The latter (high) uses 0x400-0x7ff for reads and
 * 0xc00-0xfff for writes. MSRs not covered by either of the ranges always
 * VM-Exit.
 */
#define __BUILD_VMX_MSR_BITMAP_HELPER(rtype, action, bitop, access, base)      \
static inline rtype vmx_##action##_msr_bitmap_##access(unsigned long *bitmap, \
						       u32 msr)		       \
{									       \
	int f = sizeof(unsigned long);					       \
									       \
	if (msr <= 0x1fff)						       \
		return bitop##_bit(msr, bitmap + base / f);		       \
	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))		       \
		return bitop##_bit(msr & 0x1fff, bitmap + (base + 0x400) / f); \
	return (rtype)true;						       \
}
#define BUILD_VMX_MSR_BITMAP_HELPERS(ret_type, action, bitop)		       \
	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, read,  0x0)    \
	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 0x800)

BUILD_VMX_MSR_BITMAP_HELPERS(bool, test, test)
BUILD_VMX_MSR_BITMAP_HELPERS(void, clear, __clear)
BUILD_VMX_MSR_BITMAP_HELPERS(void, set, __set)
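
/*
 * Worked example (illustrative): for a write to MSR 0xc0000080 (EFER), the
 * high range applies, so vmx_test_msr_bitmap_write() tests bit
 * (0xc0000080 & 0x1fff) = 0x80 in the words starting at byte offset
 * 0x800 + 0x400 = 0xc00, matching the layout described above.
 */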

static inline u8 vmx_get_rvi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

#define BUILD_CONTROLS_SHADOW(lname, uname)				       \
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val)	       \
{									       \
	if (vmx->loaded_vmcs->controls_shadow.lname != val) {		       \
		vmcs_write32(uname, val);				       \
		vmx->loaded_vmcs->controls_shadow.lname = val;		       \
	}								       \
}									       \
static inline u32 __##lname##_controls_get(struct loaded_vmcs *vmcs)	       \
{									       \
	return vmcs->controls_shadow.lname;				       \
}									       \
static inline u32 lname##_controls_get(struct vcpu_vmx *vmx)		       \
{									       \
	return __##lname##_controls_get(vmx->loaded_vmcs);		       \
}									       \
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val)      \
{									       \
	lname##_controls_set(vmx, lname##_controls_get(vmx) | val);	       \
}									       \
static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val)    \
{									       \
	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);	       \
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)

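/*
 * Example (illustrative): BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
 * above generates pin_controls_{set,get,setbit,clearbit}(), so a caller can
 * toggle a pin-based control and only pay for a VMWRITE when the cached
 * shadow value actually changes:
 *
 *	pin_controls_setbit(vmx, PIN_BASED_EXT_INTR_MASK);
 */
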
/*
 * VMX_REGS_LAZY_LOAD_SET - The set of registers that will be updated in the
 * cache on demand. Other registers not listed here are synced to
 * the cache immediately after VM-Exit.
 */
#define VMX_REGS_LAZY_LOAD_SET	((1 << VCPU_REGS_RIP) |         \
				(1 << VCPU_REGS_RSP) |          \
				(1 << VCPU_EXREG_RFLAGS) |      \
				(1 << VCPU_EXREG_PDPTR) |       \
				(1 << VCPU_EXREG_SEGMENTS) |    \
				(1 << VCPU_EXREG_CR0) |         \
				(1 << VCPU_EXREG_CR3) |         \
				(1 << VCPU_EXREG_CR4) |         \
				(1 << VCPU_EXREG_EXIT_INFO_1) | \
				(1 << VCPU_EXREG_EXIT_INFO_2))

static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

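/*
 * Note (added for clarity): the two accessors below read their VMCS fields
 * lazily, using the VCPU_EXREG_EXIT_INFO_* bits from VMX_REGS_LAZY_LOAD_SET
 * to cache the value and avoid redundant VMREADs on hot VM-Exit paths.
 */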
static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_1)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
		vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	}
	return vmx->exit_qualification;
}

static inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_2)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
		vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
	}
	return vmx->exit_intr_info;
}

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
	return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
			      GFP_KERNEL_ACCOUNT);
}

static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
	return secondary_exec_controls_get(vmx) &
		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}

static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
{
	if (!enable_ept)
		return true;

	return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
}

static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
{
	return enable_unrestricted_guest && (!is_guest_mode(vcpu) ||
	    (secondary_exec_controls_get(to_vmx(vcpu)) &
	    SECONDARY_EXEC_UNRESTRICTED_GUEST));
}

bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu);
static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
{
	return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
}

void dump_vmcs(struct kvm_vcpu *vcpu);

static inline int vmx_get_instr_info_reg2(u32 vmx_instr_info)
{
	return (vmx_instr_info >> 28) & 0xf;
}

#endif /* __KVM_X86_VMX_H */