/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>

#include "capabilities.h"
#include "ops.h"
#include "vmcs.h"

extern const u32 vmx_msr_index[];
extern u64 host_efer;

#define MSR_TYPE_R      1
#define MSR_TYPE_W      2
#define MSR_TYPE_RW     3

#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
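/*
 * Worked example (illustrative, not in the original header): APIC_TASKPRI
 * is MMIO offset 0x80 in the APIC page, so X2APIC_MSR(APIC_TASKPRI)
 * evaluates to 0x800 + (0x80 >> 4) = 0x808, the x2APIC TPR MSR.
 */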

#define NR_AUTOLOAD_MSRS 8

struct vmx_msrs {
        unsigned int            nr;
        struct vmx_msr_entry    val[NR_AUTOLOAD_MSRS];
};
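
/*
 * Illustrative sketch, not part of the original header: appending an
 * MSR/value pair to a fixed-size autoload list.  The helper name
 * vmx_msrs_add is hypothetical; the real update paths also keep the
 * corresponding VMCS load/store counts in sync, which is omitted here.
 */
static inline int vmx_msrs_add(struct vmx_msrs *m, u32 index, u64 value)
{
        if (m->nr >= NR_AUTOLOAD_MSRS)
                return -ENOSPC;         /* list full; caller must fall back */

        m->val[m->nr].index = index;
        m->val[m->nr].value = value;
        m->nr++;
        return 0;
}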

struct shared_msr_entry {
        unsigned        index;
        u64             data;
        u64             mask;
};

enum segment_cache_field {
        SEG_FIELD_SEL = 0,
        SEG_FIELD_BASE = 1,
        SEG_FIELD_LIMIT = 2,
        SEG_FIELD_AR = 3,

        SEG_FIELD_NR = 4
};

/* Posted-Interrupt Descriptor */
struct pi_desc {
        u32 pir[8];     /* Posted interrupt requested */
        union {
                struct {
                                /* bit 256 - Outstanding Notification */
                        u16     on      : 1,
                                /* bit 257 - Suppress Notification */
                                sn      : 1,
                                /* bit 271:258 - Reserved */
                                rsvd_1  : 14;
                                /* bit 279:272 - Notification Vector */
                        u8      nv;
                                /* bit 287:280 - Reserved */
                        u8      rsvd_2;
                                /* bit 319:288 - Notification Destination */
                        u32     ndst;
                };
                u64 control;
        };
        u32 rsvd[6];
} __aligned(64);

#define RTIT_ADDR_RANGE         4

struct pt_ctx {
        u64 ctl;
        u64 status;
        u64 output_base;
        u64 output_mask;
        u64 cr3_match;
        u64 addr_a[RTIT_ADDR_RANGE];
        u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
        u64 ctl_bitmask;
        u32 addr_range;
        u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
        struct pt_ctx host;
        struct pt_ctx guest;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
        /* Has the level 1 guest done vmxon? */
        bool vmxon;
        gpa_t vmxon_ptr;
        bool pml_full;

        /* The guest-physical address of the current VMCS L1 keeps for L2 */
        gpa_t current_vmptr;
        /*
         * Cache of the guest's VMCS, existing outside of guest memory.
         * Loaded from guest memory during VMPTRLD. Flushed to guest
         * memory during VMCLEAR and VMPTRLD.
         */
        struct vmcs12 *cached_vmcs12;
        /*
         * Cache of the guest's shadow VMCS, existing outside of guest
         * memory. Loaded from guest memory during VM entry. Flushed
         * to guest memory during VM exit.
         */
        struct vmcs12 *cached_shadow_vmcs12;
        /*
         * Indicates if the shadow vmcs or enlightened vmcs must be updated
         * with the data held by struct vmcs12.
         */
        bool need_vmcs12_sync;
        bool dirty_vmcs12;

        /*
         * vmcs02 has been initialized, i.e. state that is constant for
         * vmcs02 has been written to the backing VMCS. Initialization
         * is delayed until L1 actually attempts to run a nested VM.
         */
        bool vmcs02_initialized;

        bool change_vmcs01_virtual_apic_mode;

        /*
         * Enlightened VMCS has been enabled. It does not mean that L1 has to
         * use it. However, VMX features available to L1 will be limited based
         * on what the enlightened VMCS supports.
         */
        bool enlightened_vmcs_enabled;

        /* L2 must run next, and mustn't decide to exit to L1. */
        bool nested_run_pending;

        struct loaded_vmcs vmcs02;

        /*
         * Guest pages referred to in the vmcs02 with host-physical
         * pointers, so we must keep them pinned while L2 runs.
         */
        struct page *apic_access_page;
        struct page *virtual_apic_page;
        struct page *pi_desc_page;
        struct pi_desc *pi_desc;
        bool pi_pending;
        u16 posted_intr_nv;

        struct hrtimer preemption_timer;
        bool preemption_timer_expired;

        /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
        u64 vmcs01_debugctl;
        u64 vmcs01_guest_bndcfgs;

        u16 vpid02;
        u16 last_vpid;

        struct nested_vmx_msrs msrs;

        /* SMM related state */
        struct {
                /* in VMX operation on SMM entry? */
                bool vmxon;
                /* in guest mode on SMM entry? */
                bool guest_mode;
        } smm;

        gpa_t hv_evmcs_vmptr;
        struct page *hv_evmcs_page;
        struct hv_enlightened_vmcs *hv_evmcs;
};

struct vcpu_vmx {
        struct kvm_vcpu       vcpu;
        u8                    fail;
        u8                    msr_bitmap_mode;
        u32                   exit_intr_info;
        u32                   idt_vectoring_info;
        ulong                 rflags;
        struct shared_msr_entry *guest_msrs;
        int                   nmsrs;
        int                   save_nmsrs;
        bool                  guest_msrs_dirty;
        unsigned long         host_idt_base;
#ifdef CONFIG_X86_64
        u64                   msr_host_kernel_gs_base;
        u64                   msr_guest_kernel_gs_base;
#endif

        u64                   arch_capabilities;
        u64                   spec_ctrl;

        u32 vm_entry_controls_shadow;
        u32 vm_exit_controls_shadow;
        u32 secondary_exec_control;

        /*
         * loaded_vmcs points to the VMCS currently used in this vcpu. For a
         * non-nested (L1) guest, it always points to vmcs01. For a nested
         * guest (L2), it points to a different VMCS.  loaded_cpu_state points
         * to the VMCS whose state is loaded into the CPU registers that only
         * need to be switched when transitioning to/from the kernel; a NULL
         * value indicates that host state is loaded.
         */
        struct loaded_vmcs    vmcs01;
        struct loaded_vmcs   *loaded_vmcs;
        struct loaded_vmcs   *loaded_cpu_state;
        bool                  __launched; /* temporary, used in vmx_vcpu_run */
        struct msr_autoload {
                struct vmx_msrs guest;
                struct vmx_msrs host;
        } msr_autoload;

        struct {
                int vm86_active;
                ulong save_rflags;
                struct kvm_segment segs[8];
        } rmode;
        struct {
                u32 bitmask; /* 4 bits per segment (1 bit per field) */
                struct kvm_save_segment {
                        u16 selector;
                        unsigned long base;
                        u32 limit;
                        u32 ar;
                } seg[8];
        } segment_cache;
        int vpid;
        bool emulation_required;

        u32 exit_reason;

        /* Posted interrupt descriptor */
        struct pi_desc pi_desc;

        /* Support for a guest hypervisor (nested VMX) */
        struct nested_vmx nested;

        /* Dynamic PLE window. */
        int ple_window;
        bool ple_window_dirty;

        bool req_immediate_exit;

        /* Support for PML */
#define PML_ENTITY_NUM          512
        struct page *pml_pg;

        /* apic deadline value in host tsc */
        u64 hv_deadline_tsc;

        u64 current_tsc_ratio;

        u32 host_pkru;

        unsigned long host_debugctlmsr;

        /*
         * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
         * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
         * in msr_ia32_feature_control_valid_bits.
         */
        u64 msr_ia32_feature_control;
        u64 msr_ia32_feature_control_valid_bits;
        u64 ept_pointer;

        struct pt_desc pt_desc;
};

enum ept_pointers_status {
        EPT_POINTERS_CHECK = 0,
        EPT_POINTERS_MATCH = 1,
        EPT_POINTERS_MISMATCH = 2
};

struct kvm_vmx {
        struct kvm kvm;

        unsigned int tss_addr;
        bool ept_identity_pagetable_done;
        gpa_t ept_identity_map_addr;

        enum ept_pointers_status ept_pointers_match;
        spinlock_t ept_pointer_lock;
};

bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void vmx_vcpu_put(struct kvm_vcpu *vcpu);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
void update_exception_bitmap(struct kvm_vcpu *vcpu);
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);

#define POSTED_INTR_ON  0
#define POSTED_INTR_SN  1

static inline bool pi_test_and_set_on(struct pi_desc *pi_desc)
{
        return test_and_set_bit(POSTED_INTR_ON,
                        (unsigned long *)&pi_desc->control);
}

static inline bool pi_test_and_clear_on(struct pi_desc *pi_desc)
{
        return test_and_clear_bit(POSTED_INTR_ON,
                        (unsigned long *)&pi_desc->control);
}

static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
{
        return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
}

static inline void pi_clear_sn(struct pi_desc *pi_desc)
{
        clear_bit(POSTED_INTR_SN,
                        (unsigned long *)&pi_desc->control);
}

static inline void pi_set_sn(struct pi_desc *pi_desc)
{
        set_bit(POSTED_INTR_SN,
                        (unsigned long *)&pi_desc->control);
}

static inline void pi_clear_on(struct pi_desc *pi_desc)
{
        clear_bit(POSTED_INTR_ON,
                        (unsigned long *)&pi_desc->control);
}

static inline int pi_test_on(struct pi_desc *pi_desc)
{
        return test_bit(POSTED_INTR_ON,
                        (unsigned long *)&pi_desc->control);
}

static inline int pi_test_sn(struct pi_desc *pi_desc)
{
        return test_bit(POSTED_INTR_SN,
                        (unsigned long *)&pi_desc->control);
}
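
/*
 * Illustrative sketch, not part of the original header: the canonical
 * posting sequence.  Record the vector in the PIR, then attempt to set
 * Outstanding Notification; if ON was already set, a notification event
 * is already pending and this caller has nothing more to do.  The name
 * pi_post_vector and the return convention are hypothetical, and the
 * memory barriers the real delivery path needs are omitted here.
 */
static inline bool pi_post_vector(struct pi_desc *pi_desc, int vector)
{
        if (pi_test_and_set_pir(vector, pi_desc))
                return false;   /* vector was already pending in the PIR */

        /* Returns true if this caller must send the notification event. */
        return !pi_test_and_set_on(pi_desc);
}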

static inline u8 vmx_get_rvi(void)
{
        return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx)
{
        vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS);
}

static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
{
        vmcs_write32(VM_ENTRY_CONTROLS, val);
        vmx->vm_entry_controls_shadow = val;
}

static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
{
        if (vmx->vm_entry_controls_shadow != val)
                vm_entry_controls_init(vmx, val);
}

static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
{
        return vmx->vm_entry_controls_shadow;
}

static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
{
        vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
}

static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
{
        vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
}

static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx)
{
        vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS);
}

static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
{
        vmcs_write32(VM_EXIT_CONTROLS, val);
        vmx->vm_exit_controls_shadow = val;
}

static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
{
        if (vmx->vm_exit_controls_shadow != val)
                vm_exit_controls_init(vmx, val);
}

static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
{
        return vmx->vm_exit_controls_shadow;
}

static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
{
        vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
}

static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
{
        vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
}
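
/*
 * Illustrative usage, not part of the original header: the shadow helpers
 * skip the VMWRITE when the requested value already matches the cached
 * copy, e.g. when toggling dynamic EFER loading on VM entry:
 *
 *      if (guest_efer_needs_load)
 *              vm_entry_controls_setbit(vmx, VM_ENTRY_LOAD_IA32_EFER);
 *      else
 *              vm_entry_controls_clearbit(vmx, VM_ENTRY_LOAD_IA32_EFER);
 *
 * The guest_efer_needs_load predicate is hypothetical; it stands in for
 * whatever condition the caller uses to decide.
 */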

static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
        vmx->segment_cache.bitmask = 0;
}
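
/*
 * Illustrative sketch, not part of the original header: how the
 * segment_cache bitmask encodes validity, with one bit per
 * (segment, field) pair and SEG_FIELD_NR bits per segment.  The helper
 * name vmx_segment_cache_valid is hypothetical.
 */
static inline bool vmx_segment_cache_valid(struct vcpu_vmx *vmx, unsigned seg,
                                           enum segment_cache_field field)
{
        u32 mask = 1u << (seg * SEG_FIELD_NR + field);

        return vmx->segment_cache.bitmask & mask;
}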

static inline u32 vmx_vmentry_ctrl(void)
{
        u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;

        if (pt_mode == PT_MODE_SYSTEM)
                vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
                                  VM_ENTRY_LOAD_IA32_RTIT_CTL);
        /* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
        return vmentry_ctrl &
                ~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER);
}

static inline u32 vmx_vmexit_ctrl(void)
{
        u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;

        if (pt_mode == PT_MODE_SYSTEM)
                vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
                                 VM_EXIT_CLEAR_IA32_RTIT_CTL);
        /* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
        return vmexit_ctrl &
                ~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
}

u32 vmx_exec_control(struct vcpu_vmx *vmx);

static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
        return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
{
        return &(to_vmx(vcpu)->pi_desc);
}

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
        return alloc_vmcs_cpu(shadow, raw_smp_processor_id());
}
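/*
 * Descriptive note added for clarity (not in the original): __vmx_flush_tlb()
 * picks the flush mechanism.  When EPT is enabled and either guest-physical
 * mappings must be invalidated or VPID is unavailable, it flushes the EPT
 * context rooted at the current root HPA; otherwise a VPID-tagged flush of
 * the vCPU's linear mappings suffices.
 */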
static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
                                   bool invalidate_gpa)
{
        if (enable_ept && (invalidate_gpa || !enable_vpid)) {
                if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
                        return;
                ept_sync_context(construct_eptp(vcpu,
                                                vcpu->arch.mmu->root_hpa));
        } else {
                vpid_sync_context(vpid);
        }
}

static inline void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
{
        __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
}

static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
{
        vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
        vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
}

#endif /* __KVM_X86_VMX_H */
8373d25d | 518 | #endif /* __KVM_X86_VMX_H */ |