KVM: VMX: Consider PID.PIR to determine if vCPU has pending interrupts
arch/x86/kvm/vmx/vmx.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>

#include "capabilities.h"
#include "ops.h"
#include "vmcs.h"

extern const u32 vmx_msr_index[];
extern u64 host_efer;

extern u32 get_umwait_control_msr(void);

#define MSR_TYPE_R	1
#define MSR_TYPE_W	2
#define MSR_TYPE_RW	3

#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))

#define NR_AUTOLOAD_MSRS 8

struct vmx_msrs {
	unsigned int		nr;
	struct vmx_msr_entry	val[NR_AUTOLOAD_MSRS];
};
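
/*
 * Illustrative sketch only -- this helper is not part of vmx.h. It shows
 * how the fixed-size autoload arrays above are meant to be filled before
 * the CPU consumes them atomically on VM entry/exit. The helper name and
 * the -ENOSPC convention are assumptions for the example (errno constants
 * are visible via <linux/kvm_host.h>).
 */
static inline int vmx_msrs_add(struct vmx_msrs *m, u32 index, u64 value)
{
	if (m->nr >= NR_AUTOLOAD_MSRS)
		return -ENOSPC;			/* autoload area is full */
	m->val[m->nr].index = index;		/* MSR number to switch */
	m->val[m->nr].value = value;		/* value loaded on VM entry */
	m->nr++;
	return 0;
}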

struct shared_msr_entry {
	unsigned	index;
	u64		data;
	u64		mask;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

/* Posted-Interrupt Descriptor */
struct pi_desc {
	u32 pir[8];     /* Posted interrupt requested */
	union {
		struct {
				/* bit 256 - Outstanding Notification */
			u16	on	: 1,
				/* bit 257 - Suppress Notification */
				sn	: 1,
				/* bit 271:258 - Reserved */
				rsvd_1	: 14;
				/* bit 279:272 - Notification Vector */
			u8	nv;
				/* bit 287:280 - Reserved */
			u8	rsvd_2;
				/* bit 319:288 - Notification Destination */
			u32	ndst;
		};
		u64 control;
	};
	u32 rsvd[6];
} __aligned(64);
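
/*
 * Sketch of the check the commit subject implies ("Consider PID.PIR to
 * determine if vCPU has pending interrupts"): a vCPU can have an interrupt
 * pending even while pi_desc.on is clear, namely whenever any PIR bit is
 * set. Hedged illustration, not necessarily the exact helper the commit
 * adds; bitmap_empty() comes from <linux/bitmap.h> and NR_VECTORS (256,
 * matching the 8 x 32-bit pir words) from the x86 headers.
 */
static inline bool pi_is_pir_empty(struct pi_desc *pi_desc)
{
	return bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS);
}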

#define RTIT_ADDR_RANGE		4

struct pt_ctx {
	u64 ctl;
	u64 status;
	u64 output_base;
	u64 output_mask;
	u64 cr3_match;
	u64 addr_a[RTIT_ADDR_RANGE];
	u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
	u64 ctl_bitmask;
	u32 addr_range;
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
	struct pt_ctx host;
	struct pt_ctx guest;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level1 guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;

	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_to_shadow_sync;
	bool dirty_vmcs12;

	/*
	 * Indicates lazily loaded guest state has not yet been decached from
	 * vmcs02.
	 */
	bool need_sync_vmcs02_to_vmcs12_rare;

	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS. Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;

	bool change_vmcs01_virtual_apic_mode;

	/*
	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
	 * use it. However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;

	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;

	struct loaded_vmcs vmcs02;

	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
	struct kvm_host_map virtual_apic_map;
	struct kvm_host_map pi_desc_map;

	struct kvm_host_map msr_bitmap_map;

	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;

	struct hrtimer preemption_timer;
	bool preemption_timer_expired;

	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
	u64 vmcs01_debugctl;
	u64 vmcs01_guest_bndcfgs;

	u16 vpid02;
	u16 last_vpid;

	struct nested_vmx_msrs msrs;

	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;

	gpa_t hv_evmcs_vmptr;
	struct kvm_host_map hv_evmcs_map;
	struct hv_enlightened_vmcs *hv_evmcs;
};

struct vcpu_vmx {
	struct kvm_vcpu	      vcpu;
	u8                    fail;
	u8		      msr_bitmap_mode;

	/*
	 * If true, host state has been stored in vmx->loaded_vmcs for
	 * the CPU registers that only need to be switched when transitioning
	 * to/from the kernel, and the registers have been loaded with guest
	 * values. If false, host state is loaded in the CPU registers
	 * and vmx->loaded_vmcs->host_state is invalid.
	 */
	bool		      guest_state_loaded;

	u32                   exit_intr_info;
	u32                   idt_vectoring_info;
	ulong                 rflags;

	struct shared_msr_entry *guest_msrs;
	int                   nmsrs;
	int                   save_nmsrs;
	bool                  guest_msrs_ready;
#ifdef CONFIG_X86_64
	u64		      msr_host_kernel_gs_base;
	u64		      msr_guest_kernel_gs_base;
#endif

	u64		      spec_ctrl;
	u32		      msr_ia32_umwait_control;

	u32 secondary_exec_control;

	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs    vmcs01;
	struct loaded_vmcs   *loaded_vmcs;

	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	u32 exit_reason;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	unsigned int ple_window;
	bool ple_window_dirty;

	bool req_immediate_exit;

	/* Support for PML */
#define PML_ENTITY_NUM		512
	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	u64 current_tsc_ratio;

	u32 host_pkru;

	unsigned long host_debugctlmsr;

	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;
	u64 ept_pointer;

	struct pt_desc pt_desc;
};

enum ept_pointers_status {
	EPT_POINTERS_CHECK = 0,
	EPT_POINTERS_MATCH = 1,
	EPT_POINTERS_MISMATCH = 2
};

struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;

	enum ept_pointers_status ept_pointers_match;
	spinlock_t ept_pointer_lock;
};

bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu);
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
void update_exception_bitmap(struct kvm_vcpu *vcpu);
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);

#define POSTED_INTR_ON  0
#define POSTED_INTR_SN  1

static inline bool pi_test_and_set_on(struct pi_desc *pi_desc)
{
	return test_and_set_bit(POSTED_INTR_ON,
				(unsigned long *)&pi_desc->control);
}

static inline bool pi_test_and_clear_on(struct pi_desc *pi_desc)
{
	return test_and_clear_bit(POSTED_INTR_ON,
				  (unsigned long *)&pi_desc->control);
}

static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
{
	return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
}

static inline void pi_set_sn(struct pi_desc *pi_desc)
{
	set_bit(POSTED_INTR_SN,
		(unsigned long *)&pi_desc->control);
}

static inline void pi_set_on(struct pi_desc *pi_desc)
{
	set_bit(POSTED_INTR_ON,
		(unsigned long *)&pi_desc->control);
}

static inline void pi_clear_on(struct pi_desc *pi_desc)
{
	clear_bit(POSTED_INTR_ON,
		  (unsigned long *)&pi_desc->control);
}

static inline int pi_test_on(struct pi_desc *pi_desc)
{
	return test_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static inline int pi_test_sn(struct pi_desc *pi_desc)
{
	return test_bit(POSTED_INTR_SN,
			(unsigned long *)&pi_desc->control);
}
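
/*
 * Illustrative sender-side sketch (not part of this header): posting a
 * vector means setting its PIR bit and then ON. Only the caller that
 * observes ON transitioning 0->1 needs to send the notification-vector
 * IPI. Real senders additionally need memory barriers and must respect
 * SN; this sketch omits both for brevity.
 */
static inline bool pi_post_interrupt(struct pi_desc *pi_desc, int vector)
{
	/* Record the vector in the Posted-Interrupt Request bitmap. */
	pi_test_and_set_pir(vector, pi_desc);

	/* True when this caller is responsible for the notification IPI. */
	return !pi_test_and_set_on(pi_desc);
}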

static inline u8 vmx_get_rvi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}
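
/*
 * Companion sketch (an assumption, not present in this header): SVI, the
 * servicing counterpart of RVI, occupies bits 15:8 of the same 16-bit
 * GUEST_INTR_STATUS field. Shown only to document the layout that
 * vmx_get_rvi() above relies on.
 */
static inline u8 vmx_get_svi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) >> 8;
}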

#define BUILD_CONTROLS_SHADOW(lname, uname)				    \
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val)	    \
{									    \
	if (vmx->loaded_vmcs->controls_shadow.lname != val) {		    \
		vmcs_write32(uname, val);				    \
		vmx->loaded_vmcs->controls_shadow.lname = val;		    \
	}								    \
}									    \
static inline u32 lname##_controls_get(struct vcpu_vmx *vmx)		    \
{									    \
	return vmx->loaded_vmcs->controls_shadow.lname;			    \
}									    \
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val)  \
{									    \
	lname##_controls_set(vmx, lname##_controls_get(vmx) | val);	    \
}									    \
static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
{									    \
	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);	    \
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)
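
/*
 * Usage sketch (illustrative wrapper, not a kernel function): the
 * instantiations above generate cached accessors such as
 * vm_entry_controls_get()/_set() plus the *_setbit()/_clearbit() helpers,
 * which skip the VMWRITE whenever the shadowed value is already current.
 */
static inline void vmx_example_toggle_efer_load(struct vcpu_vmx *vmx, bool on)
{
	if (on)
		vm_entry_controls_setbit(vmx, VM_ENTRY_LOAD_IA32_EFER);
	else
		vm_entry_controls_clearbit(vmx, VM_ENTRY_LOAD_IA32_EFER);
}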
8373d25d
SC
419static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
420{
421 vmx->segment_cache.bitmask = 0;
422}
423
424static inline u32 vmx_vmentry_ctrl(void)
425{
f99e3daf
CP
426 u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;
427 if (pt_mode == PT_MODE_SYSTEM)
d9293597
YZ
428 vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
429 VM_ENTRY_LOAD_IA32_RTIT_CTL);
8373d25d 430 /* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
f99e3daf 431 return vmentry_ctrl &
8373d25d
SC
432 ~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER);
433}
434
435static inline u32 vmx_vmexit_ctrl(void)
436{
f99e3daf
CP
437 u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;
438 if (pt_mode == PT_MODE_SYSTEM)
d9293597
YZ
439 vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
440 VM_EXIT_CLEAR_IA32_RTIT_CTL);
8373d25d 441 /* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
d9293597 442 return vmexit_ctrl &
8373d25d
SC
443 ~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
444}
445
446u32 vmx_exec_control(struct vcpu_vmx *vmx);
c075c3e4 447u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx);
8373d25d
SC
448
449static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
450{
451 return container_of(kvm, struct kvm_vmx, kvm);
452}
453
454static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
455{
456 return container_of(vcpu, struct vcpu_vmx, vcpu);
457}
458
459static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
460{
461 return &(to_vmx(vcpu)->pi_desc);
462}
463
41836839 464struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
89b0c9f5
SC
465void free_vmcs(struct vmcs *vmcs);
466int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
467void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
468void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs);
469void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);
470
471static inline struct vmcs *alloc_vmcs(bool shadow)
472{
41836839
BG
473 return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
474 GFP_KERNEL_ACCOUNT);
89b0c9f5
SC
475}
476
477u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
478
479static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
480 bool invalidate_gpa)
481{
482 if (enable_ept && (invalidate_gpa || !enable_vpid)) {
483 if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
484 return;
485 ept_sync_context(construct_eptp(vcpu,
486 vcpu->arch.mmu->root_hpa));
487 } else {
488 vpid_sync_context(vpid);
489 }
490}
491
492static inline void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
493{
494 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
495}
496
497static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
498{
499 vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
500 vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
501}
502
6e3ba4ab
TX
503static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
504{
505 return vmx->secondary_exec_control &
506 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
507}
508
69090810
PB
509void dump_vmcs(void);
510
8373d25d 511#endif /* __KVM_X86_VMX_H */