/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>

#include "capabilities.h"
#include "ops.h"
#include "vmcs.h"

extern const u32 vmx_msr_index[];
extern u64 host_efer;

#define MSR_TYPE_R	1
#define MSR_TYPE_W	2
#define MSR_TYPE_RW	3

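/* Map an xAPIC MMIO register offset to its x2APIC MSR index (0x800 base). */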
#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))

#define NR_AUTOLOAD_MSRS 8

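/*
 * Backing store for the VM-entry/VM-exit MSR-load/store lists; see
 * msr_autoload in struct vcpu_vmx below.
 */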
struct vmx_msrs {
	unsigned int		nr;
	struct vmx_msr_entry	val[NR_AUTOLOAD_MSRS];
};

struct shared_msr_entry {
	unsigned index;
	u64 data;
	u64 mask;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

/* Posted-Interrupt Descriptor */
struct pi_desc {
	u32 pir[8];     /* Posted interrupt requested */
	union {
		struct {
				/* bit 256 - Outstanding Notification */
			u16	on	: 1,
				/* bit 257 - Suppress Notification */
				sn	: 1,
				/* bit 271:258 - Reserved */
				rsvd_1	: 14;
				/* bit 279:272 - Notification Vector */
			u8	nv;
				/* bit 287:280 - Reserved */
			u8	rsvd_2;
				/* bit 319:288 - Notification Destination */
			u32	ndst;
		};
		u64 control;
	};
	u32 rsvd[6];
} __aligned(64);

#define RTIT_ADDR_RANGE		4

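/* Snapshot of the Intel PT RTIT_* MSR state for one context (host or guest) */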
struct pt_ctx {
	u64 ctl;
	u64 status;
	u64 output_base;
	u64 output_mask;
	u64 cr3_match;
	u64 addr_a[RTIT_ADDR_RANGE];
	u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
	u64 ctl_bitmask;
	u32 addr_range;
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
	struct pt_ctx host;
	struct pt_ctx guest;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level1 guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;
	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_sync;
	bool dirty_vmcs12;

	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS. Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;

	bool change_vmcs01_virtual_apic_mode;

	/*
	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
	 * use it. However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;

	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;

	struct loaded_vmcs vmcs02;

	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
	struct kvm_host_map virtual_apic_map;
	struct kvm_host_map pi_desc_map;

	struct kvm_host_map msr_bitmap_map;

	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;

	struct hrtimer preemption_timer;
	bool preemption_timer_expired;

	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
	u64 vmcs01_debugctl;
	u64 vmcs01_guest_bndcfgs;

	u16 vpid02;
	u16 last_vpid;

	struct nested_vmx_msrs msrs;

	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;

	gpa_t hv_evmcs_vmptr;
	struct kvm_host_map hv_evmcs_map;
	struct hv_enlightened_vmcs *hv_evmcs;
};

struct vcpu_vmx {
	struct kvm_vcpu vcpu;
	u8 fail;
	u8 msr_bitmap_mode;
	u32 exit_intr_info;
	u32 idt_vectoring_info;
	ulong rflags;
	struct shared_msr_entry *guest_msrs;
	int nmsrs;
	int save_nmsrs;
	bool guest_msrs_dirty;
	unsigned long host_idt_base;
#ifdef CONFIG_X86_64
	u64 msr_host_kernel_gs_base;
	u64 msr_guest_kernel_gs_base;
#endif

	u64 spec_ctrl;

	u32 vm_entry_controls_shadow;
	u32 vm_exit_controls_shadow;
	u32 secondary_exec_control;

	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS. loaded_cpu_state points
	 * to the VMCS whose state is loaded into the CPU registers that only
	 * need to be switched when transitioning to/from the kernel; a NULL
	 * value indicates that host state is loaded.
	 */
	struct loaded_vmcs vmcs01;
	struct loaded_vmcs *loaded_vmcs;
	struct loaded_vmcs *loaded_cpu_state;

	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	u32 exit_reason;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	int ple_window;
	bool ple_window_dirty;

	bool req_immediate_exit;

	/* Support for PML */
#define PML_ENTITY_NUM		512
	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	u64 current_tsc_ratio;

	u32 host_pkru;

	unsigned long host_debugctlmsr;

	u64 msr_ia32_power_ctl;

	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;
	u64 ept_pointer;

	struct pt_desc pt_desc;
};

enum ept_pointers_status {
	EPT_POINTERS_CHECK = 0,
	EPT_POINTERS_MATCH = 1,
	EPT_POINTERS_MISMATCH = 2
};

struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;

	enum ept_pointers_status ept_pointers_match;
	spinlock_t ept_pointer_lock;
};

bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void vmx_vcpu_put(struct kvm_vcpu *vcpu);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
void update_exception_bitmap(struct kvm_vcpu *vcpu);
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);

#define POSTED_INTR_ON  0
#define POSTED_INTR_SN  1

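/*
 * POSTED_INTR_ON and POSTED_INTR_SN are bit positions within pi_desc.control
 * (bits 256 and 257 of the descriptor), which is why the helpers below apply
 * the atomic bitops to &pi_desc->control.
 */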
static inline bool pi_test_and_set_on(struct pi_desc *pi_desc)
{
	return test_and_set_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static inline bool pi_test_and_clear_on(struct pi_desc *pi_desc)
{
	return test_and_clear_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
{
	return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
}

static inline void pi_set_sn(struct pi_desc *pi_desc)
{
	set_bit(POSTED_INTR_SN,
		(unsigned long *)&pi_desc->control);
}

static inline void pi_set_on(struct pi_desc *pi_desc)
{
	set_bit(POSTED_INTR_ON,
		(unsigned long *)&pi_desc->control);
}

static inline void pi_clear_on(struct pi_desc *pi_desc)
{
	clear_bit(POSTED_INTR_ON,
		(unsigned long *)&pi_desc->control);
}

static inline int pi_test_on(struct pi_desc *pi_desc)
{
	return test_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static inline int pi_test_sn(struct pi_desc *pi_desc)
{
	return test_bit(POSTED_INTR_SN,
			(unsigned long *)&pi_desc->control);
}

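/* Requested Virtual Interrupt: the low byte of the guest interrupt status. */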
static inline u8 vmx_get_rvi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

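/*
 * The VM-entry/VM-exit control shadows cache the current VMCS values so hot
 * paths can read and update the controls without a VMREAD; the shadow must be
 * reset after switching to a different VMCS.
 */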
static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx)
{
	vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS);
}

static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
{
	vmcs_write32(VM_ENTRY_CONTROLS, val);
	vmx->vm_entry_controls_shadow = val;
}

static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
{
	if (vmx->vm_entry_controls_shadow != val)
		vm_entry_controls_init(vmx, val);
}

static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
{
	return vmx->vm_entry_controls_shadow;
}

static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
{
	vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
}

static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
{
	vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
}

static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx)
{
	vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS);
}

static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
{
	vmcs_write32(VM_EXIT_CONTROLS, val);
	vmx->vm_exit_controls_shadow = val;
}

static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
{
	if (vmx->vm_exit_controls_shadow != val)
		vm_exit_controls_init(vmx, val);
}

static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
{
	return vmx->vm_exit_controls_shadow;
}

static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
{
	vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
}

static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
{
	vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
}

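/* Invalidate all cached segment fields; subsequent reads hit the VMCS. */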
static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
	vmx->segment_cache.bitmask = 0;
}

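/*
 * In Intel PT "system" mode the host owns the trace hardware and PT state is
 * not switched across VM entry/exit, so the PT entry/exit controls are
 * masked off below.
 */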
static inline u32 vmx_vmentry_ctrl(void)
{
	u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;

	if (pt_mode == PT_MODE_SYSTEM)
		vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
				  VM_ENTRY_LOAD_IA32_RTIT_CTL);
	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
	return vmentry_ctrl &
		~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER);
}

static inline u32 vmx_vmexit_ctrl(void)
{
	u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;

	if (pt_mode == PT_MODE_SYSTEM)
		vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
				 VM_EXIT_CLEAR_IA32_RTIT_CTL);
	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
	return vmexit_ctrl &
		~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
}

u32 vmx_exec_control(struct vcpu_vmx *vmx);

static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
{
	return &(to_vmx(vcpu)->pi_desc);
}

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
	return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
			      GFP_KERNEL_ACCOUNT);
}
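
/*
 * With EPT enabled, when a GPA flush is requested or VPID is not in use,
 * flush the EPT context for the current root; otherwise flush linear
 * translations tagged with the given VPID.
 */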
static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
				   bool invalidate_gpa)
{
	if (enable_ept && (invalidate_gpa || !enable_vpid)) {
		if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
			return;
		ept_sync_context(construct_eptp(vcpu,
						vcpu->arch.mmu->root_hpa));
	} else {
		vpid_sync_context(vpid);
	}
}

static inline void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
{
	__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
}

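/* Refresh the VMCS TSC multiplier from the vCPU's current scaling ratio. */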
static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
{
	vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
	vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
}

void dump_vmcs(void);

#endif /* __KVM_X86_VMX_H */