// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

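/*
 * Per the AMD APM, the I/O permissions map occupies 12K and the MSR
 * permissions map 8K, hence three and two pages respectively.
 */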
#define IOPM_SIZE	(PAGE_SIZE * 3)
#define MSRPM_SIZE	(PAGE_SIZE * 2)

#define MAX_DIRECT_ACCESS_MSRS	20
#define MSRPM_OFFSETS	16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern bool intercept_smi;
extern bool pmu;

/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to be updated if this enum is
 * modified.
 */
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_SW = 31,    /* Reserved for hypervisor/software use */
};

#define VMCB_ALL_CLEAN_MASK (					\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |	\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |			\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |	\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |	\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) |			\
	(1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))
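
/*
 * Typical usage (illustrative sketch): after software changes a cached
 * VMCB field, it must clear the matching clean bit so that hardware
 * reloads that field on the next VMRUN, e.g.:
 *
 *	svm->vmcb->control.int_ctl |= V_IRQ_MASK;
 *	vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
 */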

struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	struct kvm *enc_context_owner; /* Owner of copied encryption context */
	unsigned long num_mirrored_vms; /* Number of VMs sharing this ASID */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
	atomic_t migration_in_progress;
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};

struct vmcb_save_area_cached {
	u64 efer;
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
};

struct vmcb_ctrl_area_cached {
	u32 intercepts[MAX_INTERCEPT];
	u16 pause_filter_thresh;
	u16 pause_filter_count;
	u64 iopm_base_pa;
	u64 msrpm_base_pa;
	u64 tsc_offset;
	u32 asid;
	u8 tlb_ctl;
	u32 int_ctl;
	u32 int_vector;
	u32 int_state;
	u32 exit_code;
	u32 exit_code_hi;
	u64 exit_info_1;
	u64 exit_info_2;
	u32 exit_int_info;
	u32 exit_int_info_err;
	u64 nested_ctl;
	u32 event_inj;
	u32 event_inj_err;
	u64 nested_cr3;
	u64 virt_ext;
};

struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;
	u64 last_vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/*
	 * A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.
	 */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_ctrl_area_cached ctl;

	/*
	 * Note: this struct is not kept up-to-date while L2 runs; it is only
	 * valid within nested_svm_vmrun.
	 */
	struct vmcb_save_area_cached save;

	bool initialized;
};

struct vcpu_sev_es_state {
	/* SEV-ES support */
	struct vmcb_save_area *vmsa;
	struct ghcb *ghcb;
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;

	/* SEV-ES scratch area support */
	void *ghcb_sa;
	u32 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	struct svm_cpu_data *svm_data;
	u32 asid;
	u32 sysenter_esp_hi;
	u32 sysenter_eip_hi;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 spec_ctrl;

	u64 tsc_ratio_msr;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	unsigned int3_injected;
	unsigned long int3_rip;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled	: 1;
	bool tsc_scaling_enabled	: 1;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	bool avic_is_running;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vcpu affinity. This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;

	struct vcpu_sev_es_state sev_es;

	bool guest_state_loaded;
};

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static __always_inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}

static __always_inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->es_active && !WARN_ON_ONCE(!sev->active);
#else
	return false;
#endif
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline bool vmcb_is_clean(struct vmcb *vmcb, int bit)
{
	return (vmcb->control.clean & (1 << bit));
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}
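
/*
 * Note (illustrative): the clean bits are only meaningful relative to the
 * physical CPU's cached copy of this VMCB, so they must all be cleared,
 * e.g. via vmcb_mark_all_dirty(), whenever the vCPU is run on a different
 * physical CPU or a different VMCB was last run on this one.
 */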

static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

/*
 * Only the PDPTRs are loaded on demand into the shadow MMU.  All other
 * fields are synchronized in handle_exit, because accessing the VMCB is cheap.
 *
 * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
 * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
 * is changed.  svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
 */
#define SVM_REGS_LAZY_LOAD_SET	(1 << VCPU_EXREG_PDPTR)

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}
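
/*
 * Illustrative usage: vmcb_set/clr/is_intercept() operate on a real VMCB
 * control area, while vmcb12_is_intercept() queries the cached copy of
 * L1's vmcb12 controls, e.g.:
 *
 *	vmcb_set_intercept(&svm->vmcb->control, INTERCEPT_MSR_PROT);
 *	if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_RDTSC))
 *		...
 */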

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	if (!sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	}

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	/* DR7 access must remain intercepted for an SEV-ES guest */
	if (sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
	}

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}
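
/*
 * Illustrative usage: GIF gates all interrupt sources; with GIF clear,
 * NMIs, SMIs and IRQs are all held pending, so blocking checks start with:
 *
 *	if (!gif_set(svm))
 *		return true;
 */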

/* svm.c */
#define MSR_INVALID			0xffffffffU

extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void svm_flush_tlb(struct kvm_vcpu *vcpu);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}
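
/*
 * Illustrative note: these helpers test the cached vmcb12 intercepts, i.e.
 * what L1 asked for, not what is currently programmed into vmcb02; they
 * decide whether a pending event must be reflected to L1 as a nested exit.
 */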

int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code   = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}
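
/*
 * Illustrative usage (sketch): synthesizing an exit that carries no extra
 * exit information, e.g. reflecting a pending interrupt to L1:
 *
 *	if (nested_exit_on_intr(svm))
 *		nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
 */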

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT			31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)

#define VMCB_AVIC_APIC_BAR_MASK		0xFFFFFFFFFF000ULL

static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 *entry = svm->avic_physical_id_cache;

	if (!entry)
		return false;

	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}
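
/*
 * Illustrative note: the IS_RUNNING bit in the physical ID table entry
 * tells hardware whether a doorbell can deliver an interrupt directly to
 * the running vCPU; delivery paths check avic_vcpu_is_running() to decide
 * between signaling the doorbell and kicking/waking the vCPU.
 */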

int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_post_state_restore(struct kvm_vcpu *vcpu);
void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
		       uint32_t guest_irq, bool set);
void svm_vcpu_blocking(struct kvm_vcpu *vcpu);
void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);

/* sev.c */

#define GHCB_VERSION_MAX	1ULL
#define GHCB_VERSION_MIN	1ULL

extern unsigned int max_sev_asid;

void sev_vm_destroy(struct kvm *kvm);
int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range);
int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd);
int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_teardown(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_vcpu_reset(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

/* vmenter.S */

void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);

#endif