KVM: x86: Use more verbose names for mem encrypt kvm_x86_ops hooks
[linux-block.git] arch/x86/kvm/svm/svm.h

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

#define IOPM_SIZE  (PAGE_SIZE * 3)
#define MSRPM_SIZE (PAGE_SIZE * 2)

#define MAX_DIRECT_ACCESS_MSRS 20
#define MSRPM_OFFSETS 16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern bool intercept_smi;

/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to
 * be updated if this enum is modified.
 */
enum {
        VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
                            pause filter count */
        VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
        VMCB_ASID,       /* ASID */
        VMCB_INTR,       /* int_ctl, int_vector */
        VMCB_NPT,        /* npt_en, nCR3, gPAT */
        VMCB_CR,         /* CR0, CR3, CR4, EFER */
        VMCB_DR,         /* DR6, DR7 */
        VMCB_DT,         /* GDT, IDT */
        VMCB_SEG,        /* CS, DS, SS, ES, CPL */
        VMCB_CR2,        /* CR2 only */
        VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
        VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
                          * AVIC PHYSICAL_TABLE pointer,
                          * AVIC LOGICAL_TABLE pointer
                          */
        VMCB_SW = 31,    /* Reserved for hypervisor/software use */
};

#define VMCB_ALL_CLEAN_MASK (                                    \
        (1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |        \
        (1U << VMCB_ASID) | (1U << VMCB_INTR) |                  \
        (1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |   \
        (1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |  \
        (1U << VMCB_LBR) | (1U << VMCB_AVIC) |                   \
        (1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK ((1U << VMCB_INTR) | (1U << VMCB_CR2))

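/*
 * Illustrative example (editor's sketch, not part of the original header):
 * after KVM changes a VMCB field covered by one of the clean bits above, the
 * corresponding bit must be cleared so that hardware reloads the state on the
 * next VMRUN, e.g. via vmcb_mark_dirty() defined later in this header:
 *
 *      svm->vmcb->save.cr0 = new_cr0;          // new_cr0 is hypothetical
 *      vmcb_mark_dirty(svm->vmcb, VMCB_CR);
 */
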
struct kvm_sev_info {
        bool active;                    /* SEV enabled guest */
        bool es_active;                 /* SEV-ES enabled guest */
        unsigned int asid;              /* ASID used for this guest */
        unsigned int handle;            /* SEV firmware handle */
        int fd;                         /* SEV device fd */
        unsigned long pages_locked;     /* Number of pages locked */
        struct list_head regions_list;  /* List of registered regions */
        u64 ap_jump_table;              /* SEV-ES AP Jump Table address */
        struct kvm *enc_context_owner;  /* Owner of copied encryption context */
        unsigned long num_mirrored_vms; /* Number of VMs sharing this ASID */
        struct misc_cg *misc_cg;        /* For misc cgroup accounting */
        atomic_t migration_in_progress;
};

struct kvm_svm {
        struct kvm kvm;

        /* Struct members for AVIC */
        u32 avic_vm_id;
        struct page *avic_logical_id_table_page;
        struct page *avic_physical_id_table_page;
        struct hlist_node hnode;

        struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct kvm_vmcb_info {
        struct vmcb *ptr;
        unsigned long pa;
        int cpu;
        uint64_t asid_generation;
};

struct vmcb_save_area_cached {
        u64 efer;
        u64 cr4;
        u64 cr3;
        u64 cr0;
        u64 dr7;
        u64 dr6;
};

struct vmcb_ctrl_area_cached {
        u32 intercepts[MAX_INTERCEPT];
        u16 pause_filter_thresh;
        u16 pause_filter_count;
        u64 iopm_base_pa;
        u64 msrpm_base_pa;
        u64 tsc_offset;
        u32 asid;
        u8 tlb_ctl;
        u32 int_ctl;
        u32 int_vector;
        u32 int_state;
        u32 exit_code;
        u32 exit_code_hi;
        u64 exit_info_1;
        u64 exit_info_2;
        u32 exit_int_info;
        u32 exit_int_info_err;
        u64 nested_ctl;
        u32 event_inj;
        u32 event_inj_err;
        u64 nested_cr3;
        u64 virt_ext;
};

struct svm_nested_state {
        struct kvm_vmcb_info vmcb02;
        u64 hsave_msr;
        u64 vm_cr_msr;
        u64 vmcb12_gpa;
        u64 last_vmcb12_gpa;

        /* These are the merged vectors */
        u32 *msrpm;

        /*
         * A VMRUN has started but has not yet been performed, so
         * we cannot inject a nested vmexit yet.
         */
        bool nested_run_pending;

        /* cache for control fields of the guest */
        struct vmcb_ctrl_area_cached ctl;

        /*
         * Note: this struct is not kept up-to-date while L2 runs; it is only
         * valid within nested_svm_vmrun.
         */
        struct vmcb_save_area_cached save;

        bool initialized;
};

struct vcpu_sev_es_state {
        /* SEV-ES support */
        struct vmcb_save_area *vmsa;
        struct ghcb *ghcb;
        struct kvm_host_map ghcb_map;
        bool received_first_sipi;

        /* SEV-ES scratch area support */
        void *ghcb_sa;
        u32 ghcb_sa_len;
        bool ghcb_sa_sync;
        bool ghcb_sa_free;
};

struct vcpu_svm {
        struct kvm_vcpu vcpu;
        /* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
        struct vmcb *vmcb;
        struct kvm_vmcb_info vmcb01;
        struct kvm_vmcb_info *current_vmcb;
        struct svm_cpu_data *svm_data;
        u32 asid;
        u32 sysenter_esp_hi;
        u32 sysenter_eip_hi;
        uint64_t tsc_aux;

        u64 msr_decfg;

        u64 next_rip;

        u64 spec_ctrl;

        u64 tsc_ratio_msr;
        /*
         * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
         * translated into the appropriate L2_CFG bits on the host to
         * perform speculative control.
         */
        u64 virt_spec_ctrl;

        u32 *msrpm;

        ulong nmi_iret_rip;

        struct svm_nested_state nested;

        bool nmi_singlestep;
        u64 nmi_singlestep_guest_rflags;

        unsigned int3_injected;
        unsigned long int3_rip;

        /* cached guest cpuid flags for faster access */
        bool nrips_enabled : 1;
        bool tsc_scaling_enabled : 1;

        u32 ldr_reg;
        u32 dfr_reg;
        struct page *avic_backing_page;
        u64 *avic_physical_id_cache;

        /*
         * Per-vcpu list of struct amd_svm_iommu_ir:
         * This is used mainly to store interrupt remapping information used
         * when updating the vcpu affinity. This avoids the need to scan for
         * IRTE and try to match ga_tag in the IOMMU driver.
         */
        struct list_head ir_list;
        spinlock_t ir_list_lock;

        /* Save desired MSR intercept (read: pass-through) state */
        struct {
                DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
                DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
        } shadow_msr_intercept;

        struct vcpu_sev_es_state sev_es;

        bool guest_state_loaded;
};

struct svm_cpu_data {
        int cpu;

        u64 asid_generation;
        u32 max_asid;
        u32 next_asid;
        u32 min_asid;
        struct kvm_ldttss_desc *tss_desc;

        struct page *save_area;
        struct vmcb *current_vmcb;

        /* index = sev_asid, value = vmcb pointer */
        struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
        return container_of(kvm, struct kvm_svm, kvm);
}

static __always_inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return sev->active;
#else
        return false;
#endif
}

static __always_inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return sev->es_active && !WARN_ON_ONCE(!sev->active);
#else
        return false;
#endif
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
        vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
        vmcb->control.clean = VMCB_ALL_CLEAN_MASK
                              & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
        vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
        return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}

static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct vcpu_svm, vcpu);
}

/*
 * Only the PDPTRs are loaded on demand into the shadow MMU.  All other
 * fields are synchronized in handle_exit, because accessing the VMCB is cheap.
 *
 * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
 * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
 * is changed.  svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
 */
#define SVM_REGS_LAZY_LOAD_SET (1 << VCPU_EXREG_PDPTR)

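/*
 * Illustrative sketch (assumption based on the comment above, not code from
 * this header) of the lazy CR3 path:
 *
 *      vcpu->arch.cr3 = new_cr3;                       // new_cr3 is hypothetical
 *      kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
 *
 * svm_load_mmu_pgd() later writes the value into vmcb->save.cr3 and marks
 * VMCB_CR dirty before the next VMRUN.
 */
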
static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
        WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
        __set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
        WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
        __clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
        WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
        return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
        WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
        return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        if (!sev_es_guest(svm->vcpu.kvm)) {
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
        }

        vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
        vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

        recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        vmcb->control.intercepts[INTERCEPT_DR] = 0;

        /* DR7 access must remain intercepted for an SEV-ES guest */
        if (sev_es_guest(svm->vcpu.kvm)) {
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
                vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
        }

        recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        WARN_ON_ONCE(bit >= 32);
        vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

        recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        WARN_ON_ONCE(bit >= 32);
        vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

        recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        vmcb_set_intercept(&vmcb->control, bit);

        recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
        struct vmcb *vmcb = svm->vmcb01.ptr;

        vmcb_clr_intercept(&vmcb->control, bit);

        recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
        return vmcb_is_intercept(&svm->vmcb->control, bit);
}

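/*
 * Illustrative example (not part of the original header): intercepts are
 * normally toggled on vmcb01 through the wrappers above, so that
 * recalc_intercepts() can merge them with the nested (vmcb12) intercepts,
 * e.g.:
 *
 *      svm_set_intercept(svm, INTERCEPT_HLT);
 *      if (svm_is_intercept(svm, INTERCEPT_HLT))
 *              svm_clr_intercept(svm, INTERCEPT_HLT);
 */
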
static inline bool vgif_enabled(struct vcpu_svm *svm)
{
        return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
        if (vgif_enabled(svm))
                svm->vmcb->control.int_ctl |= V_GIF_MASK;
        else
                svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
        if (vgif_enabled(svm))
                svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
        else
                svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
        if (vgif_enabled(svm))
                return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
        else
                return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

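/*
 * Background note (added, not in the original header): GIF is AMD's Global
 * Interrupt Flag; while it is clear the vCPU takes no interrupts, NMIs or
 * SMIs. With hardware vGIF (V_GIF_ENABLE_MASK set in int_ctl) the flag is
 * tracked in the VMCB; otherwise KVM emulates it with HF_GIF_MASK in
 * vcpu->arch.hflags, which is what the helpers above implement.
 */
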
/* svm.c */
#define MSR_INVALID 0xffffffffU

extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
                          int read, int write);

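/*
 * Illustrative example (not part of the original header): MSR intercepts are
 * toggled through set_msr_interception(); read/write of 1 requests
 * pass-through (no intercept) for that MSR, e.g.:
 *
 *      set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
 */
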
/* nested.c */

#define NESTED_EXIT_HOST     0  /* Exit handled on host level */
#define NESTED_EXIT_DONE     1  /* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE 2  /* Further checks needed      */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
        return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
        return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
        return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
                         u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct kvm_vcpu *vcpu);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
                          struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
        svm->vmcb->control.exit_code   = exit_code;
        svm->vmcb->control.exit_info_1 = 0;
        svm->vmcb->control.exit_info_2 = 0;
        return nested_svm_vmexit(svm);
}

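/*
 * Illustrative example (not part of the original header): a synthetic nested
 * exit that carries no extra exit information can be injected with, e.g.:
 *
 *      return nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
 */
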
int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                               bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
                                       struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
                                    struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_post_state_restore(struct kvm_vcpu *vcpu);
void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
                       uint32_t guest_irq, bool set);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);

/* sev.c */

#define GHCB_VERSION_MAX 1ULL
#define GHCB_VERSION_MIN 1ULL

extern unsigned int max_sev_asid;

void sev_vm_destroy(struct kvm *kvm);
int svm_mem_enc_ioctl(struct kvm *kvm, void __user *argp);
int svm_register_enc_region(struct kvm *kvm,
                            struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
                              struct kvm_enc_region *range);
int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd);
int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_teardown(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_vcpu_reset(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_guest_switch(struct vmcb_save_area *hostsa);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

/* vmenter.S */

void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);

#endif