// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#include "cpuid.h"
#include "kvm_cache_regs.h"

/*
 * Helpers to convert to/from physical addresses for pages whose address is
 * consumed directly by hardware.  Even though it's a physical address, SVM
 * often restricts the address to the natural width, hence 'unsigned long'
 * instead of 'hpa_t'.
 */
static inline unsigned long __sme_page_pa(struct page *page)
{
	return __sme_set(page_to_pfn(page) << PAGE_SHIFT);
}

static inline struct page *__sme_pa_to_page(unsigned long pa)
{
	return pfn_to_page(__sme_clr(pa) >> PAGE_SHIFT);
}
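
/*
 * Illustrative usage, not taken from the kernel source: the two helpers
 * above are inverses, so a page handed to hardware can be recovered from
 * the (possibly C-bit-tagged) physical address:
 *
 *	struct page *page = alloc_page(GFP_KERNEL_ACCOUNT);
 *	unsigned long pa = __sme_page_pa(page);
 *	WARN_ON(__sme_pa_to_page(pa) != page);
 */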

#define IOPM_SIZE	(PAGE_SIZE * 3)
#define MSRPM_SIZE	(PAGE_SIZE * 2)
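/*
 * Sizing note (editorial): per the AMD APM, the I/O permissions map is
 * 12 Kbytes (hence three pages) and the MSR permissions map is four
 * 2-Kbyte bit vectors (hence two pages).
 */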

#define MAX_DIRECT_ACCESS_MSRS	48
#define MSRPM_OFFSETS		32
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern int nrips;
extern int vgif;
extern bool intercept_smi;
extern bool x2avic_enabled;
extern bool vnmi;
extern int lbrv;

/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to be updated if this enum is
 * modified.
 */
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,	 /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,	 /* DR6, DR7 */
	VMCB_DT,	 /* GDT, IDT */
	VMCB_SEG,	 /* CS, DS, SS, ES, CPL */
	VMCB_CR2,	 /* CR2 only */
	VMCB_LBR,	 /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,	 /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_SW = 31,	 /* Reserved for hypervisor/software use */
};

#define VMCB_ALL_CLEAN_MASK (					\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |	\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |			\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |	\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |	\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) |			\
	(1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	bool need_init;		/* waiting for SEV_INIT2 */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long policy;
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	u64 vmsa_features;
	u16 ghcb_version;	/* Highest guest GHCB protocol version allowed */
	struct kvm *enc_context_owner; /* Owner of copied encryption context */
	struct list_head mirror_vms; /* List of VMs mirroring */
	struct list_head mirror_entry; /* Use as a list entry of mirrors */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
	atomic_t migration_in_progress;
	void *snp_context;      /* SNP guest context page */
	void *guest_req_buf;    /* Bounce buffer for SNP Guest Request input */
	void *guest_resp_buf;   /* Bounce buffer for SNP Guest Request output */
	struct mutex guest_req_mutex; /* Must acquire before using bounce buffers */
};

#define SEV_POLICY_NODBG	BIT_ULL(0)
#define SNP_POLICY_DEBUG	BIT_ULL(19)

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};

struct vmcb_save_area_cached {
	u64 efer;
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
};

struct vmcb_ctrl_area_cached {
	u32 intercepts[MAX_INTERCEPT];
	u16 pause_filter_thresh;
	u16 pause_filter_count;
	u64 iopm_base_pa;
	u64 msrpm_base_pa;
	u64 tsc_offset;
	u32 asid;
	u8 tlb_ctl;
	u32 int_ctl;
	u32 int_vector;
	u32 int_state;
	u32 exit_code;
	u32 exit_code_hi;
	u64 exit_info_1;
	u64 exit_info_2;
	u32 exit_int_info;
	u32 exit_int_info_err;
	u64 nested_ctl;
	u32 event_inj;
	u32 event_inj_err;
	u64 next_rip;
	u64 nested_cr3;
	u64 virt_ext;
	u32 clean;
	u64 bus_lock_rip;
	union {
#if IS_ENABLED(CONFIG_HYPERV) || IS_ENABLED(CONFIG_KVM_HYPERV)
		struct hv_vmcb_enlightenments hv_enlightenments;
#endif
		u8 reserved_sw[32];
	};
};

struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;
	u64 last_vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/*
	 * A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.
	 */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_ctrl_area_cached ctl;

	/*
	 * Note: this struct is not kept up-to-date while L2 runs; it is only
	 * valid within nested_svm_vmrun.
	 */
	struct vmcb_save_area_cached save;

	bool initialized;

	/*
	 * Indicates whether MSR bitmap for L2 needs to be rebuilt due to
	 * changes in MSR bitmap for L1 or switching to a different L2.  Note,
	 * this flag can only be used reliably in conjunction with a paravirt L1
	 * which informs L0 whether any changes to MSR bitmap for L2 were done
	 * on its side.
	 */
	bool force_msr_bitmap_recalc;
};

struct vcpu_sev_es_state {
	/* SEV-ES support */
	struct sev_es_save_area *vmsa;
	struct ghcb *ghcb;
	u8 valid_bitmap[16];
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;
	unsigned int ap_reset_hold_type;

	/* SEV-ES scratch area support */
	u64 sw_scratch;
	void *ghcb_sa;
	u32 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;

	/* SNP Page-State-Change buffer entries currently being processed */
	u16 psc_idx;
	u16 psc_inflight;
	bool psc_2m;

	u64 ghcb_registered_gpa;

	struct mutex snp_vmsa_mutex; /* Used to handle concurrent updates of VMSA. */
	gpa_t snp_vmsa_gpa;
	bool snp_ap_waiting_for_reset;
	bool snp_has_guest_vmsa;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	u32 asid;
	u32 sysenter_esp_hi;
	u32 sysenter_eip_hi;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 spec_ctrl;

	u64 tsc_ratio_msr;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	/* NMI mask value, used when vNMI is not enabled */
	bool nmi_masked;

	/*
	 * True when NMIs are still masked but guest IRET was just intercepted
	 * and KVM is waiting for RIP to change, which will signal that the
	 * intercepted IRET was retired and thus NMI can be unmasked.
	 */
	bool awaiting_iret_completion;

	/*
	 * Set when KVM is awaiting IRET completion and needs to inject NMIs as
	 * soon as the IRET completes (e.g. NMI is pending injection).  KVM
	 * temporarily steals RFLAGS.TF to single-step the guest in this case
	 * in order to regain control as soon as the NMI-blocking condition
	 * goes away.
	 */
	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	bool nmi_l1_to_l2;

	unsigned long soft_int_csbase;
	unsigned long soft_int_old_rip;
	unsigned long soft_int_next_rip;
	bool soft_int_injected;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vcpu affinity.  This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;

	struct vcpu_sev_es_state sev_es;

	bool guest_state_loaded;

	bool x2avic_msrs_intercepted;

	/* Guest GIF value, used when vGIF is not enabled */
	bool guest_gif;
};

struct svm_cpu_data {
	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;

	bool bp_spec_reduce_set;

	struct vmcb *save_area;
	unsigned long save_area_pa;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static __always_inline struct kvm_sev_info *to_kvm_sev_info(struct kvm *kvm)
{
	return &to_kvm_svm(kvm)->sev_info;
}

#ifdef CONFIG_KVM_AMD_SEV
static __always_inline bool sev_guest(struct kvm *kvm)
{
	return to_kvm_sev_info(kvm)->active;
}

static __always_inline bool sev_es_guest(struct kvm *kvm)
{
	struct kvm_sev_info *sev = to_kvm_sev_info(kvm);

	return sev->es_active && !WARN_ON_ONCE(!sev->active);
}

static __always_inline bool sev_snp_guest(struct kvm *kvm)
{
	struct kvm_sev_info *sev = to_kvm_sev_info(kvm);

	return (sev->vmsa_features & SVM_SEV_FEAT_SNP_ACTIVE) &&
	       !WARN_ON_ONCE(!sev_es_guest(kvm));
}
#else
#define sev_guest(kvm) false
#define sev_es_guest(kvm) false
#define sev_snp_guest(kvm) false
#endif
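
/*
 * Editorial note: the helpers above encode an implication chain; SNP_ACTIVE
 * implies an SEV-ES guest, and es_active implies an SEV guest.  The
 * WARN_ON_ONCEs fire if that hierarchy is ever violated.
 */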

static inline bool ghcb_gpa_is_registered(struct vcpu_svm *svm, u64 val)
{
	return svm->sev_es.ghcb_registered_gpa == val;
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
			      & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}
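
/*
 * Illustrative usage, not taken from the kernel source: after software
 * writes a VMCB field that hardware may have cached, the matching clean
 * bit must be cleared so the next VMRUN reloads that state:
 *
 *	svm->vmcb->save.dr7 = value;
 *	vmcb_mark_dirty(svm->vmcb, VMCB_DR);
 *
 * vmcb_mark_all_clean() leaves the VMCB_ALWAYS_DIRTY_MASK bits clear, so
 * TPR (int_ctl) and CR2 are unconditionally re-read by hardware.
 */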

static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

/*
 * Only the PDPTRs are loaded on demand into the shadow MMU.  All other
 * fields are synchronized on VM-Exit, because accessing the VMCB is cheap.
 *
 * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
 * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
 * is changed.  svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
 */
#define SVM_REGS_LAZY_LOAD_SET	(1 << VCPU_EXREG_PDPTR)

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

static inline bool nested_vgif_enabled(struct vcpu_svm *svm)
{
	return guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_VGIF) &&
	       (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK);
}

static inline struct vmcb *get_vgif_vmcb(struct vcpu_svm *svm)
{
	if (!vgif)
		return NULL;

	if (is_guest_mode(&svm->vcpu) && !nested_vgif_enabled(svm))
		return svm->nested.vmcb02.ptr;
	else
		return svm->vmcb01.ptr;
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->guest_gif = true;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->guest_gif = false;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_GIF_MASK);
	else
		return svm->guest_gif;
}
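
/*
 * Illustrative usage, not taken from the kernel source: the STGI/CLGI
 * emulation paths toggle the GIF through these helpers, which pick the
 * hardware int_ctl bit when vGIF is in use and fall back to the
 * software-tracked value otherwise:
 *
 *	disable_gif(svm);
 *	WARN_ON(gif_set(svm));
 *	enable_gif(svm);
 */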

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

static inline bool nested_vnmi_enabled(struct vcpu_svm *svm)
{
	return guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_VNMI) &&
	       (svm->nested.ctl.int_ctl & V_NMI_ENABLE_MASK);
}

static inline bool is_x2apic_msrpm_offset(u32 offset)
{
	/* Each MSR takes 2 permission bits: 4 MSRs per u8, and 4 u8s per u32 */
	u32 msr = offset * 16;

	return (msr >= APIC_BASE_MSR) &&
	       (msr < (APIC_BASE_MSR + 0x100));
}
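
/*
 * Worked example (editorial, not from the kernel source): since one u32
 * of the MSRPM covers 16 MSRs, 'offset * 16' is the first MSR covered by
 * 'offset'.  With APIC_BASE_MSR == 0x800, offset 0x80 maps to MSR 0x800
 * and therefore tests as an x2APIC offset.
 */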

static inline struct vmcb *get_vnmi_vmcb_l1(struct vcpu_svm *svm)
{
	if (!vnmi)
		return NULL;

	if (is_guest_mode(&svm->vcpu))
		return NULL;
	else
		return svm->vmcb01.ptr;
}

static inline bool is_vnmi_enabled(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vnmi_vmcb_l1(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_NMI_ENABLE_MASK);
	else
		return false;
}

static inline void svm_vmgexit_set_return_code(struct vcpu_svm *svm,
					       u64 response, u64 data)
{
	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, response);
	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, data);
}

static inline void svm_vmgexit_inject_exception(struct vcpu_svm *svm, u8 vector)
{
	u64 data = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT | vector;

	svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_ISSUE_EXCEPTION, data);
}

static inline void svm_vmgexit_bad_input(struct vcpu_svm *svm, u64 suberror)
{
	svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_MALFORMED_INPUT, suberror);
}

static inline void svm_vmgexit_success(struct vcpu_svm *svm, u64 data)
{
	svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_NO_ACTION, data);
}

static inline void svm_vmgexit_no_action(struct vcpu_svm *svm, u64 data)
{
	svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_NO_ACTION, data);
}
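
/*
 * Editorial note: svm_vmgexit_success() and svm_vmgexit_no_action() both
 * report GHCB_HV_RESP_NO_ACTION; the two names let call sites document
 * whether they mean "request completed" or "nothing for the guest to do".
 */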

/* svm.c */
#define MSR_INVALID			0xffffffffU

#define DEBUGCTL_RESERVED_BITS		(~DEBUGCTLMSR_LBR)

extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
void svm_enable_lbrv(struct kvm_vcpu *vcpu);
void svm_update_lbrv(struct kvm_vcpu *vcpu);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);
void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool disable);
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
				     int trig_mode, int vec);

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct kvm_vcpu *vcpu);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code   = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}
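
/*
 * Illustrative usage, not taken from the kernel source: synthesizing a
 * nested #VMEXIT that carries no exit qualification, e.g. for an event
 * that L1 intercepts:
 *
 *	if (nested_exit_on_intr(svm))
 *		nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
 */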

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */
#define AVIC_REQUIRED_APICV_INHIBITS			\
(							\
	BIT(APICV_INHIBIT_REASON_DISABLED) |		\
	BIT(APICV_INHIBIT_REASON_ABSENT) |		\
	BIT(APICV_INHIBIT_REASON_HYPERV) |		\
	BIT(APICV_INHIBIT_REASON_NESTED) |		\
	BIT(APICV_INHIBIT_REASON_IRQWIN) |		\
	BIT(APICV_INHIBIT_REASON_PIT_REINJ) |		\
	BIT(APICV_INHIBIT_REASON_BLOCKIRQ) |		\
	BIT(APICV_INHIBIT_REASON_SEV) |			\
	BIT(APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED) |	\
	BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) |	\
	BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED) |	\
	BIT(APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED)	\
)

bool avic_hardware_setup(void);
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
			uint32_t guest_irq, bool set);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
void avic_ring_doorbell(struct kvm_vcpu *vcpu);
unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu);
void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu);

/* sev.c */

int pre_sev_run(struct vcpu_svm *svm, int cpu);
void sev_init_vmcb(struct vcpu_svm *svm);
void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_vcpu_reset(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

#ifdef CONFIG_KVM_AMD_SEV
int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp);
int sev_mem_enc_register_region(struct kvm *kvm,
				struct kvm_enc_region *range);
int sev_mem_enc_unregister_region(struct kvm *kvm,
				  struct kvm_enc_region *range);
int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd);
int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
void sev_guest_memory_reclaimed(struct kvm *kvm);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);

/* These symbols are used in common code and are stubbed below. */

struct page *snp_safe_alloc_page_node(int node, gfp_t gfp);
static inline struct page *snp_safe_alloc_page(void)
{
	return snp_safe_alloc_page_node(numa_node_id(), GFP_KERNEL_ACCOUNT);
}

void sev_free_vcpu(struct kvm_vcpu *vcpu);
void sev_vm_destroy(struct kvm *kvm);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_unsetup(void);
int sev_cpu_init(struct svm_cpu_data *sd);
int sev_dev_get_attr(u32 group, u64 attr, u64 *val);
extern unsigned int max_sev_asid;
void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code);
void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu);
int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu);
void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa);
#else
static inline struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
{
	return alloc_pages_node(node, gfp | __GFP_ZERO, 0);
}

static inline struct page *snp_safe_alloc_page(void)
{
	return snp_safe_alloc_page_node(numa_node_id(), GFP_KERNEL_ACCOUNT);
}

static inline void sev_free_vcpu(struct kvm_vcpu *vcpu) {}
static inline void sev_vm_destroy(struct kvm *kvm) {}
static inline void __init sev_set_cpu_caps(void) {}
static inline void __init sev_hardware_setup(void) {}
static inline void sev_hardware_unsetup(void) {}
static inline int sev_cpu_init(struct svm_cpu_data *sd) { return 0; }
static inline int sev_dev_get_attr(u32 group, u64 attr, u64 *val) { return -ENXIO; }
#define max_sev_asid 0
static inline void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code) {}
static inline void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu) {}
static inline int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
{
	return 0;
}
static inline void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end) {}
static inline int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
{
	return 0;
}

static inline struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu)
{
	return NULL;
}
static inline void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa) {}
#endif

/* vmenter.S */

void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted,
			   struct sev_es_save_area *hostsa);
void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);

#define DEFINE_KVM_GHCB_ACCESSORS(field)						\
	static __always_inline bool kvm_ghcb_##field##_is_valid(const struct vcpu_svm *svm) \
	{										\
		return test_bit(GHCB_BITMAP_IDX(field),					\
				(unsigned long *)&svm->sev_es.valid_bitmap);		\
	}										\
											\
	static __always_inline u64 kvm_ghcb_get_##field##_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb) \
	{										\
		return kvm_ghcb_##field##_is_valid(svm) ? ghcb->save.field : 0;		\
	}										\

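/*
 * Illustrative expansion (editorial): DEFINE_KVM_GHCB_ACCESSORS(rax)
 * defines
 *
 *	kvm_ghcb_rax_is_valid(svm)
 *	kvm_ghcb_get_rax_if_valid(svm, ghcb)
 *
 * where the getter returns ghcb->save.rax only if the guest set the
 * corresponding bit in the GHCB valid bitmap, and 0 otherwise.
 */
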
DEFINE_KVM_GHCB_ACCESSORS(cpl)
DEFINE_KVM_GHCB_ACCESSORS(rax)
DEFINE_KVM_GHCB_ACCESSORS(rcx)
DEFINE_KVM_GHCB_ACCESSORS(rdx)
DEFINE_KVM_GHCB_ACCESSORS(rbx)
DEFINE_KVM_GHCB_ACCESSORS(rsi)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_code)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_1)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_2)
DEFINE_KVM_GHCB_ACCESSORS(sw_scratch)
DEFINE_KVM_GHCB_ACCESSORS(xcr0)

#endif