// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>

#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

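/*
 * Usage sketch (illustrative, not part of the original header): with SME
 * active, __sme_set() ORs the encryption bit into a physical address, so a
 * page that hardware must treat as encrypted can be recorded as, e.g.:
 *
 *	u64 pa = __sme_page_pa(some_page);	(some_page is hypothetical)
 */
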
static const u32 host_save_user_msrs[] = {
	MSR_TSC_AUX,
};
#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

#define IOPM_SIZE	(PAGE_SIZE * 3)
#define MSRPM_SIZE	(PAGE_SIZE * 2)

#define MAX_DIRECT_ACCESS_MSRS	20
#define MSRPM_OFFSETS	16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;

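/*
 * Background sketch (an assumption drawn from the AMD APM, not from this
 * header): the MSR permission map spends two bits per MSR -- a read- and a
 * write-intercept bit -- across three 2KB regions covering the MSR ranges
 * starting at 0x00000000, 0xc0000000 and 0xc0010000. One u32 of the map
 * therefore covers 16 MSRs, which is roughly what svm_msrpm_offset()
 * (declared below) computes:
 *
 *	u32 word = range_word_base + ((msr - range_msr_base) * 2) / 32;
 */
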
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,	 /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,	 /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,	 /* DR6, DR7 */
	VMCB_DT,	 /* GDT, IDT */
	VMCB_SEG,	 /* CS, DS, SS, ES, CPL */
	VMCB_CR2,	 /* CR2 only */
	VMCB_LBR,	 /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,	 /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};

struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;
	u64 last_vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/*
	 * A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.
	 */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_control_area ctl;

	bool initialized;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	struct svm_cpu_data *svm_data;
	u32 asid;
	u32 sysenter_esp_hi;
	u32 sysenter_eip_hi;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];

	u64 spec_ctrl;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	unsigned int3_injected;
	unsigned long int3_rip;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled	: 1;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	bool avic_is_running;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vCPU affinity. This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;

	/* SEV-ES support */
	struct vmcb_save_area *vmsa;
	struct ghcb *ghcb;
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;

	/* SEV-ES scratch area support */
	void *ghcb_sa;
	u64 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;

	bool guest_state_loaded;
};
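
/*
 * Usage sketch (illustrative, not part of the original header): on nested
 * VMRUN, KVM moves from vmcb01 to vmcb02 via svm_switch_vmcb() (declared
 * below), after which the svm->vmcb shorthand tracks the new active VMCB:
 *
 *	svm_switch_vmcb(svm, &svm->nested.vmcb02);
 *	WARN_ON(svm->vmcb != svm->current_vmcb->ptr);
 */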

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}

static inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev_guest(kvm) && sev->es_active;
#else
	return false;
#endif
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}
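
/*
 * Usage sketch (illustrative, not part of the original header): after
 * software rewrites a cached VMCB area it must clear the matching clean
 * bit so the CPU reloads that state on the next VMRUN, e.g.:
 *
 *	svm->vmcb->save.cr0 = cr0;
 *	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
 *
 * vmcb_mark_all_clean() then re-arms caching for everything except the
 * fields in VMCB_ALWAYS_DIRTY_MASK, which are rewritten before every VMRUN.
 */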

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	if (!sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	}

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	/* DR7 access must remain intercepted for an SEV-ES guest */
	if (sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
	}

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}
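
/*
 * Usage sketch (illustrative, not part of the original header): the Global
 * Interrupt Flag gates event injection, so blocking checks such as
 * svm_nmi_blocked() (declared below) typically start with:
 *
 *	if (!gif_set(svm))
 *		return true;	(all interrupts, including NMI, are blocked)
 */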

/* svm.c */
#define MSR_INVALID			0xffffffffU

extern int sev;
extern int sev_es;
extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void svm_flush_tlb(struct kvm_vcpu *vcpu);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);

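/*
 * Usage sketch (illustrative, not part of the original header): passing
 * read = write = 1 disables the intercept, i.e. passes the MSR through to
 * the guest, roughly as done for SPEC_CTRL once a guest touches it:
 *
 *	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
 */
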
/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12);
void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code   = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}

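/*
 * Usage sketch (illustrative, not part of the original header): when an NMI
 * arrives while L2 runs and L1 has asked to intercept it, KVM can reflect
 * the event as a synthetic nested exit:
 *
 *	if (nested_exit_on_nmi(svm))
 *		nested_svm_simple_vmexit(svm, SVM_EXIT_NMI);
 */
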
int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT			31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)

#define VMCB_AVIC_APIC_BAR_MASK		0xFFFFFFFFFF000ULL

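/*
 * Layout sketch (illustrative, not part of the original header): a physical
 * APIC ID table entry combines the backing page address with the valid bit,
 * roughly:
 *
 *	u64 entry = (page_to_phys(svm->avic_backing_page) &
 *		     AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
 *		    AVIC_PHYSICAL_ID_ENTRY_VALID_MASK;
 */
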
extern int avic;

static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
	svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
	vmcb_mark_dirty(svm->vmcb, VMCB_AVIC);
}

static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 *entry = svm->avic_physical_id_cache;

	if (!entry)
		return false;

	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}

int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_post_state_restore(struct kvm_vcpu *vcpu);
void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_pre_update_apicv_exec_ctrl(struct kvm *kvm, bool activate);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
		       uint32_t guest_irq, bool set);
void svm_vcpu_blocking(struct kvm_vcpu *vcpu);
void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);

/* sev.c */

#define GHCB_VERSION_MAX	1ULL
#define GHCB_VERSION_MIN	1ULL

#define GHCB_MSR_INFO_POS	0
#define GHCB_MSR_INFO_MASK	(BIT_ULL(12) - 1)

#define GHCB_MSR_SEV_INFO_RESP	0x001
#define GHCB_MSR_SEV_INFO_REQ	0x002
#define GHCB_MSR_VER_MAX_POS	48
#define GHCB_MSR_VER_MAX_MASK	0xffff
#define GHCB_MSR_VER_MIN_POS	32
#define GHCB_MSR_VER_MIN_MASK	0xffff
#define GHCB_MSR_CBIT_POS	24
#define GHCB_MSR_CBIT_MASK	0xff
#define GHCB_MSR_SEV_INFO(_max, _min, _cbit)				\
	((((_max) & GHCB_MSR_VER_MAX_MASK) << GHCB_MSR_VER_MAX_POS) |	\
	 (((_min) & GHCB_MSR_VER_MIN_MASK) << GHCB_MSR_VER_MIN_POS) |	\
	 (((_cbit) & GHCB_MSR_CBIT_MASK) << GHCB_MSR_CBIT_POS) |	\
	 GHCB_MSR_SEV_INFO_RESP)

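/*
 * Usage sketch (illustrative, not part of the original header): responding
 * to a GHCB SEV_INFO_REQ packs the supported GHCB version range and the
 * encryption-bit position into one MSR value, e.g. with a hypothetical
 * sev_enc_bit variable:
 *
 *	u64 info = GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX, GHCB_VERSION_MIN,
 *				     sev_enc_bit);
 */
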
#define GHCB_MSR_CPUID_REQ		0x004
#define GHCB_MSR_CPUID_RESP		0x005
#define GHCB_MSR_CPUID_FUNC_POS		32
#define GHCB_MSR_CPUID_FUNC_MASK	0xffffffff
#define GHCB_MSR_CPUID_VALUE_POS	32
#define GHCB_MSR_CPUID_VALUE_MASK	0xffffffff
#define GHCB_MSR_CPUID_REG_POS		30
#define GHCB_MSR_CPUID_REG_MASK		0x3

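/*
 * Decode sketch (illustrative, not part of the original header): a CPUID
 * request arriving via the GHCB MSR protocol carries the leaf in bits
 * 63:32 and the requested register in bits 31:30, so for a guest-written
 * value ghcb_msr (hypothetical variable):
 *
 *	u32 func = (ghcb_msr >> GHCB_MSR_CPUID_FUNC_POS) & GHCB_MSR_CPUID_FUNC_MASK;
 *	u32 reg  = (ghcb_msr >> GHCB_MSR_CPUID_REG_POS)  & GHCB_MSR_CPUID_REG_MASK;
 */
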
#define GHCB_MSR_TERM_REQ		0x100
#define GHCB_MSR_TERM_REASON_SET_POS	12
#define GHCB_MSR_TERM_REASON_SET_MASK	0xf
#define GHCB_MSR_TERM_REASON_POS	16
#define GHCB_MSR_TERM_REASON_MASK	0xff

extern unsigned int max_sev_asid;

static inline bool svm_sev_enabled(void)
{
	return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? max_sev_asid : 0;
}

void sev_vm_destroy(struct kvm *kvm);
int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_hardware_setup(void);
void sev_hardware_teardown(void);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_create_vcpu(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);

/* vmenter.S */

void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);

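/*
 * Usage sketch (illustrative, not part of the original header): SEV-ES
 * guests keep their GPRs in the encrypted VMSA, so only the VMCB physical
 * address is passed; other guests also hand over the register file
 * (vmcb_pa is a hypothetical local here):
 *
 *	if (sev_es_guest(vcpu->kvm))
 *		__svm_sev_es_vcpu_run(vmcb_pa);
 *	else
 *		__svm_vcpu_run(vmcb_pa, (unsigned long *)&vcpu->arch.regs);
 */
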
883b0a91 | 599 | #endif |