// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

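/*
 * Illustrative only: __sme_page_pa() yields the SME-tagged physical address
 * of a page, e.g. when handing a freshly allocated VMCB to hardware
 * (sketch, assuming a valid struct page *vmcb01_page):
 *
 *	svm->vmcb01.pa = __sme_page_pa(vmcb01_page);
 */
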
#define IOPM_SIZE	(PAGE_SIZE * 3)
#define MSRPM_SIZE	(PAGE_SIZE * 2)

#define MAX_DIRECT_ACCESS_MSRS	20
#define MSRPM_OFFSETS	16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern bool intercept_smi;

/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to
 * be updated if this enum is modified.
 */
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,       /* ASID */
	VMCB_INTR,       /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,         /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_SW = 31,    /* Reserved for hypervisor/software use */
};

#define VMCB_ALL_CLEAN_MASK (					\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |	\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |			\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |	\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |	\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) |			\
	(1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

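/*
 * The clean bits form a cache protocol between KVM and the CPU: VMRUN may
 * skip reloading any VMCB field group whose clean bit is set, so every
 * software modification must clear the matching bit.  A minimal sketch
 * (illustrative only, assuming a valid struct vmcb *vmcb):
 *
 *	vmcb->save.cr4 = new_cr4;
 *	vmcb_mark_dirty(vmcb, VMCB_CR);	// force hardware to re-read CR state
 */
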
struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked;	/* Number of pages locked */
	struct list_head regions_list;	/* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	struct kvm *enc_context_owner;	/* Owner of copied encryption context */
	struct misc_cg *misc_cg;	/* For misc cgroup accounting */
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};

struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;
	u64 last_vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/*
	 * A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.
	 */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_control_area ctl;

	bool initialized;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	struct svm_cpu_data *svm_data;
	u32 asid;
	u32 sysenter_esp_hi;
	u32 sysenter_eip_hi;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 spec_ctrl;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	unsigned int3_injected;
	unsigned long int3_rip;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled : 1;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	bool avic_is_running;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vcpu affinity.  This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;

	/* SEV-ES support */
	struct vmcb_save_area *vmsa;
	struct ghcb *ghcb;
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;

	/* SEV-ES scratch area support */
	void *ghcb_sa;
	u64 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;

	bool guest_state_loaded;
};

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

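/*
 * Each physical CPU carries its own ASID allocator and save area.  Access
 * follows the usual per-CPU pattern, e.g. (illustrative sketch):
 *
 *	struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());
 */
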
void recalc_intercepts(struct vcpu_svm *svm);

static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}

static inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev_guest(kvm) && sev->es_active;
#else
	return false;
#endif
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline bool vmcb_is_clean(struct vmcb *vmcb, int bit)
{
	return (vmcb->control.clean & (1 << bit));
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

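/*
 * The intercept accessors treat control->intercepts[] as one flat bitmap of
 * 32 * MAX_INTERCEPT bits.  Illustrative sketch (not kernel code): trap
 * guest CPUID and verify the bit stuck.
 *
 *	vmcb_set_intercept(&svm->vmcb01.ptr->control, INTERCEPT_CPUID);
 *	WARN_ON(!vmcb_is_intercept(&svm->vmcb01.ptr->control, INTERCEPT_CPUID));
 */
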
static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	if (!sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	}

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	/* DR7 access must remain intercepted for an SEV-ES guest */
	if (sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
	}

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

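/*
 * enable_gif()/disable_gif() implement the guest's STGI/CLGI semantics:
 * with hardware vGIF the flag lives in int_ctl, otherwise KVM tracks it in
 * vcpu->arch.hflags.  Illustrative sketch of a typical check:
 *
 *	if (!gif_set(svm))
 *		return true;	// e.g. interrupt delivery blocked while GIF=0
 */
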
/* svm.c */
#define MSR_INVALID			0xffffffffU

extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void svm_flush_tlb(struct kvm_vcpu *vcpu);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);

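/*
 * For set_msr_interception(), read/write == 1 disables the intercept, i.e.
 * the MSR is passed through to the guest.  Illustrative sketch, mirroring
 * how svm.c opens up SPEC_CTRL once the guest has written it:
 *
 *	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
 */
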
/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

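/*
 * Sketch of how the NESTED_EXIT_* codes drive exit reflection while L2 is
 * running (illustrative only, not the exact kernel control flow):
 *
 *	switch (nested_svm_exit_handled(svm)) {
 *	case NESTED_EXIT_DONE:		// reflected to L1 as a nested #VMEXIT
 *		return 1;
 *	case NESTED_EXIT_HOST:		// L0 handles the exit itself
 *	default:
 *		break;
 *	}
 */
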
static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12);
void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}

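/*
 * nested_svm_simple_vmexit() synthesizes a #VMEXIT that carries no exit
 * info, e.g. (illustrative) reflecting an interrupt window to L1:
 *
 *	return nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
 */
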
int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
				     struct vmcb_control_area *control);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT			31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)

#define VMCB_AVIC_APIC_BAR_MASK		0xFFFFFFFFFF000ULL

static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 *entry = svm->avic_physical_id_cache;

	if (!entry)
		return false;

	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}

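/*
 * Each AVIC physical ID table entry packs the fields masked above.
 * Decoding sketch (illustrative only):
 *
 *	u64 entry = READ_ONCE(*svm->avic_physical_id_cache);
 *	u32 host_cpu = entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
 *	bool valid   = entry & AVIC_PHYSICAL_ID_ENTRY_VALID_MASK;
 */
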
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_post_state_restore(struct kvm_vcpu *vcpu);
void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
		       uint32_t guest_irq, bool set);
void svm_vcpu_blocking(struct kvm_vcpu *vcpu);
void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);

/* sev.c */

#define GHCB_VERSION_MAX	1ULL
#define GHCB_VERSION_MIN	1ULL

extern unsigned int max_sev_asid;

void sev_vm_destroy(struct kvm *kvm);
int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range);
int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_teardown(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_create_vcpu(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

/* vmenter.S */

void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);

#endif