// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

#define IOPM_SIZE	(PAGE_SIZE * 3)
#define MSRPM_SIZE	(PAGE_SIZE * 2)
29 | ||
adc2a237 | 30 | #define MAX_DIRECT_ACCESS_MSRS 20 |
883b0a91 JR |
31 | #define MSRPM_OFFSETS 16 |
32 | extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly; | |
33 | extern bool npt_enabled; | |
4b639a9f | 34 | extern bool intercept_smi; |
883b0a91 | 35 | |
/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to
 * be updated if this enum is modified.
 */
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,	 /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,	 /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,	 /* DR6, DR7 */
	VMCB_DT,	 /* GDT, IDT */
	VMCB_SEG,	 /* CS, DS, SS, ES, CPL */
	VMCB_CR2,	 /* CR2 only */
	VMCB_LBR,	 /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,	 /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_SW = 31,	 /* Reserved for hypervisor/software use */
};
60 | ||
59d21d67 VP |
61 | #define VMCB_ALL_CLEAN_MASK ( \ |
62 | (1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) | \ | |
63 | (1U << VMCB_ASID) | (1U << VMCB_INTR) | \ | |
64 | (1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) | \ | |
65 | (1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) | \ | |
66 | (1U << VMCB_LBR) | (1U << VMCB_AVIC) | \ | |
67 | (1U << VMCB_SW)) | |
68 | ||
883b0a91 JR |
69 | /* TPR and CR2 are always written before VMRUN */ |
70 | #define VMCB_ALWAYS_DIRTY_MASK ((1U << VMCB_INTR) | (1U << VMCB_CR2)) | |
71 | ||
struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	struct kvm *enc_context_owner; /* Owner of copied encryption context */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
};
84 | ||
85 | struct kvm_svm { | |
86 | struct kvm kvm; | |
87 | ||
88 | /* Struct members for AVIC */ | |
89 | u32 avic_vm_id; | |
90 | struct page *avic_logical_id_table_page; | |
91 | struct page *avic_physical_id_table_page; | |
92 | struct hlist_node hnode; | |
93 | ||
94 | struct kvm_sev_info sev_info; | |
95 | }; | |
96 | ||
97 | struct kvm_vcpu; | |
98 | ||
4995a368 CA |
99 | struct kvm_vmcb_info { |
100 | struct vmcb *ptr; | |
101 | unsigned long pa; | |
af18fa77 | 102 | int cpu; |
193015ad | 103 | uint64_t asid_generation; |
4995a368 CA |
104 | }; |
105 | ||
struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;
	u64 last_vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/*
	 * A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.
	 */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_control_area ctl;

	bool initialized;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	/* vmcb always points at current_vmcb->ptr; it is purely a shorthand. */
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	struct svm_cpu_data *svm_data;
	u32 asid;
	u32 sysenter_esp_hi;
	u32 sysenter_eip_hi;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 spec_ctrl;

	u64 tsc_ratio_msr;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	unsigned int3_injected;
	unsigned long int3_rip;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled		: 1;
	bool tsc_scaling_enabled	: 1;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	bool avic_is_running;

	/*
	 * Per-vCPU list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vCPU affinity. This avoids the need to scan for
	 * the IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;

	/* SEV-ES support */
	struct vmcb_save_area *vmsa;
	struct ghcb *ghcb;
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;

	/* SEV-ES scratch area support */
	void *ghcb_sa;
	u64 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;

	bool guest_state_loaded;
};
203 | ||
eaf78265 JR |
204 | struct svm_cpu_data { |
205 | int cpu; | |
206 | ||
207 | u64 asid_generation; | |
208 | u32 max_asid; | |
209 | u32 next_asid; | |
210 | u32 min_asid; | |
211 | struct kvm_ldttss_desc *tss_desc; | |
212 | ||
213 | struct page *save_area; | |
214 | struct vmcb *current_vmcb; | |
215 | ||
216 | /* index = sev_asid, value = vmcb pointer */ | |
217 | struct vmcb **sev_vmcbs; | |
218 | }; | |
219 | ||
220 | DECLARE_PER_CPU(struct svm_cpu_data *, svm_data); | |
221 | ||
void recalc_intercepts(struct vcpu_svm *svm);

static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}

static inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev_guest(kvm) && sev->es_active;
#else
	return false;
#endif
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline bool vmcb_is_clean(struct vmcb *vmcb, int bit)
{
	return (vmcb->control.clean & (1 << bit));
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	if (!sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	}

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	/* DR7 access must remain intercepted for an SEV-ES guest */
	if (sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
	}

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

/* svm.c */
#define MSR_INVALID			0xffffffffU

extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void svm_flush_tlb(struct kvm_vcpu *vcpu);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier);
void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
				     struct vmcb_control_area *control);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT			31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)

#define VMCB_AVIC_APIC_BAR_MASK				0xFFFFFFFFFF000ULL

static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 *entry = svm->avic_physical_id_cache;

	if (!entry)
		return false;

	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}

int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_post_state_restore(struct kvm_vcpu *vcpu);
void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
		       uint32_t guest_irq, bool set);
void svm_vcpu_blocking(struct kvm_vcpu *vcpu);
void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);

/* sev.c */

#define GHCB_VERSION_MAX	1ULL
#define GHCB_VERSION_MIN	1ULL

extern unsigned int max_sev_asid;

void sev_vm_destroy(struct kvm *kvm);
int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range);
int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_teardown(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_vcpu_reset(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

/* vmenter.S */

void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);

#endif