Commit | Line | Data |
---|---|---|
44a95dae SS |
1 | #define pr_fmt(fmt) "SVM: " fmt |
2 | ||
edf88417 AK |
3 | #include <linux/kvm_host.h> |
4 | ||
85f455f7 | 5 | #include "irq.h" |
1d737c8a | 6 | #include "mmu.h" |
5fdbf976 | 7 | #include "kvm_cache_regs.h" |
fe4c7b19 | 8 | #include "x86.h" |
66f7b72e | 9 | #include "cpuid.h" |
25462f7f | 10 | #include "pmu.h" |
e495606d | 11 | |
6aa8b732 | 12 | #include <linux/module.h> |
ae759544 | 13 | #include <linux/mod_devicetable.h> |
9d8f549d | 14 | #include <linux/kernel.h> |
6aa8b732 AK |
15 | #include <linux/vmalloc.h> |
16 | #include <linux/highmem.h> | |
ef0f6496 | 17 | #include <linux/amd-iommu.h> |
e8edc6e0 | 18 | #include <linux/sched.h> |
af658dca | 19 | #include <linux/trace_events.h> |
5a0e3ad6 | 20 | #include <linux/slab.h> |
5881f737 | 21 | #include <linux/hashtable.h> |
00089c04 | 22 | #include <linux/objtool.h> |
e9df0942 | 23 | #include <linux/psp-sev.h> |
1654efcb | 24 | #include <linux/file.h> |
89c50580 BS |
25 | #include <linux/pagemap.h> |
26 | #include <linux/swap.h> | |
33af3a7e | 27 | #include <linux/rwsem.h> |
6aa8b732 | 28 | |
8221c137 | 29 | #include <asm/apic.h> |
1018faa6 | 30 | #include <asm/perf_event.h> |
67ec6607 | 31 | #include <asm/tlbflush.h> |
e495606d | 32 | #include <asm/desc.h> |
facb0139 | 33 | #include <asm/debugreg.h> |
631bc487 | 34 | #include <asm/kvm_para.h> |
411b44ba | 35 | #include <asm/irq_remapping.h> |
28a27752 | 36 | #include <asm/spec-ctrl.h> |
ba5bade4 | 37 | #include <asm/cpu_device_id.h> |
f1c6366e | 38 | #include <asm/traps.h> |
6aa8b732 | 39 | |
63d1142f | 40 | #include <asm/virtext.h> |
229456fc | 41 | #include "trace.h" |
63d1142f | 42 | |
883b0a91 | 43 | #include "svm.h" |
35a78319 | 44 | #include "svm_ops.h" |
883b0a91 | 45 | |
1e0c7d40 VP |
46 | #include "kvm_onhyperv.h" |
47 | #include "svm_onhyperv.h" | |
48 | ||
4ecac3fd AK |
49 | #define __ex(x) __kvm_handle_fault_on_reboot(x) |
50 | ||
6aa8b732 AK |
51 | MODULE_AUTHOR("Qumranet"); |
52 | MODULE_LICENSE("GPL"); | |
53 | ||
575b255c | 54 | #ifdef MODULE |
ae759544 | 55 | static const struct x86_cpu_id svm_cpu_id[] = { |
320debe5 | 56 | X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL), |
ae759544 JT |
57 | {} |
58 | }; | |
59 | MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id); | |
575b255c | 60 | #endif |
ae759544 | 61 | |
6aa8b732 AK |
62 | #define SEG_TYPE_LDT 2 |
63 | #define SEG_TYPE_BUSY_TSS16 3 | |
64 | ||
6bc31bdc AP |
65 | #define SVM_FEATURE_LBRV (1 << 1) |
66 | #define SVM_FEATURE_SVML (1 << 2) | |
ddce97aa AP |
67 | #define SVM_FEATURE_TSC_RATE (1 << 4) |
68 | #define SVM_FEATURE_VMCB_CLEAN (1 << 5) | |
69 | #define SVM_FEATURE_FLUSH_ASID (1 << 6) | |
70 | #define SVM_FEATURE_DECODE_ASSIST (1 << 7) | |
6bc31bdc | 71 | #define SVM_FEATURE_PAUSE_FILTER (1 << 10) |
80b7706e | 72 | |
24e09cbf JR |
73 | #define DEBUGCTL_RESERVED_BITS (~(0x3fULL)) |
74 | ||
fbc0db76 | 75 | #define TSC_RATIO_RSVD 0xffffff0000000000ULL |
92a1f12d JR |
76 | #define TSC_RATIO_MIN 0x0000000000000001ULL |
77 | #define TSC_RATIO_MAX 0x000000ffffffffffULL | |
fbc0db76 | 78 | |
67ec6607 JR |
79 | static bool erratum_383_found __read_mostly; |
80 | ||
883b0a91 | 81 | u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly; |
323c3d80 | 82 | |
2b036c6b BO |
83 | /* |
84 | * Set osvw_len to higher value when updated Revision Guides | |
85 | * are published and we know what the new status bits are | |
86 | */ | |
87 | static uint64_t osvw_len = 4, osvw_status; | |
88 | ||
fbc0db76 JR |
89 | static DEFINE_PER_CPU(u64, current_tsc_ratio); |
90 | #define TSC_RATIO_DEFAULT 0x0100000000ULL | |
91 | ||
09941fbb | 92 | static const struct svm_direct_access_msrs { |
ac72a9b7 | 93 | u32 index; /* Index of the MSR */ |
376c6d28 | 94 | bool always; /* True if intercept is initially cleared */ |
fd6fa73d | 95 | } direct_access_msrs[MAX_DIRECT_ACCESS_MSRS] = { |
8c06585d | 96 | { .index = MSR_STAR, .always = true }, |
ac72a9b7 | 97 | { .index = MSR_IA32_SYSENTER_CS, .always = true }, |
adc2a237 ML |
98 | { .index = MSR_IA32_SYSENTER_EIP, .always = false }, |
99 | { .index = MSR_IA32_SYSENTER_ESP, .always = false }, | |
ac72a9b7 JR |
100 | #ifdef CONFIG_X86_64 |
101 | { .index = MSR_GS_BASE, .always = true }, | |
102 | { .index = MSR_FS_BASE, .always = true }, | |
103 | { .index = MSR_KERNEL_GS_BASE, .always = true }, | |
104 | { .index = MSR_LSTAR, .always = true }, | |
105 | { .index = MSR_CSTAR, .always = true }, | |
106 | { .index = MSR_SYSCALL_MASK, .always = true }, | |
107 | #endif | |
b2ac58f9 | 108 | { .index = MSR_IA32_SPEC_CTRL, .always = false }, |
15d45071 | 109 | { .index = MSR_IA32_PRED_CMD, .always = false }, |
ac72a9b7 JR |
110 | { .index = MSR_IA32_LASTBRANCHFROMIP, .always = false }, |
111 | { .index = MSR_IA32_LASTBRANCHTOIP, .always = false }, | |
112 | { .index = MSR_IA32_LASTINTFROMIP, .always = false }, | |
113 | { .index = MSR_IA32_LASTINTTOIP, .always = false }, | |
376c6d28 TL |
114 | { .index = MSR_EFER, .always = false }, |
115 | { .index = MSR_IA32_CR_PAT, .always = false }, | |
116 | { .index = MSR_AMD64_SEV_ES_GHCB, .always = true }, | |
ac72a9b7 | 117 | { .index = MSR_INVALID, .always = false }, |
6c8166a7 AK |
118 | }; |
119 | ||
8566ac8b BM |
120 | /* |
121 | * These 2 parameters are used to config the controls for Pause-Loop Exiting: | |
122 | * pause_filter_count: On processors that support Pause filtering(indicated | |
123 | * by CPUID Fn8000_000A_EDX), the VMCB provides a 16 bit pause filter | |
124 | * count value. On VMRUN this value is loaded into an internal counter. | |
125 | * Each time a pause instruction is executed, this counter is decremented | |
126 | * until it reaches zero at which time a #VMEXIT is generated if pause | |
127 | * intercept is enabled. Refer to AMD APM Vol 2 Section 15.14.4 Pause | |
128 | * Intercept Filtering for more details. | |
129 | * This also indicate if ple logic enabled. | |
130 | * | |
131 | * pause_filter_thresh: In addition, some processor families support advanced | |
132 | * pause filtering (indicated by CPUID Fn8000_000A_EDX) upper bound on | |
133 | * the amount of time a guest is allowed to execute in a pause loop. | |
134 | * In this mode, a 16-bit pause filter threshold field is added in the | |
135 | * VMCB. The threshold value is a cycle count that is used to reset the | |
136 | * pause counter. As with simple pause filtering, VMRUN loads the pause | |
137 | * count value from VMCB into an internal counter. Then, on each pause | |
138 | * instruction the hardware checks the elapsed number of cycles since | |
139 | * the most recent pause instruction against the pause filter threshold. | |
140 | * If the elapsed cycle count is greater than the pause filter threshold, | |
141 | * then the internal pause count is reloaded from the VMCB and execution | |
142 | * continues. If the elapsed cycle count is less than the pause filter | |
143 | * threshold, then the internal pause count is decremented. If the count | |
144 | * value is less than zero and PAUSE intercept is enabled, a #VMEXIT is | |
145 | * triggered. If advanced pause filtering is supported and pause filter | |
146 | * threshold field is set to zero, the filter will operate in the simpler, | |
147 | * count only mode. | |
148 | */ | |
149 | ||
150 | static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP; | |
151 | module_param(pause_filter_thresh, ushort, 0444); | |
152 | ||
153 | static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW; | |
154 | module_param(pause_filter_count, ushort, 0444); | |
155 | ||
156 | /* Default doubles per-vcpu window every exit. */ | |
157 | static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW; | |
158 | module_param(pause_filter_count_grow, ushort, 0444); | |
159 | ||
160 | /* Default resets per-vcpu window every exit to pause_filter_count. */ | |
161 | static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK; | |
162 | module_param(pause_filter_count_shrink, ushort, 0444); | |
163 | ||
164 | /* Default is to compute the maximum so we can never overflow. */ | |
165 | static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX; | |
166 | module_param(pause_filter_count_max, ushort, 0444); | |
167 | ||
99840a75 SC |
168 | /* |
169 | * Use nested page tables by default. Note, NPT may get forced off by | |
170 | * svm_hardware_setup() if it's unsupported by hardware or the host kernel. | |
171 | */ | |
172 | bool npt_enabled = true; | |
173 | module_param_named(npt, npt_enabled, bool, 0444); | |
e3da3acd | 174 | |
e2358851 DB |
175 | /* allow nested virtualization in KVM/SVM */ |
176 | static int nested = true; | |
236de055 AG |
177 | module_param(nested, int, S_IRUGO); |
178 | ||
d647eb63 PB |
179 | /* enable/disable Next RIP Save */ |
180 | static int nrips = true; | |
181 | module_param(nrips, int, 0444); | |
182 | ||
89c8a498 JN |
183 | /* enable/disable Virtual VMLOAD VMSAVE */ |
184 | static int vls = true; | |
185 | module_param(vls, int, 0444); | |
186 | ||
640bd6e5 JN |
187 | /* enable/disable Virtual GIF */ |
188 | static int vgif = true; | |
189 | module_param(vgif, int, 0444); | |
5ea11f2b | 190 | |
fdf513e3 VK |
191 | /* |
192 | * enable / disable AVIC. Because the defaults differ for APICv | |
193 | * support between VMX and SVM we cannot use module_param_named. | |
194 | */ | |
195 | static bool avic; | |
196 | module_param(avic, bool, 0444); | |
197 | ||
291bd20d | 198 | bool __read_mostly dump_invalid_vmcb; |
6f2f8453 PB |
199 | module_param(dump_invalid_vmcb, bool, 0644); |
200 | ||
4b639a9f ML |
201 | |
202 | bool intercept_smi = true; | |
203 | module_param(intercept_smi, bool, 0444); | |
204 | ||
205 | ||
2e215216 | 206 | static bool svm_gp_erratum_intercept = true; |
82a11e9c | 207 | |
7607b717 BS |
208 | static u8 rsm_ins_bytes[] = "\x0f\xaa"; |
209 | ||
4866d5e3 | 210 | static unsigned long iopm_base; |
6aa8b732 AK |
211 | |
212 | struct kvm_ldttss_desc { | |
213 | u16 limit0; | |
214 | u16 base0; | |
e0231715 JR |
215 | unsigned base1:8, type:5, dpl:2, p:1; |
216 | unsigned limit1:4, zero0:3, g:1, base2:8; | |
6aa8b732 AK |
217 | u32 base3; |
218 | u32 zero1; | |
219 | } __attribute__((packed)); | |
220 | ||
eaf78265 | 221 | DEFINE_PER_CPU(struct svm_cpu_data *, svm_data); |
6aa8b732 | 222 | |
844d69c2 SC |
223 | /* |
224 | * Only MSR_TSC_AUX is switched via the user return hook. EFER is switched via | |
225 | * the VMCB, and the SYSCALL/SYSENTER MSRs are handled by VMLOAD/VMSAVE. | |
226 | * | |
227 | * RDTSCP and RDPID are not used in the kernel, specifically to allow KVM to | |
228 | * defer the restoration of TSC_AUX until the CPU returns to userspace. | |
229 | */ | |
0caa0a77 | 230 | static int tsc_aux_uret_slot __read_mostly = -1; |
844d69c2 | 231 | |
09941fbb | 232 | static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000}; |
6aa8b732 | 233 | |
9d8f549d | 234 | #define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges) |
6aa8b732 AK |
235 | #define MSRS_RANGE_SIZE 2048 |
236 | #define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2) | |
237 | ||
883b0a91 | 238 | u32 svm_msrpm_offset(u32 msr) |
455716fa JR |
239 | { |
240 | u32 offset; | |
241 | int i; | |
242 | ||
243 | for (i = 0; i < NUM_MSR_MAPS; i++) { | |
244 | if (msr < msrpm_ranges[i] || | |
245 | msr >= msrpm_ranges[i] + MSRS_IN_RANGE) | |
246 | continue; | |
247 | ||
248 | offset = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */ | |
249 | offset += (i * MSRS_RANGE_SIZE); /* add range offset */ | |
250 | ||
251 | /* Now we have the u8 offset - but need the u32 offset */ | |
252 | return offset / 4; | |
253 | } | |
254 | ||
255 | /* MSR not in any range */ | |
256 | return MSR_INVALID; | |
257 | } | |
258 | ||
6aa8b732 AK |
259 | #define MAX_INST_SIZE 15 |
260 | ||
d468d94b | 261 | static int get_max_npt_level(void) |
4b16184c JR |
262 | { |
263 | #ifdef CONFIG_X86_64 | |
2a7266a8 | 264 | return PT64_ROOT_4LEVEL; |
4b16184c JR |
265 | #else |
266 | return PT32E_ROOT_LEVEL; | |
267 | #endif | |
268 | } | |
269 | ||
72f211ec | 270 | int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) |
6aa8b732 | 271 | { |
c513f484 | 272 | struct vcpu_svm *svm = to_svm(vcpu); |
2fcf4876 | 273 | u64 old_efer = vcpu->arch.efer; |
6dc696d4 | 274 | vcpu->arch.efer = efer; |
9167ab79 PB |
275 | |
276 | if (!npt_enabled) { | |
277 | /* Shadow paging assumes NX to be available. */ | |
278 | efer |= EFER_NX; | |
279 | ||
280 | if (!(efer & EFER_LMA)) | |
281 | efer &= ~EFER_LME; | |
282 | } | |
6aa8b732 | 283 | |
2fcf4876 ML |
284 | if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) { |
285 | if (!(efer & EFER_SVME)) { | |
286 | svm_leave_nested(svm); | |
287 | svm_set_gif(svm, true); | |
82a11e9c BD |
288 | /* #GP intercept is still needed for vmware backdoor */ |
289 | if (!enable_vmware_backdoor) | |
290 | clr_exception_intercept(svm, GP_VECTOR); | |
2fcf4876 ML |
291 | |
292 | /* | |
293 | * Free the nested guest state, unless we are in SMM. | |
294 | * In this case we will return to the nested guest | |
295 | * as soon as we leave SMM. | |
296 | */ | |
63129754 | 297 | if (!is_smm(vcpu)) |
2fcf4876 ML |
298 | svm_free_nested(svm); |
299 | ||
300 | } else { | |
301 | int ret = svm_allocate_nested(svm); | |
302 | ||
303 | if (ret) { | |
304 | vcpu->arch.efer = old_efer; | |
305 | return ret; | |
306 | } | |
82a11e9c BD |
307 | |
308 | if (svm_gp_erratum_intercept) | |
309 | set_exception_intercept(svm, GP_VECTOR); | |
2fcf4876 | 310 | } |
c513f484 PB |
311 | } |
312 | ||
313 | svm->vmcb->save.efer = efer | EFER_SVME; | |
06e7852c | 314 | vmcb_mark_dirty(svm->vmcb, VMCB_CR); |
72f211ec | 315 | return 0; |
6aa8b732 AK |
316 | } |
317 | ||
6aa8b732 AK |
318 | static int is_external_interrupt(u32 info) |
319 | { | |
320 | info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID; | |
321 | return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR); | |
322 | } | |
323 | ||
37ccdcbe | 324 | static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu) |
2809f5d2 GC |
325 | { |
326 | struct vcpu_svm *svm = to_svm(vcpu); | |
327 | u32 ret = 0; | |
328 | ||
329 | if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) | |
37ccdcbe PB |
330 | ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS; |
331 | return ret; | |
2809f5d2 GC |
332 | } |
333 | ||
334 | static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) | |
335 | { | |
336 | struct vcpu_svm *svm = to_svm(vcpu); | |
337 | ||
338 | if (mask == 0) | |
339 | svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; | |
340 | else | |
341 | svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK; | |
342 | ||
343 | } | |
344 | ||
f8ea7c60 | 345 | static int skip_emulated_instruction(struct kvm_vcpu *vcpu) |
6aa8b732 | 346 | { |
a2fa3e9f GH |
347 | struct vcpu_svm *svm = to_svm(vcpu); |
348 | ||
f1c6366e TL |
349 | /* |
350 | * SEV-ES does not expose the next RIP. The RIP update is controlled by | |
351 | * the type of exit and the #VC handler in the guest. | |
352 | */ | |
353 | if (sev_es_guest(vcpu->kvm)) | |
354 | goto done; | |
355 | ||
d647eb63 | 356 | if (nrips && svm->vmcb->control.next_rip != 0) { |
d2922422 | 357 | WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS)); |
6bc31bdc | 358 | svm->next_rip = svm->vmcb->control.next_rip; |
f104765b | 359 | } |
6bc31bdc | 360 | |
1957aa63 SC |
361 | if (!svm->next_rip) { |
362 | if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP)) | |
363 | return 0; | |
364 | } else { | |
1957aa63 SC |
365 | kvm_rip_write(vcpu, svm->next_rip); |
366 | } | |
f1c6366e TL |
367 | |
368 | done: | |
2809f5d2 | 369 | svm_set_interrupt_shadow(vcpu, 0); |
f8ea7c60 | 370 | |
60fc3d02 | 371 | return 1; |
6aa8b732 AK |
372 | } |
373 | ||
cfcd20e5 | 374 | static void svm_queue_exception(struct kvm_vcpu *vcpu) |
116a4752 JK |
375 | { |
376 | struct vcpu_svm *svm = to_svm(vcpu); | |
cfcd20e5 WL |
377 | unsigned nr = vcpu->arch.exception.nr; |
378 | bool has_error_code = vcpu->arch.exception.has_error_code; | |
cfcd20e5 | 379 | u32 error_code = vcpu->arch.exception.error_code; |
116a4752 | 380 | |
63129754 | 381 | kvm_deliver_exception_payload(vcpu); |
da998b46 | 382 | |
d647eb63 | 383 | if (nr == BP_VECTOR && !nrips) { |
63129754 | 384 | unsigned long rip, old_rip = kvm_rip_read(vcpu); |
66b7138f JK |
385 | |
386 | /* | |
387 | * For guest debugging where we have to reinject #BP if some | |
388 | * INT3 is guest-owned: | |
389 | * Emulate nRIP by moving RIP forward. Will fail if injection | |
390 | * raises a fault that is not intercepted. Still better than | |
391 | * failing in all cases. | |
392 | */ | |
63129754 PB |
393 | (void)skip_emulated_instruction(vcpu); |
394 | rip = kvm_rip_read(vcpu); | |
66b7138f JK |
395 | svm->int3_rip = rip + svm->vmcb->save.cs.base; |
396 | svm->int3_injected = rip - old_rip; | |
397 | } | |
398 | ||
116a4752 JK |
399 | svm->vmcb->control.event_inj = nr |
400 | | SVM_EVTINJ_VALID | |
401 | | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0) | |
402 | | SVM_EVTINJ_TYPE_EXEPT; | |
403 | svm->vmcb->control.event_inj_err = error_code; | |
404 | } | |
405 | ||
67ec6607 JR |
406 | static void svm_init_erratum_383(void) |
407 | { | |
408 | u32 low, high; | |
409 | int err; | |
410 | u64 val; | |
411 | ||
e6ee94d5 | 412 | if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH)) |
67ec6607 JR |
413 | return; |
414 | ||
415 | /* Use _safe variants to not break nested virtualization */ | |
416 | val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err); | |
417 | if (err) | |
418 | return; | |
419 | ||
420 | val |= (1ULL << 47); | |
421 | ||
422 | low = lower_32_bits(val); | |
423 | high = upper_32_bits(val); | |
424 | ||
425 | native_write_msr_safe(MSR_AMD64_DC_CFG, low, high); | |
426 | ||
427 | erratum_383_found = true; | |
428 | } | |
429 | ||
2b036c6b BO |
430 | static void svm_init_osvw(struct kvm_vcpu *vcpu) |
431 | { | |
432 | /* | |
433 | * Guests should see errata 400 and 415 as fixed (assuming that | |
434 | * HLT and IO instructions are intercepted). | |
435 | */ | |
436 | vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3; | |
437 | vcpu->arch.osvw.status = osvw_status & ~(6ULL); | |
438 | ||
439 | /* | |
440 | * By increasing VCPU's osvw.length to 3 we are telling the guest that | |
441 | * all osvw.status bits inside that length, including bit 0 (which is | |
442 | * reserved for erratum 298), are valid. However, if host processor's | |
443 | * osvw_len is 0 then osvw_status[0] carries no information. We need to | |
444 | * be conservative here and therefore we tell the guest that erratum 298 | |
445 | * is present (because we really don't know). | |
446 | */ | |
447 | if (osvw_len == 0 && boot_cpu_data.x86 == 0x10) | |
448 | vcpu->arch.osvw.status |= 1; | |
449 | } | |
450 | ||
6aa8b732 AK |
451 | static int has_svm(void) |
452 | { | |
63d1142f | 453 | const char *msg; |
6aa8b732 | 454 | |
63d1142f | 455 | if (!cpu_has_svm(&msg)) { |
ff81ff10 | 456 | printk(KERN_INFO "has_svm: %s\n", msg); |
6aa8b732 AK |
457 | return 0; |
458 | } | |
459 | ||
ccd85d90 SC |
460 | if (sev_active()) { |
461 | pr_info("KVM is unsupported when running as an SEV guest\n"); | |
462 | return 0; | |
463 | } | |
464 | ||
03ca4589 SC |
465 | if (pgtable_l5_enabled()) { |
466 | pr_info("KVM doesn't yet support 5-level paging on AMD SVM\n"); | |
467 | return 0; | |
468 | } | |
469 | ||
6aa8b732 AK |
470 | return 1; |
471 | } | |
472 | ||
13a34e06 | 473 | static void svm_hardware_disable(void) |
6aa8b732 | 474 | { |
fbc0db76 JR |
475 | /* Make sure we clean up behind us */ |
476 | if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) | |
477 | wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT); | |
478 | ||
2c8dceeb | 479 | cpu_svm_disable(); |
1018faa6 JR |
480 | |
481 | amd_pmu_disable_virt(); | |
6aa8b732 AK |
482 | } |
483 | ||
13a34e06 | 484 | static int svm_hardware_enable(void) |
6aa8b732 AK |
485 | { |
486 | ||
0fe1e009 | 487 | struct svm_cpu_data *sd; |
6aa8b732 | 488 | uint64_t efer; |
6aa8b732 AK |
489 | struct desc_struct *gdt; |
490 | int me = raw_smp_processor_id(); | |
491 | ||
10474ae8 AG |
492 | rdmsrl(MSR_EFER, efer); |
493 | if (efer & EFER_SVME) | |
494 | return -EBUSY; | |
495 | ||
6aa8b732 | 496 | if (!has_svm()) { |
1f5b77f5 | 497 | pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me); |
10474ae8 | 498 | return -EINVAL; |
6aa8b732 | 499 | } |
0fe1e009 | 500 | sd = per_cpu(svm_data, me); |
0fe1e009 | 501 | if (!sd) { |
1f5b77f5 | 502 | pr_err("%s: svm_data is NULL on %d\n", __func__, me); |
10474ae8 | 503 | return -EINVAL; |
6aa8b732 AK |
504 | } |
505 | ||
0fe1e009 TH |
506 | sd->asid_generation = 1; |
507 | sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1; | |
508 | sd->next_asid = sd->max_asid + 1; | |
ed3cd233 | 509 | sd->min_asid = max_sev_asid + 1; |
6aa8b732 | 510 | |
45fc8757 | 511 | gdt = get_current_gdt_rw(); |
0fe1e009 | 512 | sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS); |
6aa8b732 | 513 | |
9962d032 | 514 | wrmsrl(MSR_EFER, efer | EFER_SVME); |
6aa8b732 | 515 | |
85ca8be9 | 516 | wrmsrl(MSR_VM_HSAVE_PA, __sme_page_pa(sd->save_area)); |
10474ae8 | 517 | |
fbc0db76 JR |
518 | if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) { |
519 | wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT); | |
89cbc767 | 520 | __this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT); |
fbc0db76 JR |
521 | } |
522 | ||
2b036c6b BO |
523 | |
524 | /* | |
525 | * Get OSVW bits. | |
526 | * | |
527 | * Note that it is possible to have a system with mixed processor | |
528 | * revisions and therefore different OSVW bits. If bits are not the same | |
529 | * on different processors then choose the worst case (i.e. if erratum | |
530 | * is present on one processor and not on another then assume that the | |
531 | * erratum is present everywhere). | |
532 | */ | |
533 | if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) { | |
534 | uint64_t len, status = 0; | |
535 | int err; | |
536 | ||
537 | len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err); | |
538 | if (!err) | |
539 | status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS, | |
540 | &err); | |
541 | ||
542 | if (err) | |
543 | osvw_status = osvw_len = 0; | |
544 | else { | |
545 | if (len < osvw_len) | |
546 | osvw_len = len; | |
547 | osvw_status |= status; | |
548 | osvw_status &= (1ULL << osvw_len) - 1; | |
549 | } | |
550 | } else | |
551 | osvw_status = osvw_len = 0; | |
552 | ||
67ec6607 JR |
553 | svm_init_erratum_383(); |
554 | ||
1018faa6 JR |
555 | amd_pmu_enable_virt(); |
556 | ||
10474ae8 | 557 | return 0; |
6aa8b732 AK |
558 | } |
559 | ||
0da1db75 JR |
560 | static void svm_cpu_uninit(int cpu) |
561 | { | |
a2b2d4bf | 562 | struct svm_cpu_data *sd = per_cpu(svm_data, cpu); |
0da1db75 | 563 | |
0fe1e009 | 564 | if (!sd) |
0da1db75 JR |
565 | return; |
566 | ||
a2b2d4bf | 567 | per_cpu(svm_data, cpu) = NULL; |
70cd94e6 | 568 | kfree(sd->sev_vmcbs); |
0fe1e009 TH |
569 | __free_page(sd->save_area); |
570 | kfree(sd); | |
0da1db75 JR |
571 | } |
572 | ||
6aa8b732 AK |
573 | static int svm_cpu_init(int cpu) |
574 | { | |
0fe1e009 | 575 | struct svm_cpu_data *sd; |
b95c221c | 576 | int ret = -ENOMEM; |
6aa8b732 | 577 | |
0fe1e009 TH |
578 | sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL); |
579 | if (!sd) | |
b95c221c | 580 | return ret; |
0fe1e009 | 581 | sd->cpu = cpu; |
70cd94e6 | 582 | sd->save_area = alloc_page(GFP_KERNEL); |
0fe1e009 | 583 | if (!sd->save_area) |
d80b64ff | 584 | goto free_cpu_data; |
b95c221c | 585 | |
85ca8be9 | 586 | clear_page(page_address(sd->save_area)); |
6aa8b732 | 587 | |
b95c221c SC |
588 | ret = sev_cpu_init(sd); |
589 | if (ret) | |
590 | goto free_save_area; | |
70cd94e6 | 591 | |
0fe1e009 | 592 | per_cpu(svm_data, cpu) = sd; |
6aa8b732 AK |
593 | |
594 | return 0; | |
595 | ||
d80b64ff ML |
596 | free_save_area: |
597 | __free_page(sd->save_area); | |
598 | free_cpu_data: | |
0fe1e009 | 599 | kfree(sd); |
b95c221c | 600 | return ret; |
6aa8b732 AK |
601 | |
602 | } | |
603 | ||
fd6fa73d | 604 | static int direct_access_msr_slot(u32 msr) |
ac72a9b7 | 605 | { |
fd6fa73d | 606 | u32 i; |
ac72a9b7 JR |
607 | |
608 | for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) | |
fd6fa73d AG |
609 | if (direct_access_msrs[i].index == msr) |
610 | return i; | |
ac72a9b7 | 611 | |
fd6fa73d AG |
612 | return -ENOENT; |
613 | } | |
614 | ||
615 | static void set_shadow_msr_intercept(struct kvm_vcpu *vcpu, u32 msr, int read, | |
616 | int write) | |
617 | { | |
618 | struct vcpu_svm *svm = to_svm(vcpu); | |
619 | int slot = direct_access_msr_slot(msr); | |
620 | ||
621 | if (slot == -ENOENT) | |
622 | return; | |
623 | ||
624 | /* Set the shadow bitmaps to the desired intercept states */ | |
625 | if (read) | |
626 | set_bit(slot, svm->shadow_msr_intercept.read); | |
627 | else | |
628 | clear_bit(slot, svm->shadow_msr_intercept.read); | |
629 | ||
630 | if (write) | |
631 | set_bit(slot, svm->shadow_msr_intercept.write); | |
632 | else | |
633 | clear_bit(slot, svm->shadow_msr_intercept.write); | |
ac72a9b7 JR |
634 | } |
635 | ||
fd6fa73d AG |
636 | static bool valid_msr_intercept(u32 index) |
637 | { | |
638 | return direct_access_msr_slot(index) != -ENOENT; | |
ac72a9b7 JR |
639 | } |
640 | ||
476c9bd8 | 641 | static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr) |
b2ac58f9 KA |
642 | { |
643 | u8 bit_write; | |
644 | unsigned long tmp; | |
645 | u32 offset; | |
646 | u32 *msrpm; | |
647 | ||
648 | msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm: | |
649 | to_svm(vcpu)->msrpm; | |
650 | ||
651 | offset = svm_msrpm_offset(msr); | |
652 | bit_write = 2 * (msr & 0x0f) + 1; | |
653 | tmp = msrpm[offset]; | |
654 | ||
655 | BUG_ON(offset == MSR_INVALID); | |
656 | ||
657 | return !!test_bit(bit_write, &tmp); | |
658 | } | |
659 | ||
fd6fa73d AG |
660 | static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm, |
661 | u32 msr, int read, int write) | |
6aa8b732 | 662 | { |
455716fa JR |
663 | u8 bit_read, bit_write; |
664 | unsigned long tmp; | |
665 | u32 offset; | |
6aa8b732 | 666 | |
ac72a9b7 JR |
667 | /* |
668 | * If this warning triggers extend the direct_access_msrs list at the | |
669 | * beginning of the file | |
670 | */ | |
671 | WARN_ON(!valid_msr_intercept(msr)); | |
672 | ||
fd6fa73d AG |
673 | /* Enforce non allowed MSRs to trap */ |
674 | if (read && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ)) | |
675 | read = 0; | |
676 | ||
677 | if (write && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE)) | |
678 | write = 0; | |
679 | ||
455716fa JR |
680 | offset = svm_msrpm_offset(msr); |
681 | bit_read = 2 * (msr & 0x0f); | |
682 | bit_write = 2 * (msr & 0x0f) + 1; | |
683 | tmp = msrpm[offset]; | |
684 | ||
685 | BUG_ON(offset == MSR_INVALID); | |
686 | ||
687 | read ? clear_bit(bit_read, &tmp) : set_bit(bit_read, &tmp); | |
688 | write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp); | |
689 | ||
690 | msrpm[offset] = tmp; | |
c4327f15 VP |
691 | |
692 | svm_hv_vmcb_dirty_nested_enlightenments(vcpu); | |
693 | ||
6aa8b732 AK |
694 | } |
695 | ||
376c6d28 TL |
696 | void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr, |
697 | int read, int write) | |
6aa8b732 | 698 | { |
fd6fa73d AG |
699 | set_shadow_msr_intercept(vcpu, msr, read, write); |
700 | set_msr_interception_bitmap(vcpu, msrpm, msr, read, write); | |
701 | } | |
702 | ||
2fcf4876 | 703 | u32 *svm_vcpu_alloc_msrpm(void) |
6aa8b732 | 704 | { |
47903dc1 KS |
705 | unsigned int order = get_order(MSRPM_SIZE); |
706 | struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, order); | |
476c9bd8 | 707 | u32 *msrpm; |
f4c847a9 ML |
708 | |
709 | if (!pages) | |
710 | return NULL; | |
6aa8b732 | 711 | |
f4c847a9 | 712 | msrpm = page_address(pages); |
47903dc1 | 713 | memset(msrpm, 0xff, PAGE_SIZE * (1 << order)); |
f65c229c | 714 | |
476c9bd8 AL |
715 | return msrpm; |
716 | } | |
717 | ||
2fcf4876 | 718 | void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm) |
476c9bd8 AL |
719 | { |
720 | int i; | |
721 | ||
ac72a9b7 JR |
722 | for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) { |
723 | if (!direct_access_msrs[i].always) | |
724 | continue; | |
476c9bd8 | 725 | set_msr_interception(vcpu, msrpm, direct_access_msrs[i].index, 1, 1); |
ac72a9b7 | 726 | } |
f4c847a9 | 727 | } |
ac72a9b7 | 728 | |
2fcf4876 ML |
729 | |
730 | void svm_vcpu_free_msrpm(u32 *msrpm) | |
f4c847a9 | 731 | { |
47903dc1 | 732 | __free_pages(virt_to_page(msrpm), get_order(MSRPM_SIZE)); |
f65c229c JR |
733 | } |
734 | ||
fd6fa73d AG |
735 | static void svm_msr_filter_changed(struct kvm_vcpu *vcpu) |
736 | { | |
737 | struct vcpu_svm *svm = to_svm(vcpu); | |
738 | u32 i; | |
739 | ||
740 | /* | |
741 | * Set intercept permissions for all direct access MSRs again. They | |
742 | * will automatically get filtered through the MSR filter, so we are | |
743 | * back in sync after this. | |
744 | */ | |
745 | for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) { | |
746 | u32 msr = direct_access_msrs[i].index; | |
747 | u32 read = test_bit(i, svm->shadow_msr_intercept.read); | |
748 | u32 write = test_bit(i, svm->shadow_msr_intercept.write); | |
749 | ||
750 | set_msr_interception_bitmap(vcpu, svm->msrpm, msr, read, write); | |
ac72a9b7 | 751 | } |
f65c229c JR |
752 | } |
753 | ||
323c3d80 JR |
754 | static void add_msr_offset(u32 offset) |
755 | { | |
756 | int i; | |
757 | ||
758 | for (i = 0; i < MSRPM_OFFSETS; ++i) { | |
759 | ||
760 | /* Offset already in list? */ | |
761 | if (msrpm_offsets[i] == offset) | |
bfc733a7 | 762 | return; |
323c3d80 JR |
763 | |
764 | /* Slot used by another offset? */ | |
765 | if (msrpm_offsets[i] != MSR_INVALID) | |
766 | continue; | |
767 | ||
768 | /* Add offset to list */ | |
769 | msrpm_offsets[i] = offset; | |
770 | ||
771 | return; | |
6aa8b732 | 772 | } |
323c3d80 JR |
773 | |
774 | /* | |
775 | * If this BUG triggers the msrpm_offsets table has an overflow. Just | |
776 | * increase MSRPM_OFFSETS in this case. | |
777 | */ | |
bfc733a7 | 778 | BUG(); |
6aa8b732 AK |
779 | } |
780 | ||
323c3d80 | 781 | static void init_msrpm_offsets(void) |
f65c229c | 782 | { |
323c3d80 | 783 | int i; |
f65c229c | 784 | |
323c3d80 JR |
785 | memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets)); |
786 | ||
787 | for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) { | |
788 | u32 offset; | |
789 | ||
790 | offset = svm_msrpm_offset(direct_access_msrs[i].index); | |
791 | BUG_ON(offset == MSR_INVALID); | |
792 | ||
793 | add_msr_offset(offset); | |
794 | } | |
f65c229c JR |
795 | } |
796 | ||
476c9bd8 | 797 | static void svm_enable_lbrv(struct kvm_vcpu *vcpu) |
24e09cbf | 798 | { |
476c9bd8 | 799 | struct vcpu_svm *svm = to_svm(vcpu); |
24e09cbf | 800 | |
0dc92119 | 801 | svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK; |
476c9bd8 AL |
802 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1); |
803 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1); | |
804 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1); | |
805 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1); | |
24e09cbf JR |
806 | } |
807 | ||
476c9bd8 | 808 | static void svm_disable_lbrv(struct kvm_vcpu *vcpu) |
24e09cbf | 809 | { |
476c9bd8 | 810 | struct vcpu_svm *svm = to_svm(vcpu); |
24e09cbf | 811 | |
0dc92119 | 812 | svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK; |
476c9bd8 AL |
813 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0); |
814 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0); | |
815 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 0, 0); | |
816 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 0, 0); | |
24e09cbf JR |
817 | } |
818 | ||
883b0a91 | 819 | void disable_nmi_singlestep(struct vcpu_svm *svm) |
4aebd0e9 LP |
820 | { |
821 | svm->nmi_singlestep = false; | |
640bd6e5 | 822 | |
ab2f4d73 LP |
823 | if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) { |
824 | /* Clear our flags if they were not set by the guest */ | |
825 | if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) | |
826 | svm->vmcb->save.rflags &= ~X86_EFLAGS_TF; | |
827 | if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) | |
828 | svm->vmcb->save.rflags &= ~X86_EFLAGS_RF; | |
829 | } | |
4aebd0e9 LP |
830 | } |
831 | ||
8566ac8b BM |
832 | static void grow_ple_window(struct kvm_vcpu *vcpu) |
833 | { | |
834 | struct vcpu_svm *svm = to_svm(vcpu); | |
835 | struct vmcb_control_area *control = &svm->vmcb->control; | |
836 | int old = control->pause_filter_count; | |
837 | ||
838 | control->pause_filter_count = __grow_ple_window(old, | |
839 | pause_filter_count, | |
840 | pause_filter_count_grow, | |
841 | pause_filter_count_max); | |
842 | ||
4f75bcc3 | 843 | if (control->pause_filter_count != old) { |
06e7852c | 844 | vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); |
4f75bcc3 PX |
845 | trace_kvm_ple_window_update(vcpu->vcpu_id, |
846 | control->pause_filter_count, old); | |
847 | } | |
8566ac8b BM |
848 | } |
849 | ||
850 | static void shrink_ple_window(struct kvm_vcpu *vcpu) | |
851 | { | |
852 | struct vcpu_svm *svm = to_svm(vcpu); | |
853 | struct vmcb_control_area *control = &svm->vmcb->control; | |
854 | int old = control->pause_filter_count; | |
855 | ||
856 | control->pause_filter_count = | |
857 | __shrink_ple_window(old, | |
858 | pause_filter_count, | |
859 | pause_filter_count_shrink, | |
860 | pause_filter_count); | |
4f75bcc3 | 861 | if (control->pause_filter_count != old) { |
06e7852c | 862 | vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); |
4f75bcc3 PX |
863 | trace_kvm_ple_window_update(vcpu->vcpu_id, |
864 | control->pause_filter_count, old); | |
865 | } | |
8566ac8b BM |
866 | } |
867 | ||
52918ed5 TL |
868 | /* |
869 | * The default MMIO mask is a single bit (excluding the present bit), | |
870 | * which could conflict with the memory encryption bit. Check for | |
871 | * memory encryption support and override the default MMIO mask if | |
872 | * memory encryption is enabled. | |
873 | */ | |
874 | static __init void svm_adjust_mmio_mask(void) | |
875 | { | |
876 | unsigned int enc_bit, mask_bit; | |
877 | u64 msr, mask; | |
878 | ||
879 | /* If there is no memory encryption support, use existing mask */ | |
880 | if (cpuid_eax(0x80000000) < 0x8000001f) | |
881 | return; | |
882 | ||
883 | /* If memory encryption is not enabled, use existing mask */ | |
059e5c32 BS |
884 | rdmsrl(MSR_AMD64_SYSCFG, msr); |
885 | if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT)) | |
52918ed5 TL |
886 | return; |
887 | ||
888 | enc_bit = cpuid_ebx(0x8000001f) & 0x3f; | |
889 | mask_bit = boot_cpu_data.x86_phys_bits; | |
890 | ||
891 | /* Increment the mask bit if it is the same as the encryption bit */ | |
892 | if (enc_bit == mask_bit) | |
893 | mask_bit++; | |
894 | ||
895 | /* | |
896 | * If the mask bit location is below 52, then some bits above the | |
897 | * physical addressing limit will always be reserved, so use the | |
898 | * rsvd_bits() function to generate the mask. This mask, along with | |
899 | * the present bit, will be used to generate a page fault with | |
900 | * PFER.RSV = 1. | |
901 | * | |
902 | * If the mask bit location is 52 (or above), then clear the mask. | |
903 | */ | |
904 | mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0; | |
905 | ||
8120337a | 906 | kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK); |
52918ed5 TL |
907 | } |
908 | ||
dd58f3c9 LR |
909 | static void svm_hardware_teardown(void) |
910 | { | |
911 | int cpu; | |
912 | ||
4cafd0c5 | 913 | sev_hardware_teardown(); |
dd58f3c9 LR |
914 | |
915 | for_each_possible_cpu(cpu) | |
916 | svm_cpu_uninit(cpu); | |
917 | ||
47903dc1 KS |
918 | __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), |
919 | get_order(IOPM_SIZE)); | |
dd58f3c9 LR |
920 | iopm_base = 0; |
921 | } | |
922 | ||
9b58b985 SC |
923 | static __init void svm_set_cpu_caps(void) |
924 | { | |
925 | kvm_set_cpu_caps(); | |
926 | ||
408e9a31 PB |
927 | supported_xss = 0; |
928 | ||
a50718cc SC |
929 | /* CPUID 0x80000001 and 0x8000000A (SVM features) */ |
930 | if (nested) { | |
9b58b985 SC |
931 | kvm_cpu_cap_set(X86_FEATURE_SVM); |
932 | ||
4eb87460 | 933 | if (nrips) |
a50718cc SC |
934 | kvm_cpu_cap_set(X86_FEATURE_NRIPS); |
935 | ||
936 | if (npt_enabled) | |
937 | kvm_cpu_cap_set(X86_FEATURE_NPT); | |
14c2bf81 WH |
938 | |
939 | /* Nested VM can receive #VMEXIT instead of triggering #GP */ | |
940 | kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK); | |
a50718cc SC |
941 | } |
942 | ||
93c380e7 SC |
943 | /* CPUID 0x80000008 */ |
944 | if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) || | |
945 | boot_cpu_has(X86_FEATURE_AMD_SSBD)) | |
946 | kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD); | |
d9db0fd6 PB |
947 | |
948 | /* CPUID 0x8000001F (SME/SEV features) */ | |
949 | sev_set_cpu_caps(); | |
9b58b985 SC |
950 | } |
951 | ||
6aa8b732 AK |
952 | static __init int svm_hardware_setup(void) |
953 | { | |
954 | int cpu; | |
955 | struct page *iopm_pages; | |
f65c229c | 956 | void *iopm_va; |
6aa8b732 | 957 | int r; |
47903dc1 | 958 | unsigned int order = get_order(IOPM_SIZE); |
6aa8b732 | 959 | |
b26a71a1 SC |
960 | /* |
961 | * NX is required for shadow paging and for NPT if the NX huge pages | |
962 | * mitigation is enabled. | |
963 | */ | |
964 | if (!boot_cpu_has(X86_FEATURE_NX)) { | |
965 | pr_err_ratelimited("NX (Execute Disable) not supported\n"); | |
966 | return -EOPNOTSUPP; | |
967 | } | |
968 | kvm_enable_efer_bits(EFER_NX); | |
969 | ||
47903dc1 | 970 | iopm_pages = alloc_pages(GFP_KERNEL, order); |
6aa8b732 AK |
971 | |
972 | if (!iopm_pages) | |
973 | return -ENOMEM; | |
c8681339 AL |
974 | |
975 | iopm_va = page_address(iopm_pages); | |
47903dc1 | 976 | memset(iopm_va, 0xff, PAGE_SIZE * (1 << order)); |
6aa8b732 AK |
977 | iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT; |
978 | ||
323c3d80 JR |
979 | init_msrpm_offsets(); |
980 | ||
cfc48181 SC |
981 | supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR); |
982 | ||
1b2fd70c AG |
983 | if (boot_cpu_has(X86_FEATURE_FXSR_OPT)) |
984 | kvm_enable_efer_bits(EFER_FFXSR); | |
985 | ||
92a1f12d | 986 | if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) { |
92a1f12d | 987 | kvm_has_tsc_control = true; |
bc9b961b HZ |
988 | kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX; |
989 | kvm_tsc_scaling_ratio_frac_bits = 32; | |
92a1f12d JR |
990 | } |
991 | ||
e5fda4bb | 992 | tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX); |
844d69c2 | 993 | |
8566ac8b BM |
994 | /* Check for pause filtering support */ |
995 | if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) { | |
996 | pause_filter_count = 0; | |
997 | pause_filter_thresh = 0; | |
998 | } else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) { | |
999 | pause_filter_thresh = 0; | |
1000 | } | |
1001 | ||
236de055 AG |
1002 | if (nested) { |
1003 | printk(KERN_INFO "kvm: Nested Virtualization enabled\n"); | |
eec4b140 | 1004 | kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE); |
236de055 AG |
1005 | } |
1006 | ||
99840a75 SC |
1007 | /* |
1008 | * KVM's MMU doesn't support using 2-level paging for itself, and thus | |
1009 | * NPT isn't supported if the host is using 2-level paging since host | |
1010 | * CR4 is unchanged on VMRUN. | |
1011 | */ | |
1012 | if (!IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_X86_PAE)) | |
e3da3acd JR |
1013 | npt_enabled = false; |
1014 | ||
99840a75 | 1015 | if (!boot_cpu_has(X86_FEATURE_NPT)) |
6c7dac72 | 1016 | npt_enabled = false; |
6c7dac72 | 1017 | |
83013059 | 1018 | kvm_configure_mmu(npt_enabled, get_max_npt_level(), PG_LEVEL_1G); |
213e0e1f | 1019 | pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis"); |
e3da3acd | 1020 | |
e8126bda SC |
1021 | /* Note, SEV setup consumes npt_enabled. */ |
1022 | sev_hardware_setup(); | |
fa13680f | 1023 | |
1e0c7d40 VP |
1024 | svm_hv_hardware_setup(); |
1025 | ||
fa13680f SC |
1026 | svm_adjust_mmio_mask(); |
1027 | ||
1028 | for_each_possible_cpu(cpu) { | |
1029 | r = svm_cpu_init(cpu); | |
1030 | if (r) | |
1031 | goto err; | |
1032 | } | |
1033 | ||
d647eb63 PB |
1034 | if (nrips) { |
1035 | if (!boot_cpu_has(X86_FEATURE_NRIPS)) | |
1036 | nrips = false; | |
1037 | } | |
1038 | ||
fdf513e3 | 1039 | enable_apicv = avic = avic && npt_enabled && boot_cpu_has(X86_FEATURE_AVIC); |
5881f737 | 1040 | |
fdf513e3 VK |
1041 | if (enable_apicv) { |
1042 | pr_info("AVIC enabled\n"); | |
1043 | ||
1044 | amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier); | |
5b8abf1f | 1045 | } |
44a95dae | 1046 | |
89c8a498 JN |
1047 | if (vls) { |
1048 | if (!npt_enabled || | |
5442c269 | 1049 | !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) || |
89c8a498 JN |
1050 | !IS_ENABLED(CONFIG_X86_64)) { |
1051 | vls = false; | |
1052 | } else { | |
1053 | pr_info("Virtual VMLOAD VMSAVE supported\n"); | |
1054 | } | |
1055 | } | |
1056 | ||
3b9c723e WH |
1057 | if (boot_cpu_has(X86_FEATURE_SVME_ADDR_CHK)) |
1058 | svm_gp_erratum_intercept = false; | |
1059 | ||
640bd6e5 JN |
1060 | if (vgif) { |
1061 | if (!boot_cpu_has(X86_FEATURE_VGIF)) | |
1062 | vgif = false; | |
1063 | else | |
1064 | pr_info("Virtual GIF supported\n"); | |
1065 | } | |
1066 | ||
9b58b985 | 1067 | svm_set_cpu_caps(); |
66a6950f | 1068 | |
3edd6839 MG |
1069 | /* |
1070 | * It seems that on AMD processors PTE's accessed bit is | |
1071 | * being set by the CPU hardware before the NPF vmexit. | |
1072 | * This is not expected behaviour and our tests fail because | |
1073 | * of it. | |
1074 | * A workaround here is to disable support for | |
1075 | * GUEST_MAXPHYADDR < HOST_MAXPHYADDR if NPT is enabled. | |
1076 | * In this case userspace can know if there is support using | |
1077 | * KVM_CAP_SMALLER_MAXPHYADDR extension and decide how to handle | |
1078 | * it | |
1079 | * If future AMD CPU models change the behaviour described above, | |
1080 | * this variable can be changed accordingly | |
1081 | */ | |
1082 | allow_smaller_maxphyaddr = !npt_enabled; | |
1083 | ||
6aa8b732 AK |
1084 | return 0; |
1085 | ||
f65c229c | 1086 | err: |
dd58f3c9 | 1087 | svm_hardware_teardown(); |
6aa8b732 AK |
1088 | return r; |
1089 | } | |
1090 | ||
6aa8b732 AK |
1091 | static void init_seg(struct vmcb_seg *seg) |
1092 | { | |
1093 | seg->selector = 0; | |
1094 | seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK | | |
e0231715 | 1095 | SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */ |
6aa8b732 AK |
1096 | seg->limit = 0xffff; |
1097 | seg->base = 0; | |
1098 | } | |
1099 | ||
1100 | static void init_sys_seg(struct vmcb_seg *seg, uint32_t type) | |
1101 | { | |
1102 | seg->selector = 0; | |
1103 | seg->attrib = SVM_SELECTOR_P_MASK | type; | |
1104 | seg->limit = 0xffff; | |
1105 | seg->base = 0; | |
1106 | } | |
1107 | ||
307a94c7 IS |
1108 | static u64 svm_get_l2_tsc_offset(struct kvm_vcpu *vcpu) |
1109 | { | |
1110 | struct vcpu_svm *svm = to_svm(vcpu); | |
1111 | ||
1112 | return svm->nested.ctl.tsc_offset; | |
1113 | } | |
1114 | ||
1115 | static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu) | |
1116 | { | |
1117 | return kvm_default_tsc_scaling_ratio; | |
1118 | } | |
1119 | ||
edcfe540 | 1120 | static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) |
f4e1b3c8 ZA |
1121 | { |
1122 | struct vcpu_svm *svm = to_svm(vcpu); | |
116a0a23 | 1123 | |
edcfe540 IS |
1124 | svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset; |
1125 | svm->vmcb->control.tsc_offset = offset; | |
06e7852c | 1126 | vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); |
f4e1b3c8 ZA |
1127 | } |
1128 | ||
1ab9287a IS |
1129 | static void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier) |
1130 | { | |
1131 | wrmsrl(MSR_AMD64_TSC_RATIO, multiplier); | |
1132 | } | |
1133 | ||
3b195ac9 SC |
1134 | /* Evaluate instruction intercepts that depend on guest CPUID features. */ |
1135 | static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu, | |
1136 | struct vcpu_svm *svm) | |
4407a797 BM |
1137 | { |
1138 | /* | |
0a8ed2ea SC |
1139 | * Intercept INVPCID if shadow paging is enabled to sync/free shadow |
1140 | * roots, or if INVPCID is disabled in the guest to inject #UD. | |
4407a797 BM |
1141 | */ |
1142 | if (kvm_cpu_cap_has(X86_FEATURE_INVPCID)) { | |
0a8ed2ea SC |
1143 | if (!npt_enabled || |
1144 | !guest_cpuid_has(&svm->vcpu, X86_FEATURE_INVPCID)) | |
4407a797 BM |
1145 | svm_set_intercept(svm, INTERCEPT_INVPCID); |
1146 | else | |
1147 | svm_clr_intercept(svm, INTERCEPT_INVPCID); | |
1148 | } | |
3b195ac9 SC |
1149 | |
1150 | if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP)) { | |
1151 | if (guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) | |
1152 | svm_clr_intercept(svm, INTERCEPT_RDTSCP); | |
1153 | else | |
1154 | svm_set_intercept(svm, INTERCEPT_RDTSCP); | |
1155 | } | |
4407a797 BM |
1156 | } |
1157 | ||
63129754 | 1158 | static void init_vmcb(struct kvm_vcpu *vcpu) |
6aa8b732 | 1159 | { |
63129754 | 1160 | struct vcpu_svm *svm = to_svm(vcpu); |
e6101a96 JR |
1161 | struct vmcb_control_area *control = &svm->vmcb->control; |
1162 | struct vmcb_save_area *save = &svm->vmcb->save; | |
6aa8b732 | 1163 | |
63129754 | 1164 | vcpu->arch.hflags = 0; |
bff78274 | 1165 | |
830bd71f BM |
1166 | svm_set_intercept(svm, INTERCEPT_CR0_READ); |
1167 | svm_set_intercept(svm, INTERCEPT_CR3_READ); | |
1168 | svm_set_intercept(svm, INTERCEPT_CR4_READ); | |
1169 | svm_set_intercept(svm, INTERCEPT_CR0_WRITE); | |
1170 | svm_set_intercept(svm, INTERCEPT_CR3_WRITE); | |
1171 | svm_set_intercept(svm, INTERCEPT_CR4_WRITE); | |
63129754 | 1172 | if (!kvm_vcpu_apicv_active(vcpu)) |
830bd71f | 1173 | svm_set_intercept(svm, INTERCEPT_CR8_WRITE); |
6aa8b732 | 1174 | |
5315c716 | 1175 | set_dr_intercepts(svm); |
6aa8b732 | 1176 | |
18c918c5 JR |
1177 | set_exception_intercept(svm, PF_VECTOR); |
1178 | set_exception_intercept(svm, UD_VECTOR); | |
1179 | set_exception_intercept(svm, MC_VECTOR); | |
54a20552 | 1180 | set_exception_intercept(svm, AC_VECTOR); |
cbdb967a | 1181 | set_exception_intercept(svm, DB_VECTOR); |
9718420e LA |
1182 | /* |
1183 | * Guest access to VMware backdoor ports could legitimately | |
1184 | * trigger #GP because of TSS I/O permission bitmap. | |
1185 | * We intercept those #GP and allow access to them anyway | |
1186 | * as VMware does. | |
1187 | */ | |
1188 | if (enable_vmware_backdoor) | |
1189 | set_exception_intercept(svm, GP_VECTOR); | |
6aa8b732 | 1190 | |
a284ba56 JR |
1191 | svm_set_intercept(svm, INTERCEPT_INTR); |
1192 | svm_set_intercept(svm, INTERCEPT_NMI); | |
4b639a9f ML |
1193 | |
1194 | if (intercept_smi) | |
1195 | svm_set_intercept(svm, INTERCEPT_SMI); | |
1196 | ||
a284ba56 JR |
1197 | svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0); |
1198 | svm_set_intercept(svm, INTERCEPT_RDPMC); | |
1199 | svm_set_intercept(svm, INTERCEPT_CPUID); | |
1200 | svm_set_intercept(svm, INTERCEPT_INVD); | |
1201 | svm_set_intercept(svm, INTERCEPT_INVLPG); | |
1202 | svm_set_intercept(svm, INTERCEPT_INVLPGA); | |
1203 | svm_set_intercept(svm, INTERCEPT_IOIO_PROT); | |
1204 | svm_set_intercept(svm, INTERCEPT_MSR_PROT); | |
1205 | svm_set_intercept(svm, INTERCEPT_TASK_SWITCH); | |
1206 | svm_set_intercept(svm, INTERCEPT_SHUTDOWN); | |
1207 | svm_set_intercept(svm, INTERCEPT_VMRUN); | |
1208 | svm_set_intercept(svm, INTERCEPT_VMMCALL); | |
1209 | svm_set_intercept(svm, INTERCEPT_VMLOAD); | |
1210 | svm_set_intercept(svm, INTERCEPT_VMSAVE); | |
1211 | svm_set_intercept(svm, INTERCEPT_STGI); | |
1212 | svm_set_intercept(svm, INTERCEPT_CLGI); | |
1213 | svm_set_intercept(svm, INTERCEPT_SKINIT); | |
1214 | svm_set_intercept(svm, INTERCEPT_WBINVD); | |
1215 | svm_set_intercept(svm, INTERCEPT_XSETBV); | |
1216 | svm_set_intercept(svm, INTERCEPT_RDPRU); | |
1217 | svm_set_intercept(svm, INTERCEPT_RSM); | |
6aa8b732 | 1218 | |
63129754 | 1219 | if (!kvm_mwait_in_guest(vcpu->kvm)) { |
a284ba56 JR |
1220 | svm_set_intercept(svm, INTERCEPT_MONITOR); |
1221 | svm_set_intercept(svm, INTERCEPT_MWAIT); | |
668fffa3 MT |
1222 | } |
1223 | ||
63129754 | 1224 | if (!kvm_hlt_in_guest(vcpu->kvm)) |
a284ba56 | 1225 | svm_set_intercept(svm, INTERCEPT_HLT); |
caa057a2 | 1226 | |
d0ec49d4 TL |
1227 | control->iopm_base_pa = __sme_set(iopm_base); |
1228 | control->msrpm_base_pa = __sme_set(__pa(svm->msrpm)); | |
6aa8b732 AK |
1229 | control->int_ctl = V_INTR_MASKING_MASK; |
1230 | ||
1231 | init_seg(&save->es); | |
1232 | init_seg(&save->ss); | |
1233 | init_seg(&save->ds); | |
1234 | init_seg(&save->fs); | |
1235 | init_seg(&save->gs); | |
1236 | ||
1237 | save->cs.selector = 0xf000; | |
04b66839 | 1238 | save->cs.base = 0xffff0000; |
6aa8b732 AK |
1239 | /* Executable/Readable Code Segment */ |
1240 | save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK | | |
1241 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK; | |
1242 | save->cs.limit = 0xffff; | |
6aa8b732 AK |
1243 | |
1244 | save->gdtr.limit = 0xffff; | |
1245 | save->idtr.limit = 0xffff; | |
1246 | ||
1247 | init_sys_seg(&save->ldtr, SEG_TYPE_LDT); | |
1248 | init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16); | |
1249 | ||
63129754 PB |
1250 | svm_set_cr4(vcpu, 0); |
1251 | svm_set_efer(vcpu, 0); | |
d77c26fc | 1252 | save->dr6 = 0xffff0ff0; |
63129754 | 1253 | kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); |
6aa8b732 | 1254 | save->rip = 0x0000fff0; |
63129754 | 1255 | vcpu->arch.regs[VCPU_REGS_RIP] = save->rip; |
6aa8b732 | 1256 | |
e0231715 | 1257 | /* |
18fa000a | 1258 | * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0. |
d28bc9dd | 1259 | * It also updates the guest-visible cr0 value. |
6aa8b732 | 1260 | */ |
63129754 PB |
1261 | svm_set_cr0(vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET); |
1262 | kvm_mmu_reset_context(vcpu); | |
18fa000a | 1263 | |
66aee91a | 1264 | save->cr4 = X86_CR4_PAE; |
6aa8b732 | 1265 | /* rdx = ?? */ |
709ddebf JR |
1266 | |
1267 | if (npt_enabled) { | |
1268 | /* Setup VMCB for Nested Paging */ | |
cea3a19b | 1269 | control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE; |
a284ba56 | 1270 | svm_clr_intercept(svm, INTERCEPT_INVLPG); |
18c918c5 | 1271 | clr_exception_intercept(svm, PF_VECTOR); |
830bd71f BM |
1272 | svm_clr_intercept(svm, INTERCEPT_CR3_READ); |
1273 | svm_clr_intercept(svm, INTERCEPT_CR3_WRITE); | |
63129754 | 1274 | save->g_pat = vcpu->arch.pat; |
709ddebf JR |
1275 | save->cr3 = 0; |
1276 | save->cr4 = 0; | |
1277 | } | |
193015ad | 1278 | svm->current_vmcb->asid_generation = 0; |
7e8e6eed | 1279 | svm->asid = 0; |
1371d904 | 1280 | |
c74ad08f ML |
1281 | svm->nested.vmcb12_gpa = INVALID_GPA; |
1282 | svm->nested.last_vmcb12_gpa = INVALID_GPA; | |
63129754 | 1283 | vcpu->arch.hflags = 0; |
2af9194d | 1284 | |
63129754 | 1285 | if (!kvm_pause_in_guest(vcpu->kvm)) { |
8566ac8b BM |
1286 | control->pause_filter_count = pause_filter_count; |
1287 | if (pause_filter_thresh) | |
1288 | control->pause_filter_thresh = pause_filter_thresh; | |
a284ba56 | 1289 | svm_set_intercept(svm, INTERCEPT_PAUSE); |
8566ac8b | 1290 | } else { |
a284ba56 | 1291 | svm_clr_intercept(svm, INTERCEPT_PAUSE); |
565d0998 ML |
1292 | } |
1293 | ||
3b195ac9 | 1294 | svm_recalc_instruction_intercepts(vcpu, svm); |
4407a797 | 1295 | |
89c8a498 | 1296 | /* |
d00b99c5 BM |
1297 | * If the host supports V_SPEC_CTRL then disable the interception |
1298 | * of MSR_IA32_SPEC_CTRL. | |
89c8a498 | 1299 | */ |
d00b99c5 BM |
1300 | if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL)) |
1301 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1); | |
1302 | ||
63129754 | 1303 | if (kvm_vcpu_apicv_active(vcpu)) |
44a95dae | 1304 | avic_init_vmcb(svm); |
89c8a498 | 1305 | |
640bd6e5 | 1306 | if (vgif) { |
a284ba56 JR |
1307 | svm_clr_intercept(svm, INTERCEPT_STGI); |
1308 | svm_clr_intercept(svm, INTERCEPT_CLGI); | |
640bd6e5 JN |
1309 | svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK; |
1310 | } | |
1311 | ||
63129754 | 1312 | if (sev_guest(vcpu->kvm)) { |
1654efcb | 1313 | svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE; |
35c6f649 | 1314 | clr_exception_intercept(svm, UD_VECTOR); |
376c6d28 | 1315 | |
63129754 | 1316 | if (sev_es_guest(vcpu->kvm)) { |
376c6d28 TL |
1317 | /* Perform SEV-ES specific VMCB updates */ |
1318 | sev_es_init_vmcb(svm); | |
1319 | } | |
35c6f649 | 1320 | } |
1654efcb | 1321 | |
1e0c7d40 VP |
1322 | svm_hv_init_vmcb(svm->vmcb); |
1323 | ||
06e7852c | 1324 | vmcb_mark_all_dirty(svm->vmcb); |
8d28fec4 | 1325 | |
2af9194d | 1326 | enable_gif(svm); |
44a95dae SS |
1327 | |
1328 | } | |
1329 | ||
d28bc9dd | 1330 | static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) |
04d2cc77 AK |
1331 | { |
1332 | struct vcpu_svm *svm = to_svm(vcpu); | |
66f7b72e JS |
1333 | u32 dummy; |
1334 | u32 eax = 1; | |
04d2cc77 | 1335 | |
b2ac58f9 | 1336 | svm->spec_ctrl = 0; |
ccbcd267 | 1337 | svm->virt_spec_ctrl = 0; |
b2ac58f9 | 1338 | |
d28bc9dd | 1339 | if (!init_event) { |
63129754 PB |
1340 | vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE | |
1341 | MSR_IA32_APICBASE_ENABLE; | |
1342 | if (kvm_vcpu_is_reset_bsp(vcpu)) | |
1343 | vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP; | |
d28bc9dd | 1344 | } |
63129754 | 1345 | init_vmcb(vcpu); |
70433389 | 1346 | |
f91af517 | 1347 | kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, false); |
de3cd117 | 1348 | kvm_rdx_write(vcpu, eax); |
44a95dae SS |
1349 | |
1350 | if (kvm_vcpu_apicv_active(vcpu) && !init_event) | |
1351 | avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE); | |
04d2cc77 AK |
1352 | } |
1353 | ||
4995a368 CA |
1354 | void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb) |
1355 | { | |
1356 | svm->current_vmcb = target_vmcb; | |
1357 | svm->vmcb = target_vmcb->ptr; | |
4995a368 CA |
1358 | } |
1359 | ||
987b2594 | 1360 | static int svm_create_vcpu(struct kvm_vcpu *vcpu) |
6aa8b732 | 1361 | { |
a2fa3e9f | 1362 | struct vcpu_svm *svm; |
4995a368 | 1363 | struct page *vmcb01_page; |
add5e2f0 | 1364 | struct page *vmsa_page = NULL; |
fb3f0f51 | 1365 | int err; |
6aa8b732 | 1366 | |
a9dd6f09 SC |
1367 | BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0); |
1368 | svm = to_svm(vcpu); | |
fb3f0f51 | 1369 | |
b7af4043 | 1370 | err = -ENOMEM; |
4995a368 CA |
1371 | vmcb01_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); |
1372 | if (!vmcb01_page) | |
987b2594 | 1373 | goto out; |
6aa8b732 | 1374 | |
63129754 | 1375 | if (sev_es_guest(vcpu->kvm)) { |
add5e2f0 TL |
1376 | /* |
1377 | * SEV-ES guests require a separate VMSA page used to contain | |
1378 | * the encrypted register state of the guest. | |
1379 | */ | |
1380 | vmsa_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); | |
1381 | if (!vmsa_page) | |
1382 | goto error_free_vmcb_page; | |
ed02b213 TL |
1383 | |
1384 | /* | |
1385 | * SEV-ES guests maintain an encrypted version of their FPU | |
1386 | * state which is restored and saved on VMRUN and VMEXIT. | |
1387 | * Free the fpu structure to prevent KVM from attempting to | |
1388 | * access the FPU state. | |
1389 | */ | |
1390 | kvm_free_guest_fpu(vcpu); | |
add5e2f0 TL |
1391 | } |
1392 | ||
dfa20099 SS |
1393 | err = avic_init_vcpu(svm); |
1394 | if (err) | |
add5e2f0 | 1395 | goto error_free_vmsa_page; |
44a95dae | 1396 | |
8221c137 SS |
1397 | /* We initialize this flag to true to make sure that the is_running |
1398 | * bit would be set the first time the vcpu is loaded. | |
1399 | */ | |
6c3e4422 SS |
1400 | if (irqchip_in_kernel(vcpu->kvm) && kvm_apicv_activated(vcpu->kvm)) |
1401 | svm->avic_is_running = true; | |
8221c137 | 1402 | |
476c9bd8 | 1403 | svm->msrpm = svm_vcpu_alloc_msrpm(); |
054409ab CZ |
1404 | if (!svm->msrpm) { |
1405 | err = -ENOMEM; | |
add5e2f0 | 1406 | goto error_free_vmsa_page; |
054409ab | 1407 | } |
b7af4043 | 1408 | |
476c9bd8 | 1409 | svm_vcpu_init_msrpm(vcpu, svm->msrpm); |
3d6368ef | 1410 | |
4995a368 CA |
1411 | svm->vmcb01.ptr = page_address(vmcb01_page); |
1412 | svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT); | |
add5e2f0 TL |
1413 | |
1414 | if (vmsa_page) | |
1415 | svm->vmsa = page_address(vmsa_page); | |
1416 | ||
a7fc06dd | 1417 | svm->guest_state_loaded = false; |
4995a368 CA |
1418 | |
1419 | svm_switch_vmcb(svm, &svm->vmcb01); | |
63129754 | 1420 | init_vmcb(vcpu); |
6aa8b732 | 1421 | |
7f27179a | 1422 | svm_init_osvw(vcpu); |
bab0c318 | 1423 | vcpu->arch.microcode_version = 0x01000065; |
2b036c6b | 1424 | |
63129754 | 1425 | if (sev_es_guest(vcpu->kvm)) |
376c6d28 TL |
1426 | /* Perform SEV-ES specific VMCB creation updates */ |
1427 | sev_es_create_vcpu(svm); | |
1428 | ||
a9dd6f09 | 1429 | return 0; |
36241b8c | 1430 | |
add5e2f0 TL |
1431 | error_free_vmsa_page: |
1432 | if (vmsa_page) | |
1433 | __free_page(vmsa_page); | |
8d22b90e | 1434 | error_free_vmcb_page: |
4995a368 | 1435 | __free_page(vmcb01_page); |
987b2594 | 1436 | out: |
a9dd6f09 | 1437 | return err; |
6aa8b732 AK |
1438 | } |
1439 | ||
fd65d314 JM |
1440 | static void svm_clear_current_vmcb(struct vmcb *vmcb) |
1441 | { | |
1442 | int i; | |
1443 | ||
1444 | for_each_online_cpu(i) | |
1445 | cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL); | |
1446 | } | |
1447 | ||
6aa8b732 AK |
1448 | static void svm_free_vcpu(struct kvm_vcpu *vcpu) |
1449 | { | |
a2fa3e9f GH |
1450 | struct vcpu_svm *svm = to_svm(vcpu); |
1451 | ||
fd65d314 JM |
1452 | /* |
1453 | * The vmcb page can be recycled, causing a false negative in | |
1454 | * svm_vcpu_load(). So, ensure that no logical CPU has this | |
1455 | * vmcb page recorded as its current vmcb. | |
1456 | */ | |
1457 | svm_clear_current_vmcb(svm->vmcb); | |
1458 | ||
2fcf4876 ML |
1459 | svm_free_nested(svm); |
1460 | ||
add5e2f0 TL |
1461 | sev_free_vcpu(vcpu); |
1462 | ||
4995a368 | 1463 | __free_page(pfn_to_page(__sme_clr(svm->vmcb01.pa) >> PAGE_SHIFT)); |
47903dc1 | 1464 | __free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE)); |
6aa8b732 AK |
1465 | } |
1466 | ||
a7fc06dd | 1467 | static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) |
6aa8b732 | 1468 | { |
a2fa3e9f | 1469 | struct vcpu_svm *svm = to_svm(vcpu); |
a7fc06dd | 1470 | struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu); |
0cc5064d | 1471 | |
ce7ea0cf TL |
1472 | if (sev_es_guest(vcpu->kvm)) |
1473 | sev_es_unmap_ghcb(svm); | |
1474 | ||
a7fc06dd MR |
1475 | if (svm->guest_state_loaded) |
1476 | return; | |
1477 | ||
a7fc06dd MR |
1478 | /* |
1479 | * Save additional host state that will be restored on VMEXIT (sev-es) | |
1480 | * or subsequent vmload of host save area. | |
1481 | */ | |
63129754 | 1482 | if (sev_es_guest(vcpu->kvm)) { |
a7fc06dd | 1483 | sev_es_prepare_guest_switch(svm, vcpu->cpu); |
86137773 | 1484 | } else { |
e79b91bb | 1485 | vmsave(__sme_page_pa(sd->save_area)); |
86137773 | 1486 | } |
fbc0db76 | 1487 | |
ad721883 HZ |
1488 | if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) { |
1489 | u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio; | |
1490 | if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) { | |
1491 | __this_cpu_write(current_tsc_ratio, tsc_ratio); | |
1492 | wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio); | |
1493 | } | |
fbc0db76 | 1494 | } |
a7fc06dd | 1495 | |
0caa0a77 SC |
1496 | if (likely(tsc_aux_uret_slot >= 0)) |
1497 | kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull); | |
8221c137 | 1498 | |
a7fc06dd MR |
1499 | svm->guest_state_loaded = true; |
1500 | } | |
1501 | ||
1502 | static void svm_prepare_host_switch(struct kvm_vcpu *vcpu) | |
1503 | { | |
844d69c2 | 1504 | to_svm(vcpu)->guest_state_loaded = false; |
a7fc06dd MR |
1505 | } |
1506 | ||
1507 | static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |
1508 | { | |
1509 | struct vcpu_svm *svm = to_svm(vcpu); | |
1510 | struct svm_cpu_data *sd = per_cpu(svm_data, cpu); | |
1511 | ||
15d45071 AR |
1512 | if (sd->current_vmcb != svm->vmcb) { |
1513 | sd->current_vmcb = svm->vmcb; | |
1514 | indirect_branch_prediction_barrier(); | |
1515 | } | |
8221c137 | 1516 | avic_vcpu_load(vcpu, cpu); |
6aa8b732 AK |
1517 | } |
1518 | ||
1519 | static void svm_vcpu_put(struct kvm_vcpu *vcpu) | |
1520 | { | |
8221c137 | 1521 | avic_vcpu_put(vcpu); |
a7fc06dd | 1522 | svm_prepare_host_switch(vcpu); |
8221c137 | 1523 | |
e1beb1d3 | 1524 | ++vcpu->stat.host_state_reload; |
6aa8b732 AK |
1525 | } |
1526 | ||
6aa8b732 AK |
1527 | static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu) |
1528 | { | |
9b611747 LP |
1529 | struct vcpu_svm *svm = to_svm(vcpu); |
1530 | unsigned long rflags = svm->vmcb->save.rflags; | |
1531 | ||
1532 | if (svm->nmi_singlestep) { | |
1533 | /* Hide our flags if they were not set by the guest */ | |
1534 | if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) | |
1535 | rflags &= ~X86_EFLAGS_TF; | |
1536 | if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) | |
1537 | rflags &= ~X86_EFLAGS_RF; | |
1538 | } | |
1539 | return rflags; | |
6aa8b732 AK |
1540 | } |
1541 | ||
1542 | static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) | |
1543 | { | |
9b611747 LP |
1544 | if (to_svm(vcpu)->nmi_singlestep) |
1545 | rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); | |
1546 | ||
ae9fedc7 | 1547 | /* |
bb3541f1 | 1548 | * Any change of EFLAGS.VM is accompanied by a reload of SS |
ae9fedc7 PB |
1549 | * (caused by either a task switch or an inter-privilege IRET), |
1550 | * so we do not need to update the CPL here. | |
1551 | */ | |
a2fa3e9f | 1552 | to_svm(vcpu)->vmcb->save.rflags = rflags; |
6aa8b732 AK |
1553 | } |
1554 | ||
6de4f3ad AK |
1555 | static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) |
1556 | { | |
1557 | switch (reg) { | |
1558 | case VCPU_EXREG_PDPTR: | |
1559 | BUG_ON(!npt_enabled); | |
9f8fe504 | 1560 | load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); |
6de4f3ad AK |
1561 | break; |
1562 | default: | |
34059c25 | 1563 | WARN_ON_ONCE(1); |
6de4f3ad AK |
1564 | } |
1565 | } | |
1566 | ||
e14b7786 | 1567 | static void svm_set_vintr(struct vcpu_svm *svm) |
64b5bd27 PB |
1568 | { |
1569 | struct vmcb_control_area *control; | |
1570 | ||
1571 | /* The following fields are ignored when AVIC is enabled */ | |
1572 | WARN_ON(kvm_vcpu_apicv_active(&svm->vcpu)); | |
a284ba56 | 1573 | svm_set_intercept(svm, INTERCEPT_VINTR); |
64b5bd27 PB |
1574 | |
1575 | /* | |
1576 | * This is just a dummy VINTR to actually cause a vmexit to happen. | |
1577 | * Actual injection of virtual interrupts happens through EVENTINJ. | |
1578 | */ | |
1579 | control = &svm->vmcb->control; | |
1580 | control->int_vector = 0x0; | |
1581 | control->int_ctl &= ~V_INTR_PRIO_MASK; | |
1582 | control->int_ctl |= V_IRQ_MASK | | |
1583 | ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT); | |
06e7852c | 1584 | vmcb_mark_dirty(svm->vmcb, VMCB_INTR); |
64b5bd27 PB |
1585 | } |
1586 | ||
f0b85051 AG |
1587 | static void svm_clear_vintr(struct vcpu_svm *svm) |
1588 | { | |
d8e4e58f | 1589 | const u32 mask = V_TPR_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK | V_INTR_MASKING_MASK; |
a284ba56 | 1590 | svm_clr_intercept(svm, INTERCEPT_VINTR); |
64b5bd27 | 1591 | |
d8e4e58f PB |
1592 | /* Drop int_ctl fields related to VINTR injection. */ |
1593 | svm->vmcb->control.int_ctl &= mask; | |
1594 | if (is_guest_mode(&svm->vcpu)) { | |
4995a368 | 1595 | svm->vmcb01.ptr->control.int_ctl &= mask; |
fb7333df | 1596 | |
d8e4e58f PB |
1597 | WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) != |
1598 | (svm->nested.ctl.int_ctl & V_TPR_MASK)); | |
1599 | svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & ~mask; | |
1600 | } | |
1601 | ||
06e7852c | 1602 | vmcb_mark_dirty(svm->vmcb, VMCB_INTR); |
f0b85051 AG |
1603 | } |
1604 | ||
6aa8b732 AK |
1605 | static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg) |
1606 | { | |
a2fa3e9f | 1607 | struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; |
cc3ed80a | 1608 | struct vmcb_save_area *save01 = &to_svm(vcpu)->vmcb01.ptr->save; |
6aa8b732 AK |
1609 | |
1610 | switch (seg) { | |
1611 | case VCPU_SREG_CS: return &save->cs; | |
1612 | case VCPU_SREG_DS: return &save->ds; | |
1613 | case VCPU_SREG_ES: return &save->es; | |
cc3ed80a ML |
1614 | case VCPU_SREG_FS: return &save01->fs; |
1615 | case VCPU_SREG_GS: return &save01->gs; | |
6aa8b732 | 1616 | case VCPU_SREG_SS: return &save->ss; |
cc3ed80a ML |
1617 | case VCPU_SREG_TR: return &save01->tr; |
1618 | case VCPU_SREG_LDTR: return &save01->ldtr; | |
6aa8b732 AK |
1619 | } |
1620 | BUG(); | |
8b6d44c7 | 1621 | return NULL; |
6aa8b732 AK |
1622 | } |
1623 | ||
1624 | static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg) | |
1625 | { | |
1626 | struct vmcb_seg *s = svm_seg(vcpu, seg); | |
1627 | ||
1628 | return s->base; | |
1629 | } | |
1630 | ||
1631 | static void svm_get_segment(struct kvm_vcpu *vcpu, | |
1632 | struct kvm_segment *var, int seg) | |
1633 | { | |
1634 | struct vmcb_seg *s = svm_seg(vcpu, seg); | |
1635 | ||
1636 | var->base = s->base; | |
1637 | var->limit = s->limit; | |
1638 | var->selector = s->selector; | |
1639 | var->type = s->attrib & SVM_SELECTOR_TYPE_MASK; | |
1640 | var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1; | |
1641 | var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3; | |
1642 | var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1; | |
1643 | var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1; | |
1644 | var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1; | |
1645 | var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; | |
80112c89 JM |
1646 | |
1647 | /* | |
1648 | * AMD CPUs circa 2014 track the G bit for all segments except CS. | |
1649 | * However, the SVM spec states that the G bit is not observed by the | |
1650 | * CPU, and some VMware virtual CPUs drop the G bit for all segments. | |
1651 | * So let's synthesize a legal G bit for all segments, this helps | |
1652 | * running KVM nested. It also helps cross-vendor migration, because | |
1653 | * Intel's vmentry has a check on the 'G' bit. | |
1654 | */ | |
1655 | var->g = s->limit > 0xfffff; | |
25022acc | 1656 | |
e0231715 JR |
1657 | /* |
1658 | * AMD's VMCB does not have an explicit unusable field, so emulate it | |
19bca6ab AP |
1659 | * for cross vendor migration purposes by "not present" |
1660 | */ | |
8eae9570 | 1661 | var->unusable = !var->present; |
19bca6ab | 1662 | |
1fbdc7a5 | 1663 | switch (seg) { |
1fbdc7a5 AP |
1664 | case VCPU_SREG_TR: |
1665 | /* | |
1666 | * Work around a bug where the busy flag in the tr selector | |
1667 | * isn't exposed | |
1668 | */ | |
c0d09828 | 1669 | var->type |= 0x2; |
1fbdc7a5 AP |
1670 | break; |
1671 | case VCPU_SREG_DS: | |
1672 | case VCPU_SREG_ES: | |
1673 | case VCPU_SREG_FS: | |
1674 | case VCPU_SREG_GS: | |
1675 | /* | |
1676 | * The accessed bit must always be set in the segment | |
1677 | * descriptor cache, although it can be cleared in the | |
1678 | * descriptor, the cached bit always remains at 1. Since | |
1679 | * Intel has a check on this, set it here to support | |
1680 | * cross-vendor migration. | |
1681 | */ | |
1682 | if (!var->unusable) | |
1683 | var->type |= 0x1; | |
1684 | break; | |
b586eb02 | 1685 | case VCPU_SREG_SS: |
e0231715 JR |
1686 | /* |
1687 | * On AMD CPUs sometimes the DB bit in the segment | |
b586eb02 AP |
1688 | * descriptor is left as 1, although the whole segment has |
1689 | * been made unusable. Clear it here to pass an Intel VMX | |
1690 | * entry check when cross vendor migrating. | |
1691 | */ | |
1692 | if (var->unusable) | |
1693 | var->db = 0; | |
d9c1b543 | 1694 | /* This is symmetric with svm_set_segment() */ |
33b458d2 | 1695 | var->dpl = to_svm(vcpu)->vmcb->save.cpl; |
b586eb02 | 1696 | break; |
1fbdc7a5 | 1697 | } |
6aa8b732 AK |
1698 | } |
1699 | ||
2e4d2653 IE |
1700 | static int svm_get_cpl(struct kvm_vcpu *vcpu) |
1701 | { | |
1702 | struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; | |
1703 | ||
1704 | return save->cpl; | |
1705 | } | |
1706 | ||
89a27f4d | 1707 | static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
6aa8b732 | 1708 | { |
a2fa3e9f GH |
1709 | struct vcpu_svm *svm = to_svm(vcpu); |
1710 | ||
89a27f4d GN |
1711 | dt->size = svm->vmcb->save.idtr.limit; |
1712 | dt->address = svm->vmcb->save.idtr.base; | |
6aa8b732 AK |
1713 | } |
1714 | ||
89a27f4d | 1715 | static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
6aa8b732 | 1716 | { |
a2fa3e9f GH |
1717 | struct vcpu_svm *svm = to_svm(vcpu); |
1718 | ||
89a27f4d GN |
1719 | svm->vmcb->save.idtr.limit = dt->size; |
1720 | svm->vmcb->save.idtr.base = dt->address ; | |
06e7852c | 1721 | vmcb_mark_dirty(svm->vmcb, VMCB_DT); |
6aa8b732 AK |
1722 | } |
1723 | ||
89a27f4d | 1724 | static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
6aa8b732 | 1725 | { |
a2fa3e9f GH |
1726 | struct vcpu_svm *svm = to_svm(vcpu); |
1727 | ||
89a27f4d GN |
1728 | dt->size = svm->vmcb->save.gdtr.limit; |
1729 | dt->address = svm->vmcb->save.gdtr.base; | |
6aa8b732 AK |
1730 | } |
1731 | ||
89a27f4d | 1732 | static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
6aa8b732 | 1733 | { |
a2fa3e9f GH |
1734 | struct vcpu_svm *svm = to_svm(vcpu); |
1735 | ||
89a27f4d GN |
1736 | svm->vmcb->save.gdtr.limit = dt->size; |
1737 | svm->vmcb->save.gdtr.base = dt->address ; | |
06e7852c | 1738 | vmcb_mark_dirty(svm->vmcb, VMCB_DT); |
6aa8b732 AK |
1739 | } |
1740 | ||
883b0a91 | 1741 | void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) |
6aa8b732 | 1742 | { |
a2fa3e9f | 1743 | struct vcpu_svm *svm = to_svm(vcpu); |
2a32a77c | 1744 | u64 hcr0 = cr0; |
a2fa3e9f | 1745 | |
05b3e0c2 | 1746 | #ifdef CONFIG_X86_64 |
f1c6366e | 1747 | if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) { |
707d92fa | 1748 | if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { |
f6801dff | 1749 | vcpu->arch.efer |= EFER_LMA; |
2b5203ee | 1750 | svm->vmcb->save.efer |= EFER_LMA | EFER_LME; |
6aa8b732 AK |
1751 | } |
1752 | ||
d77c26fc | 1753 | if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) { |
f6801dff | 1754 | vcpu->arch.efer &= ~EFER_LMA; |
2b5203ee | 1755 | svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME); |
6aa8b732 AK |
1756 | } |
1757 | } | |
1758 | #endif | |
ad312c7c | 1759 | vcpu->arch.cr0 = cr0; |
888f9f3e AK |
1760 | |
1761 | if (!npt_enabled) | |
2a32a77c | 1762 | hcr0 |= X86_CR0_PG | X86_CR0_WP; |
02daab21 | 1763 | |
bcf166a9 PB |
1764 | /* |
1765 | * re-enable caching here because the QEMU bios | |
1766 | * does not do it - this results in some delay at | |
1767 | * reboot | |
1768 | */ | |
1769 | if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) | |
2a32a77c PB |
1770 | hcr0 &= ~(X86_CR0_CD | X86_CR0_NW); |
1771 | ||
1772 | svm->vmcb->save.cr0 = hcr0; | |
06e7852c | 1773 | vmcb_mark_dirty(svm->vmcb, VMCB_CR); |
2a32a77c PB |
1774 | |
1775 | /* | |
1776 | * SEV-ES guests must always keep the CR intercepts cleared. CR | |
1777 | * tracking is done using the CR write traps. | |
1778 | */ | |
63129754 | 1779 | if (sev_es_guest(vcpu->kvm)) |
2a32a77c PB |
1780 | return; |
1781 | ||
1782 | if (hcr0 == cr0) { | |
1783 | /* Selective CR0 write remains on. */ | |
1784 | svm_clr_intercept(svm, INTERCEPT_CR0_READ); | |
1785 | svm_clr_intercept(svm, INTERCEPT_CR0_WRITE); | |
1786 | } else { | |
1787 | svm_set_intercept(svm, INTERCEPT_CR0_READ); | |
1788 | svm_set_intercept(svm, INTERCEPT_CR0_WRITE); | |
1789 | } | |
6aa8b732 AK |
1790 | } |
1791 | ||
c2fe3cd4 SC |
1792 | static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) |
1793 | { | |
1794 | return true; | |
1795 | } | |
1796 | ||
1797 | void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) | |
6aa8b732 | 1798 | { |
1e02ce4c | 1799 | unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE; |
dc924b06 | 1800 | unsigned long old_cr4 = vcpu->arch.cr4; |
e5eab0ce JR |
1801 | |
1802 | if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE)) | |
f55ac304 | 1803 | svm_flush_tlb(vcpu); |
6394b649 | 1804 | |
ec077263 JR |
1805 | vcpu->arch.cr4 = cr4; |
1806 | if (!npt_enabled) | |
1807 | cr4 |= X86_CR4_PAE; | |
6394b649 | 1808 | cr4 |= host_cr4_mce; |
ec077263 | 1809 | to_svm(vcpu)->vmcb->save.cr4 = cr4; |
06e7852c | 1810 | vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR); |
2259c17f JM |
1811 | |
1812 | if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE)) | |
1813 | kvm_update_cpuid_runtime(vcpu); | |
6aa8b732 AK |
1814 | } |
1815 | ||
1816 | static void svm_set_segment(struct kvm_vcpu *vcpu, | |
1817 | struct kvm_segment *var, int seg) | |
1818 | { | |
a2fa3e9f | 1819 | struct vcpu_svm *svm = to_svm(vcpu); |
6aa8b732 AK |
1820 | struct vmcb_seg *s = svm_seg(vcpu, seg); |
1821 | ||
1822 | s->base = var->base; | |
1823 | s->limit = var->limit; | |
1824 | s->selector = var->selector; | |
d9c1b543 RP |
1825 | s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK); |
1826 | s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT; | |
1827 | s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT; | |
1828 | s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT; | |
1829 | s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT; | |
1830 | s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT; | |
1831 | s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT; | |
1832 | s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT; | |
ae9fedc7 PB |
1833 | |
1834 | /* | |
1835 | * This is always accurate, except if SYSRET returned to a segment | |
1836 | * with SS.DPL != 3. Intel does not have this quirk, and always | |
1837 | * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it | |
1838 | * would entail passing the CPL to userspace and back. | |
1839 | */ | |
1840 | if (seg == VCPU_SREG_SS) | |
d9c1b543 RP |
1841 | /* This is symmetric with svm_get_segment() */ |
1842 | svm->vmcb->save.cpl = (var->dpl & 3); | |
6aa8b732 | 1843 | |
06e7852c | 1844 | vmcb_mark_dirty(svm->vmcb, VMCB_SEG); |
6aa8b732 AK |
1845 | } |
1846 | ||
b6a7cc35 | 1847 | static void svm_update_exception_bitmap(struct kvm_vcpu *vcpu) |
6aa8b732 | 1848 | { |
d0bfb940 JK |
1849 | struct vcpu_svm *svm = to_svm(vcpu); |
1850 | ||
18c918c5 | 1851 | clr_exception_intercept(svm, BP_VECTOR); |
44c11430 | 1852 | |
d0bfb940 | 1853 | if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) { |
d0bfb940 | 1854 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) |
18c918c5 | 1855 | set_exception_intercept(svm, BP_VECTOR); |
6986982f | 1856 | } |
44c11430 GN |
1857 | } |
1858 | ||
0fe1e009 | 1859 | static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) |
6aa8b732 | 1860 | { |
0fe1e009 TH |
1861 | if (sd->next_asid > sd->max_asid) { |
1862 | ++sd->asid_generation; | |
4faefff3 | 1863 | sd->next_asid = sd->min_asid; |
a2fa3e9f | 1864 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; |
7e8e6eed | 1865 | vmcb_mark_dirty(svm->vmcb, VMCB_ASID); |
6aa8b732 AK |
1866 | } |
1867 | ||
193015ad | 1868 | svm->current_vmcb->asid_generation = sd->asid_generation; |
7e8e6eed | 1869 | svm->asid = sd->next_asid++; |
6aa8b732 AK |
1870 | } |
1871 | ||
d67668e9 | 1872 | static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value) |
73aaf249 | 1873 | { |
d67668e9 | 1874 | struct vmcb *vmcb = svm->vmcb; |
73aaf249 | 1875 | |
8d4846b9 TL |
1876 | if (svm->vcpu.arch.guest_state_protected) |
1877 | return; | |
1878 | ||
d67668e9 PB |
1879 | if (unlikely(value != vmcb->save.dr6)) { |
1880 | vmcb->save.dr6 = value; | |
06e7852c | 1881 | vmcb_mark_dirty(vmcb, VMCB_DR); |
d67668e9 | 1882 | } |
73aaf249 JK |
1883 | } |
1884 | ||
facb0139 PB |
1885 | static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) |
1886 | { | |
1887 | struct vcpu_svm *svm = to_svm(vcpu); | |
1888 | ||
8d4846b9 TL |
1889 | if (vcpu->arch.guest_state_protected) |
1890 | return; | |
1891 | ||
facb0139 PB |
1892 | get_debugreg(vcpu->arch.db[0], 0); |
1893 | get_debugreg(vcpu->arch.db[1], 1); | |
1894 | get_debugreg(vcpu->arch.db[2], 2); | |
1895 | get_debugreg(vcpu->arch.db[3], 3); | |
d67668e9 | 1896 | /* |
9a3ecd5e | 1897 | * We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here, |
d67668e9 PB |
1898 | * because db_interception might need it. We can do it before vmentry. |
1899 | */ | |
5679b803 | 1900 | vcpu->arch.dr6 = svm->vmcb->save.dr6; |
facb0139 | 1901 | vcpu->arch.dr7 = svm->vmcb->save.dr7; |
facb0139 PB |
1902 | vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; |
1903 | set_dr_intercepts(svm); | |
1904 | } | |
1905 | ||
020df079 | 1906 | static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value) |
6aa8b732 | 1907 | { |
42dbaa5a | 1908 | struct vcpu_svm *svm = to_svm(vcpu); |
42dbaa5a | 1909 | |
8d4846b9 TL |
1910 | if (vcpu->arch.guest_state_protected) |
1911 | return; | |
1912 | ||
020df079 | 1913 | svm->vmcb->save.dr7 = value; |
06e7852c | 1914 | vmcb_mark_dirty(svm->vmcb, VMCB_DR); |
6aa8b732 AK |
1915 | } |
1916 | ||
63129754 | 1917 | static int pf_interception(struct kvm_vcpu *vcpu) |
6aa8b732 | 1918 | { |
63129754 PB |
1919 | struct vcpu_svm *svm = to_svm(vcpu); |
1920 | ||
6d1b867d | 1921 | u64 fault_address = svm->vmcb->control.exit_info_2; |
1261bfa3 | 1922 | u64 error_code = svm->vmcb->control.exit_info_1; |
6aa8b732 | 1923 | |
63129754 | 1924 | return kvm_handle_page_fault(vcpu, error_code, fault_address, |
00b10fe1 BS |
1925 | static_cpu_has(X86_FEATURE_DECODEASSISTS) ? |
1926 | svm->vmcb->control.insn_bytes : NULL, | |
d0006530 PB |
1927 | svm->vmcb->control.insn_len); |
1928 | } | |
1929 | ||
63129754 | 1930 | static int npf_interception(struct kvm_vcpu *vcpu) |
d0006530 | 1931 | { |
63129754 PB |
1932 | struct vcpu_svm *svm = to_svm(vcpu); |
1933 | ||
76ff371b | 1934 | u64 fault_address = svm->vmcb->control.exit_info_2; |
d0006530 PB |
1935 | u64 error_code = svm->vmcb->control.exit_info_1; |
1936 | ||
1937 | trace_kvm_page_fault(fault_address, error_code); | |
63129754 | 1938 | return kvm_mmu_page_fault(vcpu, fault_address, error_code, |
00b10fe1 BS |
1939 | static_cpu_has(X86_FEATURE_DECODEASSISTS) ? |
1940 | svm->vmcb->control.insn_bytes : NULL, | |
d0006530 | 1941 | svm->vmcb->control.insn_len); |
6aa8b732 AK |
1942 | } |
1943 | ||
63129754 | 1944 | static int db_interception(struct kvm_vcpu *vcpu) |
d0bfb940 | 1945 | { |
63129754 PB |
1946 | struct kvm_run *kvm_run = vcpu->run; |
1947 | struct vcpu_svm *svm = to_svm(vcpu); | |
851ba692 | 1948 | |
63129754 | 1949 | if (!(vcpu->guest_debug & |
44c11430 | 1950 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) && |
6be7d306 | 1951 | !svm->nmi_singlestep) { |
9a3ecd5e | 1952 | u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW; |
63129754 | 1953 | kvm_queue_exception_p(vcpu, DB_VECTOR, payload); |
d0bfb940 JK |
1954 | return 1; |
1955 | } | |
44c11430 | 1956 | |
6be7d306 | 1957 | if (svm->nmi_singlestep) { |
4aebd0e9 | 1958 | disable_nmi_singlestep(svm); |
99c22179 VK |
1959 | /* Make sure we check for pending NMIs upon entry */ |
1960 | kvm_make_request(KVM_REQ_EVENT, vcpu); | |
44c11430 GN |
1961 | } |
1962 | ||
63129754 | 1963 | if (vcpu->guest_debug & |
e0231715 | 1964 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) { |
44c11430 | 1965 | kvm_run->exit_reason = KVM_EXIT_DEBUG; |
dee919d1 PB |
1966 | kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6; |
1967 | kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7; | |
44c11430 GN |
1968 | kvm_run->debug.arch.pc = |
1969 | svm->vmcb->save.cs.base + svm->vmcb->save.rip; | |
1970 | kvm_run->debug.arch.exception = DB_VECTOR; | |
1971 | return 0; | |
1972 | } | |
1973 | ||
1974 | return 1; | |
d0bfb940 JK |
1975 | } |
1976 | ||
63129754 | 1977 | static int bp_interception(struct kvm_vcpu *vcpu) |
d0bfb940 | 1978 | { |
63129754 PB |
1979 | struct vcpu_svm *svm = to_svm(vcpu); |
1980 | struct kvm_run *kvm_run = vcpu->run; | |
851ba692 | 1981 | |
d0bfb940 JK |
1982 | kvm_run->exit_reason = KVM_EXIT_DEBUG; |
1983 | kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; | |
1984 | kvm_run->debug.arch.exception = BP_VECTOR; | |
1985 | return 0; | |
1986 | } | |
1987 | ||
63129754 | 1988 | static int ud_interception(struct kvm_vcpu *vcpu) |
7aa81cc0 | 1989 | { |
63129754 | 1990 | return handle_ud(vcpu); |
7aa81cc0 AL |
1991 | } |
1992 | ||
63129754 | 1993 | static int ac_interception(struct kvm_vcpu *vcpu) |
54a20552 | 1994 | { |
63129754 | 1995 | kvm_queue_exception_e(vcpu, AC_VECTOR, 0); |
54a20552 EN |
1996 | return 1; |
1997 | } | |
1998 | ||
67ec6607 JR |
1999 | static bool is_erratum_383(void) |
2000 | { | |
2001 | int err, i; | |
2002 | u64 value; | |
2003 | ||
2004 | if (!erratum_383_found) | |
2005 | return false; | |
2006 | ||
2007 | value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err); | |
2008 | if (err) | |
2009 | return false; | |
2010 | ||
2011 | /* Bit 62 may or may not be set for this mce */ | |
2012 | value &= ~(1ULL << 62); | |
2013 | ||
2014 | if (value != 0xb600000000010015ULL) | |
2015 | return false; | |
2016 | ||
2017 | /* Clear MCi_STATUS registers */ | |
2018 | for (i = 0; i < 6; ++i) | |
2019 | native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0); | |
2020 | ||
2021 | value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err); | |
2022 | if (!err) { | |
2023 | u32 low, high; | |
2024 | ||
2025 | value &= ~(1ULL << 2); | |
2026 | low = lower_32_bits(value); | |
2027 | high = upper_32_bits(value); | |
2028 | ||
2029 | native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high); | |
2030 | } | |
2031 | ||
2032 | /* Flush tlb to evict multi-match entries */ | |
2033 | __flush_tlb_all(); | |
2034 | ||
2035 | return true; | |
2036 | } | |
2037 | ||
63129754 | 2038 | static void svm_handle_mce(struct kvm_vcpu *vcpu) |
53371b50 | 2039 | { |
67ec6607 JR |
2040 | if (is_erratum_383()) { |
2041 | /* | |
2042 | * Erratum 383 triggered. Guest state is corrupt so kill the | |
2043 | * guest. | |
2044 | */ | |
2045 | pr_err("KVM: Guest triggered AMD Erratum 383\n"); | |
2046 | ||
63129754 | 2047 | kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); |
67ec6607 JR |
2048 | |
2049 | return; | |
2050 | } | |
2051 | ||
53371b50 JR |
2052 | /* |
2053 | * On an #MC intercept the MCE handler is not called automatically in | |
2054 | * the host. So do it by hand here. | |
2055 | */ | |
1c164cb3 | 2056 | kvm_machine_check(); |
fe5913e4 JR |
2057 | } |
2058 | ||
63129754 | 2059 | static int mc_interception(struct kvm_vcpu *vcpu) |
fe5913e4 | 2060 | { |
53371b50 JR |
2061 | return 1; |
2062 | } | |
2063 | ||
63129754 | 2064 | static int shutdown_interception(struct kvm_vcpu *vcpu) |
46fe4ddd | 2065 | { |
63129754 PB |
2066 | struct kvm_run *kvm_run = vcpu->run; |
2067 | struct vcpu_svm *svm = to_svm(vcpu); | |
851ba692 | 2068 | |
8164a5ff TL |
2069 | /* |
2070 | * The VM save area has already been encrypted so it | |
2071 | * cannot be reinitialized - just terminate. | |
2072 | */ | |
63129754 | 2073 | if (sev_es_guest(vcpu->kvm)) |
8164a5ff TL |
2074 | return -EINVAL; |
2075 | ||
46fe4ddd JR |
2076 | /* |
2077 | * VMCB is undefined after a SHUTDOWN intercept | |
2078 | * so reinitialize it. | |
2079 | */ | |
a2fa3e9f | 2080 | clear_page(svm->vmcb); |
63129754 | 2081 | init_vmcb(vcpu); |
46fe4ddd JR |
2082 | |
2083 | kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; | |
2084 | return 0; | |
2085 | } | |
2086 | ||
63129754 | 2087 | static int io_interception(struct kvm_vcpu *vcpu) |
6aa8b732 | 2088 | { |
63129754 | 2089 | struct vcpu_svm *svm = to_svm(vcpu); |
d77c26fc | 2090 | u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */ |
dca7f128 | 2091 | int size, in, string; |
039576c0 | 2092 | unsigned port; |
6aa8b732 | 2093 | |
63129754 | 2094 | ++vcpu->stat.io_exits; |
e70669ab | 2095 | string = (io_info & SVM_IOIO_STR_MASK) != 0; |
039576c0 AK |
2096 | in = (io_info & SVM_IOIO_TYPE_MASK) != 0; |
2097 | port = io_info >> 16; | |
2098 | size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; | |
7ed9abfe TL |
2099 | |
2100 | if (string) { | |
2101 | if (sev_es_guest(vcpu->kvm)) | |
2102 | return sev_es_string_io(svm, size, port, in); | |
2103 | else | |
2104 | return kvm_emulate_instruction(vcpu, 0); | |
2105 | } | |
2106 | ||
cf8f70bf | 2107 | svm->next_rip = svm->vmcb->control.exit_info_2; |
cf8f70bf | 2108 | |
63129754 | 2109 | return kvm_fast_pio(vcpu, size, port, in); |
c47f098d JR |
2110 | } |
2111 | ||
63129754 | 2112 | static int nmi_interception(struct kvm_vcpu *vcpu) |
a0698055 | 2113 | { |
a0698055 JR |
2114 | return 1; |
2115 | } | |
2116 | ||
991afbbe ML |
2117 | static int smi_interception(struct kvm_vcpu *vcpu) |
2118 | { | |
2119 | return 1; | |
2120 | } | |
2121 | ||
63129754 | 2122 | static int intr_interception(struct kvm_vcpu *vcpu) |
6aa8b732 | 2123 | { |
63129754 | 2124 | ++vcpu->stat.irq_exits; |
6aa8b732 AK |
2125 | return 1; |
2126 | } | |
2127 | ||
2ac636a6 | 2128 | static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload) |
6aa8b732 | 2129 | { |
63129754 | 2130 | struct vcpu_svm *svm = to_svm(vcpu); |
9e8f0fbf | 2131 | struct vmcb *vmcb12; |
8c5fbf1a | 2132 | struct kvm_host_map map; |
b742c1e6 | 2133 | int ret; |
9966bf68 | 2134 | |
63129754 | 2135 | if (nested_svm_check_permissions(vcpu)) |
5542675b AG |
2136 | return 1; |
2137 | ||
63129754 | 2138 | ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map); |
8c5fbf1a KA |
2139 | if (ret) { |
2140 | if (ret == -EINVAL) | |
63129754 | 2141 | kvm_inject_gp(vcpu, 0); |
9966bf68 | 2142 | return 1; |
8c5fbf1a KA |
2143 | } |
2144 | ||
9e8f0fbf | 2145 | vmcb12 = map.hva; |
9966bf68 | 2146 | |
63129754 | 2147 | ret = kvm_skip_emulated_instruction(vcpu); |
9966bf68 | 2148 | |
adc2a237 | 2149 | if (vmload) { |
2ac636a6 | 2150 | nested_svm_vmloadsave(vmcb12, svm->vmcb); |
adc2a237 ML |
2151 | svm->sysenter_eip_hi = 0; |
2152 | svm->sysenter_esp_hi = 0; | |
2153 | } else | |
2ac636a6 | 2154 | nested_svm_vmloadsave(svm->vmcb, vmcb12); |
e3e9ed3d | 2155 | |
63129754 | 2156 | kvm_vcpu_unmap(vcpu, &map, true); |
5542675b | 2157 | |
b742c1e6 | 2158 | return ret; |
5542675b AG |
2159 | } |
2160 | ||
2ac636a6 | 2161 | static int vmload_interception(struct kvm_vcpu *vcpu) |
5542675b | 2162 | { |
2ac636a6 SC |
2163 | return vmload_vmsave_interception(vcpu, true); |
2164 | } | |
5542675b | 2165 | |
2ac636a6 SC |
2166 | static int vmsave_interception(struct kvm_vcpu *vcpu) |
2167 | { | |
2168 | return vmload_vmsave_interception(vcpu, false); | |
5542675b AG |
2169 | } |
2170 | ||
63129754 | 2171 | static int vmrun_interception(struct kvm_vcpu *vcpu) |
3d6368ef | 2172 | { |
63129754 | 2173 | if (nested_svm_check_permissions(vcpu)) |
3d6368ef AG |
2174 | return 1; |
2175 | ||
63129754 | 2176 | return nested_svm_vmrun(vcpu); |
3d6368ef AG |
2177 | } |
2178 | ||
82a11e9c BD |
2179 | enum { |
2180 | NONE_SVM_INSTR, | |
2181 | SVM_INSTR_VMRUN, | |
2182 | SVM_INSTR_VMLOAD, | |
2183 | SVM_INSTR_VMSAVE, | |
2184 | }; | |
2185 | ||
2186 | /* Return NONE_SVM_INSTR if not SVM instrs, otherwise return decode result */ | |
2187 | static int svm_instr_opcode(struct kvm_vcpu *vcpu) | |
2188 | { | |
2189 | struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; | |
2190 | ||
2191 | if (ctxt->b != 0x1 || ctxt->opcode_len != 2) | |
2192 | return NONE_SVM_INSTR; | |
2193 | ||
2194 | switch (ctxt->modrm) { | |
2195 | case 0xd8: /* VMRUN */ | |
2196 | return SVM_INSTR_VMRUN; | |
2197 | case 0xda: /* VMLOAD */ | |
2198 | return SVM_INSTR_VMLOAD; | |
2199 | case 0xdb: /* VMSAVE */ | |
2200 | return SVM_INSTR_VMSAVE; | |
2201 | default: | |
2202 | break; | |
2203 | } | |
2204 | ||
2205 | return NONE_SVM_INSTR; | |
2206 | } | |
2207 | ||
2208 | static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode) | |
2209 | { | |
14c2bf81 WH |
2210 | const int guest_mode_exit_codes[] = { |
2211 | [SVM_INSTR_VMRUN] = SVM_EXIT_VMRUN, | |
2212 | [SVM_INSTR_VMLOAD] = SVM_EXIT_VMLOAD, | |
2213 | [SVM_INSTR_VMSAVE] = SVM_EXIT_VMSAVE, | |
2214 | }; | |
63129754 | 2215 | int (*const svm_instr_handlers[])(struct kvm_vcpu *vcpu) = { |
82a11e9c BD |
2216 | [SVM_INSTR_VMRUN] = vmrun_interception, |
2217 | [SVM_INSTR_VMLOAD] = vmload_interception, | |
2218 | [SVM_INSTR_VMSAVE] = vmsave_interception, | |
2219 | }; | |
2220 | struct vcpu_svm *svm = to_svm(vcpu); | |
2df8d380 | 2221 | int ret; |
82a11e9c | 2222 | |
14c2bf81 | 2223 | if (is_guest_mode(vcpu)) { |
2df8d380 | 2224 | /* Returns '1' or -errno on failure, '0' on success. */ |
3a87c7e0 | 2225 | ret = nested_svm_simple_vmexit(svm, guest_mode_exit_codes[opcode]); |
2df8d380 SC |
2226 | if (ret) |
2227 | return ret; | |
2228 | return 1; | |
2229 | } | |
63129754 | 2230 | return svm_instr_handlers[opcode](vcpu); |
82a11e9c BD |
2231 | } |
2232 | ||
2233 | /* | |
2234 | * #GP handling code. Note that #GP can be triggered under the following two | |
2235 | * cases: | |
2236 | * 1) SVM VM-related instructions (VMRUN/VMSAVE/VMLOAD) that trigger #GP on | |
2237 | * some AMD CPUs when EAX of these instructions are in the reserved memory | |
2238 | * regions (e.g. SMM memory on host). | |
2239 | * 2) VMware backdoor | |
2240 | */ | |
63129754 | 2241 | static int gp_interception(struct kvm_vcpu *vcpu) |
82a11e9c | 2242 | { |
63129754 | 2243 | struct vcpu_svm *svm = to_svm(vcpu); |
82a11e9c BD |
2244 | u32 error_code = svm->vmcb->control.exit_info_1; |
2245 | int opcode; | |
2246 | ||
2247 | /* Both #GP cases have zero error_code */ | |
2248 | if (error_code) | |
2249 | goto reinject; | |
2250 | ||
2251 | /* Decode the instruction for usage later */ | |
2252 | if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK) | |
2253 | goto reinject; | |
2254 | ||
2255 | opcode = svm_instr_opcode(vcpu); | |
2256 | ||
2257 | if (opcode == NONE_SVM_INSTR) { | |
2258 | if (!enable_vmware_backdoor) | |
2259 | goto reinject; | |
2260 | ||
2261 | /* | |
2262 | * VMware backdoor emulation on #GP interception only handles | |
2263 | * IN{S}, OUT{S}, and RDPMC. | |
2264 | */ | |
14c2bf81 WH |
2265 | if (!is_guest_mode(vcpu)) |
2266 | return kvm_emulate_instruction(vcpu, | |
82a11e9c BD |
2267 | EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE); |
2268 | } else | |
2269 | return emulate_svm_instr(vcpu, opcode); | |
2270 | ||
2271 | reinject: | |
2272 | kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); | |
2273 | return 1; | |
2274 | } | |
2275 | ||
ffdf7f9e PB |
2276 | void svm_set_gif(struct vcpu_svm *svm, bool value) |
2277 | { | |
2278 | if (value) { | |
2279 | /* | |
2280 | * If VGIF is enabled, the STGI intercept is only added to | |
2281 | * detect the opening of the SMI/NMI window; remove it now. | |
2282 | * Likewise, clear the VINTR intercept, we will set it | |
2283 | * again while processing KVM_REQ_EVENT if needed. | |
2284 | */ | |
2285 | if (vgif_enabled(svm)) | |
a284ba56 JR |
2286 | svm_clr_intercept(svm, INTERCEPT_STGI); |
2287 | if (svm_is_intercept(svm, INTERCEPT_VINTR)) | |
ffdf7f9e PB |
2288 | svm_clear_vintr(svm); |
2289 | ||
2290 | enable_gif(svm); | |
2291 | if (svm->vcpu.arch.smi_pending || | |
2292 | svm->vcpu.arch.nmi_pending || | |
2293 | kvm_cpu_has_injectable_intr(&svm->vcpu)) | |
2294 | kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); | |
2295 | } else { | |
2296 | disable_gif(svm); | |
2297 | ||
2298 | /* | |
2299 | * After a CLGI no interrupts should come. But if vGIF is | |
2300 | * in use, we still rely on the VINTR intercept (rather than | |
2301 | * STGI) to detect an open interrupt window. | |
2302 | */ | |
2303 | if (!vgif_enabled(svm)) | |
2304 | svm_clear_vintr(svm); | |
2305 | } | |
2306 | } | |
2307 | ||
63129754 | 2308 | static int stgi_interception(struct kvm_vcpu *vcpu) |
1371d904 | 2309 | { |
b742c1e6 LP |
2310 | int ret; |
2311 | ||
63129754 | 2312 | if (nested_svm_check_permissions(vcpu)) |
1371d904 AG |
2313 | return 1; |
2314 | ||
63129754 PB |
2315 | ret = kvm_skip_emulated_instruction(vcpu); |
2316 | svm_set_gif(to_svm(vcpu), true); | |
b742c1e6 | 2317 | return ret; |
1371d904 AG |
2318 | } |
2319 | ||
63129754 | 2320 | static int clgi_interception(struct kvm_vcpu *vcpu) |
1371d904 | 2321 | { |
b742c1e6 LP |
2322 | int ret; |
2323 | ||
63129754 | 2324 | if (nested_svm_check_permissions(vcpu)) |
1371d904 AG |
2325 | return 1; |
2326 | ||
63129754 PB |
2327 | ret = kvm_skip_emulated_instruction(vcpu); |
2328 | svm_set_gif(to_svm(vcpu), false); | |
b742c1e6 | 2329 | return ret; |
1371d904 AG |
2330 | } |
2331 | ||
63129754 | 2332 | static int invlpga_interception(struct kvm_vcpu *vcpu) |
ff092385 | 2333 | { |
bc9eff67 SC |
2334 | gva_t gva = kvm_rax_read(vcpu); |
2335 | u32 asid = kvm_rcx_read(vcpu); | |
ff092385 | 2336 | |
bc9eff67 SC |
2337 | /* FIXME: Handle an address size prefix. */ |
2338 | if (!is_long_mode(vcpu)) | |
2339 | gva = (u32)gva; | |
ff092385 | 2340 | |
bc9eff67 | 2341 | trace_kvm_invlpga(to_svm(vcpu)->vmcb->save.rip, asid, gva); |
532a46b9 | 2342 | |
ff092385 | 2343 | /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */ |
bc9eff67 | 2344 | kvm_mmu_invlpg(vcpu, gva); |
532a46b9 | 2345 | |
63129754 | 2346 | return kvm_skip_emulated_instruction(vcpu); |
dab429a7 DK |
2347 | } |
2348 | ||
63129754 | 2349 | static int skinit_interception(struct kvm_vcpu *vcpu) |
81dd35d4 | 2350 | { |
63129754 | 2351 | trace_kvm_skinit(to_svm(vcpu)->vmcb->save.rip, kvm_rax_read(vcpu)); |
81dd35d4 | 2352 | |
63129754 | 2353 | kvm_queue_exception(vcpu, UD_VECTOR); |
0cb8410b JM |
2354 | return 1; |
2355 | } | |
2356 | ||
63129754 | 2357 | static int task_switch_interception(struct kvm_vcpu *vcpu) |
6aa8b732 | 2358 | { |
63129754 | 2359 | struct vcpu_svm *svm = to_svm(vcpu); |
37817f29 | 2360 | u16 tss_selector; |
64a7ec06 GN |
2361 | int reason; |
2362 | int int_type = svm->vmcb->control.exit_int_info & | |
2363 | SVM_EXITINTINFO_TYPE_MASK; | |
8317c298 | 2364 | int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK; |
fe8e7f83 GN |
2365 | uint32_t type = |
2366 | svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK; | |
2367 | uint32_t idt_v = | |
2368 | svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID; | |
e269fb21 JK |
2369 | bool has_error_code = false; |
2370 | u32 error_code = 0; | |
37817f29 IE |
2371 | |
2372 | tss_selector = (u16)svm->vmcb->control.exit_info_1; | |
64a7ec06 | 2373 | |
37817f29 IE |
2374 | if (svm->vmcb->control.exit_info_2 & |
2375 | (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET)) | |
64a7ec06 GN |
2376 | reason = TASK_SWITCH_IRET; |
2377 | else if (svm->vmcb->control.exit_info_2 & | |
2378 | (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP)) | |
2379 | reason = TASK_SWITCH_JMP; | |
fe8e7f83 | 2380 | else if (idt_v) |
64a7ec06 GN |
2381 | reason = TASK_SWITCH_GATE; |
2382 | else | |
2383 | reason = TASK_SWITCH_CALL; | |
2384 | ||
fe8e7f83 GN |
2385 | if (reason == TASK_SWITCH_GATE) { |
2386 | switch (type) { | |
2387 | case SVM_EXITINTINFO_TYPE_NMI: | |
63129754 | 2388 | vcpu->arch.nmi_injected = false; |
fe8e7f83 GN |
2389 | break; |
2390 | case SVM_EXITINTINFO_TYPE_EXEPT: | |
e269fb21 JK |
2391 | if (svm->vmcb->control.exit_info_2 & |
2392 | (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) { | |
2393 | has_error_code = true; | |
2394 | error_code = | |
2395 | (u32)svm->vmcb->control.exit_info_2; | |
2396 | } | |
63129754 | 2397 | kvm_clear_exception_queue(vcpu); |
fe8e7f83 GN |
2398 | break; |
2399 | case SVM_EXITINTINFO_TYPE_INTR: | |
63129754 | 2400 | kvm_clear_interrupt_queue(vcpu); |
fe8e7f83 GN |
2401 | break; |
2402 | default: | |
2403 | break; | |
2404 | } | |
2405 | } | |
64a7ec06 | 2406 | |
8317c298 GN |
2407 | if (reason != TASK_SWITCH_GATE || |
2408 | int_type == SVM_EXITINTINFO_TYPE_SOFT || | |
2409 | (int_type == SVM_EXITINTINFO_TYPE_EXEPT && | |
f8ea7c60 | 2410 | (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) { |
63129754 | 2411 | if (!skip_emulated_instruction(vcpu)) |
738fece4 | 2412 | return 0; |
f8ea7c60 | 2413 | } |
64a7ec06 | 2414 | |
7f3d35fd KW |
2415 | if (int_type != SVM_EXITINTINFO_TYPE_SOFT) |
2416 | int_vec = -1; | |
2417 | ||
63129754 | 2418 | return kvm_task_switch(vcpu, tss_selector, int_vec, reason, |
60fc3d02 | 2419 | has_error_code, error_code); |
6aa8b732 AK |
2420 | } |
2421 | ||
63129754 | 2422 | static int iret_interception(struct kvm_vcpu *vcpu) |
6aa8b732 | 2423 | { |
63129754 | 2424 | struct vcpu_svm *svm = to_svm(vcpu); |
6aa8b732 | 2425 | |
63129754 PB |
2426 | ++vcpu->stat.nmi_window_exits; |
2427 | vcpu->arch.hflags |= HF_IRET_MASK; | |
2428 | if (!sev_es_guest(vcpu->kvm)) { | |
4444dfe4 | 2429 | svm_clr_intercept(svm, INTERCEPT_IRET); |
63129754 | 2430 | svm->nmi_iret_rip = kvm_rip_read(vcpu); |
4444dfe4 | 2431 | } |
63129754 | 2432 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
95ba8273 GN |
2433 | return 1; |
2434 | } | |
2435 | ||
63129754 | 2436 | static int invlpg_interception(struct kvm_vcpu *vcpu) |
a7052897 | 2437 | { |
df4f3108 | 2438 | if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) |
63129754 | 2439 | return kvm_emulate_instruction(vcpu, 0); |
df4f3108 | 2440 | |
63129754 PB |
2441 | kvm_mmu_invlpg(vcpu, to_svm(vcpu)->vmcb->control.exit_info_1); |
2442 | return kvm_skip_emulated_instruction(vcpu); | |
a7052897 MT |
2443 | } |
2444 | ||
63129754 | 2445 | static int emulate_on_interception(struct kvm_vcpu *vcpu) |
6aa8b732 | 2446 | { |
63129754 | 2447 | return kvm_emulate_instruction(vcpu, 0); |
6aa8b732 AK |
2448 | } |
2449 | ||
63129754 | 2450 | static int rsm_interception(struct kvm_vcpu *vcpu) |
7607b717 | 2451 | { |
63129754 | 2452 | return kvm_emulate_instruction_from_buffer(vcpu, rsm_ins_bytes, 2); |
7607b717 BS |
2453 | } |
2454 | ||
63129754 | 2455 | static bool check_selective_cr0_intercepted(struct kvm_vcpu *vcpu, |
52eb5a6d | 2456 | unsigned long val) |
628afd2a | 2457 | { |
63129754 PB |
2458 | struct vcpu_svm *svm = to_svm(vcpu); |
2459 | unsigned long cr0 = vcpu->arch.cr0; | |
628afd2a | 2460 | bool ret = false; |
628afd2a | 2461 | |
63129754 | 2462 | if (!is_guest_mode(vcpu) || |
c62e2e94 | 2463 | (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0)))) |
628afd2a JR |
2464 | return false; |
2465 | ||
2466 | cr0 &= ~SVM_CR0_SELECTIVE_MASK; | |
2467 | val &= ~SVM_CR0_SELECTIVE_MASK; | |
2468 | ||
2469 | if (cr0 ^ val) { | |
2470 | svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE; | |
2471 | ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE); | |
2472 | } | |
2473 | ||
2474 | return ret; | |
2475 | } | |
2476 | ||
7ff76d58 AP |
2477 | #define CR_VALID (1ULL << 63) |
2478 | ||
63129754 | 2479 | static int cr_interception(struct kvm_vcpu *vcpu) |
7ff76d58 | 2480 | { |
63129754 | 2481 | struct vcpu_svm *svm = to_svm(vcpu); |
7ff76d58 AP |
2482 | int reg, cr; |
2483 | unsigned long val; | |
2484 | int err; | |
2485 | ||
2486 | if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) | |
63129754 | 2487 | return emulate_on_interception(vcpu); |
7ff76d58 AP |
2488 | |
2489 | if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0)) | |
63129754 | 2490 | return emulate_on_interception(vcpu); |
7ff76d58 AP |
2491 | |
2492 | reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; | |
5e57518d DK |
2493 | if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE) |
2494 | cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0; | |
2495 | else | |
2496 | cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0; | |
7ff76d58 AP |
2497 | |
2498 | err = 0; | |
2499 | if (cr >= 16) { /* mov to cr */ | |
2500 | cr -= 16; | |
27b4a9c4 | 2501 | val = kvm_register_read(vcpu, reg); |
95b28ac9 | 2502 | trace_kvm_cr_write(cr, val); |
7ff76d58 AP |
2503 | switch (cr) { |
2504 | case 0: | |
63129754 PB |
2505 | if (!check_selective_cr0_intercepted(vcpu, val)) |
2506 | err = kvm_set_cr0(vcpu, val); | |
977b2d03 JR |
2507 | else |
2508 | return 1; | |
2509 | ||
7ff76d58 AP |
2510 | break; |
2511 | case 3: | |
63129754 | 2512 | err = kvm_set_cr3(vcpu, val); |
7ff76d58 AP |
2513 | break; |
2514 | case 4: | |
63129754 | 2515 | err = kvm_set_cr4(vcpu, val); |
7ff76d58 AP |
2516 | break; |
2517 | case 8: | |
63129754 | 2518 | err = kvm_set_cr8(vcpu, val); |
7ff76d58 AP |
2519 | break; |
2520 | default: | |
2521 | WARN(1, "unhandled write to CR%d", cr); | |
63129754 | 2522 | kvm_queue_exception(vcpu, UD_VECTOR); |
7ff76d58 AP |
2523 | return 1; |
2524 | } | |
2525 | } else { /* mov from cr */ | |
2526 | switch (cr) { | |
2527 | case 0: | |
63129754 | 2528 | val = kvm_read_cr0(vcpu); |
7ff76d58 AP |
2529 | break; |
2530 | case 2: | |
63129754 | 2531 | val = vcpu->arch.cr2; |
7ff76d58 AP |
2532 | break; |
2533 | case 3: | |
63129754 | 2534 | val = kvm_read_cr3(vcpu); |
7ff76d58 AP |
2535 | break; |
2536 | case 4: | |
63129754 | 2537 | val = kvm_read_cr4(vcpu); |
7ff76d58 AP |
2538 | break; |
2539 | case 8: | |
63129754 | 2540 | val = kvm_get_cr8(vcpu); |
7ff76d58 AP |
2541 | break; |
2542 | default: | |
2543 | WARN(1, "unhandled read from CR%d", cr); | |
63129754 | 2544 | kvm_queue_exception(vcpu, UD_VECTOR); |
7ff76d58 AP |
2545 | return 1; |
2546 | } | |
27b4a9c4 | 2547 | kvm_register_write(vcpu, reg, val); |
95b28ac9 | 2548 | trace_kvm_cr_read(cr, val); |
7ff76d58 | 2549 | } |
63129754 | 2550 | return kvm_complete_insn_gp(vcpu, err); |
7ff76d58 AP |
2551 | } |
2552 | ||
63129754 | 2553 | static int cr_trap(struct kvm_vcpu *vcpu) |
f27ad38a | 2554 | { |
63129754 | 2555 | struct vcpu_svm *svm = to_svm(vcpu); |
f27ad38a TL |
2556 | unsigned long old_value, new_value; |
2557 | unsigned int cr; | |
d1949b93 | 2558 | int ret = 0; |
f27ad38a TL |
2559 | |
2560 | new_value = (unsigned long)svm->vmcb->control.exit_info_1; | |
2561 | ||
2562 | cr = svm->vmcb->control.exit_code - SVM_EXIT_CR0_WRITE_TRAP; | |
2563 | switch (cr) { | |
2564 | case 0: | |
2565 | old_value = kvm_read_cr0(vcpu); | |
2566 | svm_set_cr0(vcpu, new_value); | |
2567 | ||
2568 | kvm_post_set_cr0(vcpu, old_value, new_value); | |
2569 | break; | |
5b51cb13 TL |
2570 | case 4: |
2571 | old_value = kvm_read_cr4(vcpu); | |
2572 | svm_set_cr4(vcpu, new_value); | |
2573 | ||
2574 | kvm_post_set_cr4(vcpu, old_value, new_value); | |
2575 | break; | |
d1949b93 | 2576 | case 8: |
63129754 | 2577 | ret = kvm_set_cr8(vcpu, new_value); |
d1949b93 | 2578 | break; |
f27ad38a TL |
2579 | default: |
2580 | WARN(1, "unhandled CR%d write trap", cr); | |
2581 | kvm_queue_exception(vcpu, UD_VECTOR); | |
2582 | return 1; | |
2583 | } | |
2584 | ||
d1949b93 | 2585 | return kvm_complete_insn_gp(vcpu, ret); |
f27ad38a TL |
2586 | } |
2587 | ||
63129754 | 2588 | static int dr_interception(struct kvm_vcpu *vcpu) |
cae3797a | 2589 | { |
63129754 | 2590 | struct vcpu_svm *svm = to_svm(vcpu); |
cae3797a AP |
2591 | int reg, dr; |
2592 | unsigned long val; | |
996ff542 | 2593 | int err = 0; |
cae3797a | 2594 | |
63129754 | 2595 | if (vcpu->guest_debug == 0) { |
facb0139 PB |
2596 | /* |
2597 | * No more DR vmexits; force a reload of the debug registers | |
2598 | * and reenter on this instruction. The next vmexit will | |
2599 | * retrieve the full state of the debug registers. | |
2600 | */ | |
2601 | clr_dr_intercepts(svm); | |
63129754 | 2602 | vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; |
facb0139 PB |
2603 | return 1; |
2604 | } | |
2605 | ||
cae3797a | 2606 | if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS)) |
63129754 | 2607 | return emulate_on_interception(vcpu); |
cae3797a AP |
2608 | |
2609 | reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; | |
2610 | dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0; | |
996ff542 PB |
2611 | if (dr >= 16) { /* mov to DRn */ |
2612 | dr -= 16; | |
27b4a9c4 | 2613 | val = kvm_register_read(vcpu, reg); |
63129754 | 2614 | err = kvm_set_dr(vcpu, dr, val); |
cae3797a | 2615 | } else { |
63129754 | 2616 | kvm_get_dr(vcpu, dr, &val); |
27b4a9c4 | 2617 | kvm_register_write(vcpu, reg, val); |
cae3797a AP |
2618 | } |
2619 | ||
63129754 | 2620 | return kvm_complete_insn_gp(vcpu, err); |
cae3797a AP |
2621 | } |
2622 | ||
63129754 | 2623 | static int cr8_write_interception(struct kvm_vcpu *vcpu) |
1d075434 | 2624 | { |
eea1cff9 | 2625 | int r; |
851ba692 | 2626 | |
63129754 | 2627 | u8 cr8_prev = kvm_get_cr8(vcpu); |
0a5fff19 | 2628 | /* instruction emulation calls kvm_set_cr8() */ |
63129754 PB |
2629 | r = cr_interception(vcpu); |
2630 | if (lapic_in_kernel(vcpu)) | |
7ff76d58 | 2631 | return r; |
63129754 | 2632 | if (cr8_prev <= kvm_get_cr8(vcpu)) |
7ff76d58 | 2633 | return r; |
63129754 | 2634 | vcpu->run->exit_reason = KVM_EXIT_SET_TPR; |
1d075434 JR |
2635 | return 0; |
2636 | } | |
2637 | ||
63129754 | 2638 | static int efer_trap(struct kvm_vcpu *vcpu) |
2985afbc TL |
2639 | { |
2640 | struct msr_data msr_info; | |
2641 | int ret; | |
2642 | ||
2643 | /* | |
2644 | * Clear the EFER_SVME bit from EFER. The SVM code always sets this | |
2645 | * bit in svm_set_efer(), but __kvm_valid_efer() checks it against | |
2646 | * whether the guest has X86_FEATURE_SVM - this avoids a failure if | |
2647 | * the guest doesn't have X86_FEATURE_SVM. | |
2648 | */ | |
2649 | msr_info.host_initiated = false; | |
2650 | msr_info.index = MSR_EFER; | |
63129754 PB |
2651 | msr_info.data = to_svm(vcpu)->vmcb->control.exit_info_1 & ~EFER_SVME; |
2652 | ret = kvm_set_msr_common(vcpu, &msr_info); | |
2985afbc | 2653 | |
63129754 | 2654 | return kvm_complete_insn_gp(vcpu, ret); |
2985afbc TL |
2655 | } |
2656 | ||
801e459a TL |
2657 | static int svm_get_msr_feature(struct kvm_msr_entry *msr) |
2658 | { | |
d1d93fa9 TL |
2659 | msr->data = 0; |
2660 | ||
2661 | switch (msr->index) { | |
2662 | case MSR_F10H_DECFG: | |
2663 | if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) | |
2664 | msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE; | |
2665 | break; | |
d574c539 VK |
2666 | case MSR_IA32_PERF_CAPABILITIES: |
2667 | return 0; | |
d1d93fa9 | 2668 | default: |
12bc2132 | 2669 | return KVM_MSR_RET_INVALID; |
d1d93fa9 TL |
2670 | } |
2671 | ||
2672 | return 0; | |
801e459a TL |
2673 | } |
2674 | ||
609e36d3 | 2675 | static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) |
6aa8b732 | 2676 | { |
a2fa3e9f GH |
2677 | struct vcpu_svm *svm = to_svm(vcpu); |
2678 | ||
609e36d3 | 2679 | switch (msr_info->index) { |
8c06585d | 2680 | case MSR_STAR: |
cc3ed80a | 2681 | msr_info->data = svm->vmcb01.ptr->save.star; |
6aa8b732 | 2682 | break; |
0e859cac | 2683 | #ifdef CONFIG_X86_64 |
6aa8b732 | 2684 | case MSR_LSTAR: |
cc3ed80a | 2685 | msr_info->data = svm->vmcb01.ptr->save.lstar; |
6aa8b732 AK |
2686 | break; |
2687 | case MSR_CSTAR: | |
cc3ed80a | 2688 | msr_info->data = svm->vmcb01.ptr->save.cstar; |
6aa8b732 AK |
2689 | break; |
2690 | case MSR_KERNEL_GS_BASE: | |
cc3ed80a | 2691 | msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base; |
6aa8b732 AK |
2692 | break; |
2693 | case MSR_SYSCALL_MASK: | |
cc3ed80a | 2694 | msr_info->data = svm->vmcb01.ptr->save.sfmask; |
6aa8b732 AK |
2695 | break; |
2696 | #endif | |
2697 | case MSR_IA32_SYSENTER_CS: | |
cc3ed80a | 2698 | msr_info->data = svm->vmcb01.ptr->save.sysenter_cs; |
6aa8b732 AK |
2699 | break; |
2700 | case MSR_IA32_SYSENTER_EIP: | |
adc2a237 ML |
2701 | msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip; |
2702 | if (guest_cpuid_is_intel(vcpu)) | |
2703 | msr_info->data |= (u64)svm->sysenter_eip_hi << 32; | |
6aa8b732 AK |
2704 | break; |
2705 | case MSR_IA32_SYSENTER_ESP: | |
adc2a237 ML |
2706 | msr_info->data = svm->vmcb01.ptr->save.sysenter_esp; |
2707 | if (guest_cpuid_is_intel(vcpu)) | |
2708 | msr_info->data |= (u64)svm->sysenter_esp_hi << 32; | |
6aa8b732 | 2709 | break; |
46896c73 | 2710 | case MSR_TSC_AUX: |
46896c73 PB |
2711 | msr_info->data = svm->tsc_aux; |
2712 | break; | |
e0231715 JR |
2713 | /* |
2714 | * Nobody will change the following 5 values in the VMCB so we can | |
2715 | * safely return them on rdmsr. They will always be 0 until LBRV is | |
2716 | * implemented. | |
2717 | */ | |
a2938c80 | 2718 | case MSR_IA32_DEBUGCTLMSR: |
609e36d3 | 2719 | msr_info->data = svm->vmcb->save.dbgctl; |
a2938c80 JR |
2720 | break; |
2721 | case MSR_IA32_LASTBRANCHFROMIP: | |
609e36d3 | 2722 | msr_info->data = svm->vmcb->save.br_from; |
a2938c80 JR |
2723 | break; |
2724 | case MSR_IA32_LASTBRANCHTOIP: | |
609e36d3 | 2725 | msr_info->data = svm->vmcb->save.br_to; |
a2938c80 JR |
2726 | break; |
2727 | case MSR_IA32_LASTINTFROMIP: | |
609e36d3 | 2728 | msr_info->data = svm->vmcb->save.last_excp_from; |
a2938c80 JR |
2729 | break; |
2730 | case MSR_IA32_LASTINTTOIP: | |
609e36d3 | 2731 | msr_info->data = svm->vmcb->save.last_excp_to; |
a2938c80 | 2732 | break; |
b286d5d8 | 2733 | case MSR_VM_HSAVE_PA: |
609e36d3 | 2734 | msr_info->data = svm->nested.hsave_msr; |
b286d5d8 | 2735 | break; |
eb6f302e | 2736 | case MSR_VM_CR: |
609e36d3 | 2737 | msr_info->data = svm->nested.vm_cr_msr; |
eb6f302e | 2738 | break; |
b2ac58f9 KA |
2739 | case MSR_IA32_SPEC_CTRL: |
2740 | if (!msr_info->host_initiated && | |
39485ed9 | 2741 | !guest_has_spec_ctrl_msr(vcpu)) |
b2ac58f9 KA |
2742 | return 1; |
2743 | ||
d00b99c5 BM |
2744 | if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL)) |
2745 | msr_info->data = svm->vmcb->save.spec_ctrl; | |
2746 | else | |
2747 | msr_info->data = svm->spec_ctrl; | |
b2ac58f9 | 2748 | break; |
bc226f07 TL |
2749 | case MSR_AMD64_VIRT_SPEC_CTRL: |
2750 | if (!msr_info->host_initiated && | |
2751 | !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD)) | |
2752 | return 1; | |
2753 | ||
2754 | msr_info->data = svm->virt_spec_ctrl; | |
2755 | break; | |
ae8b7875 BP |
2756 | case MSR_F15H_IC_CFG: { |
2757 | ||
2758 | int family, model; | |
2759 | ||
2760 | family = guest_cpuid_family(vcpu); | |
2761 | model = guest_cpuid_model(vcpu); | |
2762 | ||
2763 | if (family < 0 || model < 0) | |
2764 | return kvm_get_msr_common(vcpu, msr_info); | |
2765 | ||
2766 | msr_info->data = 0; | |
2767 | ||
2768 | if (family == 0x15 && | |
2769 | (model >= 0x2 && model < 0x20)) | |
2770 | msr_info->data = 0x1E; | |
2771 | } | |
2772 | break; | |
d1d93fa9 TL |
2773 | case MSR_F10H_DECFG: |
2774 | msr_info->data = svm->msr_decfg; | |
2775 | break; | |
6aa8b732 | 2776 | default: |
609e36d3 | 2777 | return kvm_get_msr_common(vcpu, msr_info); |
6aa8b732 AK |
2778 | } |
2779 | return 0; | |
2780 | } | |
2781 | ||
f1c6366e TL |
2782 | static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err) |
2783 | { | |
2784 | struct vcpu_svm *svm = to_svm(vcpu); | |
a3ba26ec | 2785 | if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->ghcb)) |
63129754 | 2786 | return kvm_complete_insn_gp(vcpu, err); |
f1c6366e TL |
2787 | |
2788 | ghcb_set_sw_exit_info_1(svm->ghcb, 1); | |
2789 | ghcb_set_sw_exit_info_2(svm->ghcb, | |
2790 | X86_TRAP_GP | | |
2791 | SVM_EVTINJ_TYPE_EXEPT | | |
2792 | SVM_EVTINJ_VALID); | |
2793 | return 1; | |
2794 | } | |
2795 | ||
4a810181 JR |
2796 | static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data) |
2797 | { | |
2798 | struct vcpu_svm *svm = to_svm(vcpu); | |
2799 | int svm_dis, chg_mask; | |
2800 | ||
2801 | if (data & ~SVM_VM_CR_VALID_MASK) | |
2802 | return 1; | |
2803 | ||
2804 | chg_mask = SVM_VM_CR_VALID_MASK; | |
2805 | ||
2806 | if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK) | |
2807 | chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK); | |
2808 | ||
2809 | svm->nested.vm_cr_msr &= ~chg_mask; | |
2810 | svm->nested.vm_cr_msr |= (data & chg_mask); | |
2811 | ||
2812 | svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK; | |
2813 | ||
2814 | /* check for svm_disable while efer.svme is set */ | |
2815 | if (svm_dis && (vcpu->arch.efer & EFER_SVME)) | |
2816 | return 1; | |
2817 | ||
2818 | return 0; | |
2819 | } | |
2820 | ||
8fe8ab46 | 2821 | static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) |
6aa8b732 | 2822 | { |
a2fa3e9f | 2823 | struct vcpu_svm *svm = to_svm(vcpu); |
844d69c2 | 2824 | int r; |
a2fa3e9f | 2825 | |
8fe8ab46 WA |
2826 | u32 ecx = msr->index; |
2827 | u64 data = msr->data; | |
6aa8b732 | 2828 | switch (ecx) { |
15038e14 PB |
2829 | case MSR_IA32_CR_PAT: |
2830 | if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) | |
2831 | return 1; | |
2832 | vcpu->arch.pat = data; | |
4995a368 CA |
2833 | svm->vmcb01.ptr->save.g_pat = data; |
2834 | if (is_guest_mode(vcpu)) | |
2835 | nested_vmcb02_compute_g_pat(svm); | |
06e7852c | 2836 | vmcb_mark_dirty(svm->vmcb, VMCB_NPT); |
15038e14 | 2837 | break; |
b2ac58f9 KA |
2838 | case MSR_IA32_SPEC_CTRL: |
2839 | if (!msr->host_initiated && | |
39485ed9 | 2840 | !guest_has_spec_ctrl_msr(vcpu)) |
b2ac58f9 KA |
2841 | return 1; |
2842 | ||
841c2be0 | 2843 | if (kvm_spec_ctrl_test_value(data)) |
b2ac58f9 KA |
2844 | return 1; |
2845 | ||
d00b99c5 BM |
2846 | if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL)) |
2847 | svm->vmcb->save.spec_ctrl = data; | |
2848 | else | |
2849 | svm->spec_ctrl = data; | |
b2ac58f9 KA |
2850 | if (!data) |
2851 | break; | |
2852 | ||
2853 | /* | |
2854 | * For non-nested: | |
2855 | * When it's written (to non-zero) for the first time, pass | |
2856 | * it through. | |
2857 | * | |
2858 | * For nested: | |
2859 | * The handling of the MSR bitmap for L2 guests is done in | |
2860 | * nested_svm_vmrun_msrpm. | |
2861 | * We update the L1 MSR bit as well since it will end up | |
2862 | * touching the MSR anyway now. | |
2863 | */ | |
476c9bd8 | 2864 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1); |
b2ac58f9 | 2865 | break; |
15d45071 AR |
2866 | case MSR_IA32_PRED_CMD: |
2867 | if (!msr->host_initiated && | |
39485ed9 | 2868 | !guest_has_pred_cmd_msr(vcpu)) |
15d45071 AR |
2869 | return 1; |
2870 | ||
2871 | if (data & ~PRED_CMD_IBPB) | |
2872 | return 1; | |
39485ed9 | 2873 | if (!boot_cpu_has(X86_FEATURE_IBPB)) |
6441fa61 | 2874 | return 1; |
15d45071 AR |
2875 | if (!data) |
2876 | break; | |
2877 | ||
2878 | wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); | |
476c9bd8 | 2879 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0, 1); |
15d45071 | 2880 | break; |
bc226f07 TL |
2881 | case MSR_AMD64_VIRT_SPEC_CTRL: |
2882 | if (!msr->host_initiated && | |
2883 | !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD)) | |
2884 | return 1; | |
2885 | ||
2886 | if (data & ~SPEC_CTRL_SSBD) | |
2887 | return 1; | |
2888 | ||
2889 | svm->virt_spec_ctrl = data; | |
2890 | break; | |
8c06585d | 2891 | case MSR_STAR: |
cc3ed80a | 2892 | svm->vmcb01.ptr->save.star = data; |
6aa8b732 | 2893 | break; |
49b14f24 | 2894 | #ifdef CONFIG_X86_64 |
6aa8b732 | 2895 | case MSR_LSTAR: |
cc3ed80a | 2896 | svm->vmcb01.ptr->save.lstar = data; |
6aa8b732 AK |
2897 | break; |
2898 | case MSR_CSTAR: | |
cc3ed80a | 2899 | svm->vmcb01.ptr->save.cstar = data; |
6aa8b732 AK |
2900 | break; |
2901 | case MSR_KERNEL_GS_BASE: | |
cc3ed80a | 2902 | svm->vmcb01.ptr->save.kernel_gs_base = data; |
6aa8b732 AK |
2903 | break; |
2904 | case MSR_SYSCALL_MASK: | |
cc3ed80a | 2905 | svm->vmcb01.ptr->save.sfmask = data; |
6aa8b732 AK |
2906 | break; |
2907 | #endif | |
2908 | case MSR_IA32_SYSENTER_CS: | |
cc3ed80a | 2909 | svm->vmcb01.ptr->save.sysenter_cs = data; |
6aa8b732 AK |
2910 | break; |
2911 | case MSR_IA32_SYSENTER_EIP: | |
adc2a237 ML |
2912 | svm->vmcb01.ptr->save.sysenter_eip = (u32)data; |
2913 | /* | |
2914 | * We only intercept the MSR_IA32_SYSENTER_{EIP|ESP} msrs | |
2915 | * when we spoof an Intel vendor ID (for cross vendor migration). | |
2916 | * In this case we use this intercept to track the high | |
2917 | * 32 bit part of these msrs to support Intel's | |
2918 | * implementation of SYSENTER/SYSEXIT. | |
2919 | */ | |
2920 | svm->sysenter_eip_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0; | |
6aa8b732 AK |
2921 | break; |
2922 | case MSR_IA32_SYSENTER_ESP: | |
adc2a237 ML |
2923 | svm->vmcb01.ptr->save.sysenter_esp = (u32)data; |
2924 | svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0; | |
6aa8b732 | 2925 | break; |
46896c73 | 2926 | case MSR_TSC_AUX: |
46896c73 | 2927 | /* |
844d69c2 SC |
2928 | * TSC_AUX is usually changed only during boot and never read |
2929 | * directly. Intercept TSC_AUX instead of exposing it to the | |
2930 | * guest via direct_access_msrs, and switch it via user return. | |
46896c73 | 2931 | */ |
844d69c2 | 2932 | preempt_disable(); |
0caa0a77 | 2933 | r = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull); |
844d69c2 SC |
2934 | preempt_enable(); |
2935 | if (r) | |
2936 | return 1; | |
2937 | ||
46896c73 | 2938 | svm->tsc_aux = data; |
46896c73 | 2939 | break; |
a2938c80 | 2940 | case MSR_IA32_DEBUGCTLMSR: |
2a6b20b8 | 2941 | if (!boot_cpu_has(X86_FEATURE_LBRV)) { |
a737f256 CD |
2942 | vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n", |
2943 | __func__, data); | |
24e09cbf JR |
2944 | break; |
2945 | } | |
2946 | if (data & DEBUGCTL_RESERVED_BITS) | |
2947 | return 1; | |
2948 | ||
2949 | svm->vmcb->save.dbgctl = data; | |
06e7852c | 2950 | vmcb_mark_dirty(svm->vmcb, VMCB_LBR); |
24e09cbf | 2951 | if (data & (1ULL<<0)) |
476c9bd8 | 2952 | svm_enable_lbrv(vcpu); |
24e09cbf | 2953 | else |
476c9bd8 | 2954 | svm_disable_lbrv(vcpu); |
a2938c80 | 2955 | break; |
b286d5d8 | 2956 | case MSR_VM_HSAVE_PA: |
e6aa9abd | 2957 | svm->nested.hsave_msr = data; |
62b9abaa | 2958 | break; |
3c5d0a44 | 2959 | case MSR_VM_CR: |
4a810181 | 2960 | return svm_set_vm_cr(vcpu, data); |
3c5d0a44 | 2961 | case MSR_VM_IGNNE: |
a737f256 | 2962 | vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); |
3c5d0a44 | 2963 | break; |
d1d93fa9 TL |
2964 | case MSR_F10H_DECFG: { |
2965 | struct kvm_msr_entry msr_entry; | |
2966 | ||
2967 | msr_entry.index = msr->index; | |
2968 | if (svm_get_msr_feature(&msr_entry)) | |
2969 | return 1; | |
2970 | ||
2971 | /* Check the supported bits */ | |
2972 | if (data & ~msr_entry.data) | |
2973 | return 1; | |
2974 | ||
2975 | /* Don't allow the guest to change a bit, #GP */ | |
2976 | if (!msr->host_initiated && (data ^ msr_entry.data)) | |
2977 | return 1; | |
2978 | ||
2979 | svm->msr_decfg = data; | |
2980 | break; | |
2981 | } | |
44a95dae SS |
2982 | case MSR_IA32_APICBASE: |
2983 | if (kvm_vcpu_apicv_active(vcpu)) | |
2984 | avic_update_vapic_bar(to_svm(vcpu), data); | |
df561f66 | 2985 | fallthrough; |
6aa8b732 | 2986 | default: |
8fe8ab46 | 2987 | return kvm_set_msr_common(vcpu, msr); |
6aa8b732 AK |
2988 | } |
2989 | return 0; | |
2990 | } | |
2991 | ||
63129754 | 2992 | static int msr_interception(struct kvm_vcpu *vcpu) |
6aa8b732 | 2993 | { |
63129754 | 2994 | if (to_svm(vcpu)->vmcb->control.exit_info_1) |
5ff3a351 | 2995 | return kvm_emulate_wrmsr(vcpu); |
6aa8b732 | 2996 | else |
5ff3a351 | 2997 | return kvm_emulate_rdmsr(vcpu); |
6aa8b732 AK |
2998 | } |
2999 | ||
63129754 | 3000 | static int interrupt_window_interception(struct kvm_vcpu *vcpu) |
c1150d8c | 3001 | { |
63129754 PB |
3002 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
3003 | svm_clear_vintr(to_svm(vcpu)); | |
f3515dc3 SS |
3004 | |
3005 | /* | |
3006 | * For AVIC, the only reason to end up here is ExtINTs. | |
3007 | * In this case AVIC was temporarily disabled for | |
3008 | * requesting the IRQ window and we have to re-enable it. | |
3009 | */ | |
63129754 | 3010 | svm_toggle_avic_for_irq_window(vcpu, true); |
f3515dc3 | 3011 | |
63129754 | 3012 | ++vcpu->stat.irq_window_exits; |
c1150d8c DL |
3013 | return 1; |
3014 | } | |
3015 | ||
63129754 | 3016 | static int pause_interception(struct kvm_vcpu *vcpu) |
565d0998 | 3017 | { |
f1c6366e TL |
3018 | bool in_kernel; |
3019 | ||
3020 | /* | |
3021 | * CPL is not made available for an SEV-ES guest, therefore | |
3022 | * vcpu->arch.preempted_in_kernel can never be true. Just | |
3023 | * set in_kernel to false as well. | |
3024 | */ | |
63129754 | 3025 | in_kernel = !sev_es_guest(vcpu->kvm) && svm_get_cpl(vcpu) == 0; |
de63ad4c | 3026 | |
830f01b0 | 3027 | if (!kvm_pause_in_guest(vcpu->kvm)) |
8566ac8b BM |
3028 | grow_ple_window(vcpu); |
3029 | ||
de63ad4c | 3030 | kvm_vcpu_on_spin(vcpu, in_kernel); |
c8781fea | 3031 | return kvm_skip_emulated_instruction(vcpu); |
87c00572 GS |
3032 | } |
3033 | ||
63129754 | 3034 | static int invpcid_interception(struct kvm_vcpu *vcpu) |
87c00572 | 3035 | { |
63129754 | 3036 | struct vcpu_svm *svm = to_svm(vcpu); |
4407a797 BM |
3037 | unsigned long type; |
3038 | gva_t gva; | |
3039 | ||
3040 | if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) { | |
3041 | kvm_queue_exception(vcpu, UD_VECTOR); | |
3042 | return 1; | |
3043 | } | |
3044 | ||
3045 | /* | |
3046 | * For an INVPCID intercept: | |
3047 | * EXITINFO1 provides the linear address of the memory operand. | |
3048 | * EXITINFO2 provides the contents of the register operand. | |
3049 | */ | |
3050 | type = svm->vmcb->control.exit_info_2; | |
3051 | gva = svm->vmcb->control.exit_info_1; | |
3052 | ||
3053 | if (type > 3) { | |
3054 | kvm_inject_gp(vcpu, 0); | |
3055 | return 1; | |
3056 | } | |
3057 | ||
3058 | return kvm_handle_invpcid(vcpu, type, gva); | |
3059 | } | |
3060 | ||
63129754 | 3061 | static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = { |
7ff76d58 AP |
3062 | [SVM_EXIT_READ_CR0] = cr_interception, |
3063 | [SVM_EXIT_READ_CR3] = cr_interception, | |
3064 | [SVM_EXIT_READ_CR4] = cr_interception, | |
3065 | [SVM_EXIT_READ_CR8] = cr_interception, | |
5e57518d | 3066 | [SVM_EXIT_CR0_SEL_WRITE] = cr_interception, |
628afd2a | 3067 | [SVM_EXIT_WRITE_CR0] = cr_interception, |
7ff76d58 AP |
3068 | [SVM_EXIT_WRITE_CR3] = cr_interception, |
3069 | [SVM_EXIT_WRITE_CR4] = cr_interception, | |
e0231715 | 3070 | [SVM_EXIT_WRITE_CR8] = cr8_write_interception, |
cae3797a AP |
3071 | [SVM_EXIT_READ_DR0] = dr_interception, |
3072 | [SVM_EXIT_READ_DR1] = dr_interception, | |
3073 | [SVM_EXIT_READ_DR2] = dr_interception, | |
3074 | [SVM_EXIT_READ_DR3] = dr_interception, | |
3075 | [SVM_EXIT_READ_DR4] = dr_interception, | |
3076 | [SVM_EXIT_READ_DR5] = dr_interception, | |
3077 | [SVM_EXIT_READ_DR6] = dr_interception, | |
3078 | [SVM_EXIT_READ_DR7] = dr_interception, | |
3079 | [SVM_EXIT_WRITE_DR0] = dr_interception, | |
3080 | [SVM_EXIT_WRITE_DR1] = dr_interception, | |
3081 | [SVM_EXIT_WRITE_DR2] = dr_interception, | |
3082 | [SVM_EXIT_WRITE_DR3] = dr_interception, | |
3083 | [SVM_EXIT_WRITE_DR4] = dr_interception, | |
3084 | [SVM_EXIT_WRITE_DR5] = dr_interception, | |
3085 | [SVM_EXIT_WRITE_DR6] = dr_interception, | |
3086 | [SVM_EXIT_WRITE_DR7] = dr_interception, | |
d0bfb940 JK |
3087 | [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception, |
3088 | [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception, | |
7aa81cc0 | 3089 | [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception, |
e0231715 | 3090 | [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception, |
e0231715 | 3091 | [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception, |
54a20552 | 3092 | [SVM_EXIT_EXCP_BASE + AC_VECTOR] = ac_interception, |
9718420e | 3093 | [SVM_EXIT_EXCP_BASE + GP_VECTOR] = gp_interception, |
e0231715 | 3094 | [SVM_EXIT_INTR] = intr_interception, |
c47f098d | 3095 | [SVM_EXIT_NMI] = nmi_interception, |
991afbbe | 3096 | [SVM_EXIT_SMI] = smi_interception, |
c1150d8c | 3097 | [SVM_EXIT_VINTR] = interrupt_window_interception, |
32c23c7d | 3098 | [SVM_EXIT_RDPMC] = kvm_emulate_rdpmc, |
5ff3a351 | 3099 | [SVM_EXIT_CPUID] = kvm_emulate_cpuid, |
95ba8273 | 3100 | [SVM_EXIT_IRET] = iret_interception, |
5ff3a351 | 3101 | [SVM_EXIT_INVD] = kvm_emulate_invd, |
565d0998 | 3102 | [SVM_EXIT_PAUSE] = pause_interception, |
5ff3a351 | 3103 | [SVM_EXIT_HLT] = kvm_emulate_halt, |
a7052897 | 3104 | [SVM_EXIT_INVLPG] = invlpg_interception, |
ff092385 | 3105 | [SVM_EXIT_INVLPGA] = invlpga_interception, |
e0231715 | 3106 | [SVM_EXIT_IOIO] = io_interception, |
6aa8b732 AK |
3107 | [SVM_EXIT_MSR] = msr_interception, |
3108 | [SVM_EXIT_TASK_SWITCH] = task_switch_interception, | |
46fe4ddd | 3109 | [SVM_EXIT_SHUTDOWN] = shutdown_interception, |
3d6368ef | 3110 | [SVM_EXIT_VMRUN] = vmrun_interception, |
5ff3a351 | 3111 | [SVM_EXIT_VMMCALL] = kvm_emulate_hypercall, |
5542675b AG |
3112 | [SVM_EXIT_VMLOAD] = vmload_interception, |
3113 | [SVM_EXIT_VMSAVE] = vmsave_interception, | |
1371d904 AG |
3114 | [SVM_EXIT_STGI] = stgi_interception, |
3115 | [SVM_EXIT_CLGI] = clgi_interception, | |
532a46b9 | 3116 | [SVM_EXIT_SKINIT] = skinit_interception, |
3b195ac9 | 3117 | [SVM_EXIT_RDTSCP] = kvm_handle_invalid_op, |
5ff3a351 SC |
3118 | [SVM_EXIT_WBINVD] = kvm_emulate_wbinvd, |
3119 | [SVM_EXIT_MONITOR] = kvm_emulate_monitor, | |
3120 | [SVM_EXIT_MWAIT] = kvm_emulate_mwait, | |
92f9895c | 3121 | [SVM_EXIT_XSETBV] = kvm_emulate_xsetbv, |
5ff3a351 | 3122 | [SVM_EXIT_RDPRU] = kvm_handle_invalid_op, |
2985afbc | 3123 | [SVM_EXIT_EFER_WRITE_TRAP] = efer_trap, |
f27ad38a | 3124 | [SVM_EXIT_CR0_WRITE_TRAP] = cr_trap, |
5b51cb13 | 3125 | [SVM_EXIT_CR4_WRITE_TRAP] = cr_trap, |
d1949b93 | 3126 | [SVM_EXIT_CR8_WRITE_TRAP] = cr_trap, |
4407a797 | 3127 | [SVM_EXIT_INVPCID] = invpcid_interception, |
d0006530 | 3128 | [SVM_EXIT_NPF] = npf_interception, |
7607b717 | 3129 | [SVM_EXIT_RSM] = rsm_interception, |
18f40c53 SS |
3130 | [SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception, |
3131 | [SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception, | |
291bd20d | 3132 | [SVM_EXIT_VMGEXIT] = sev_handle_vmgexit, |
6aa8b732 AK |
3133 | }; |
3134 | ||
ae8cc059 | 3135 | static void dump_vmcb(struct kvm_vcpu *vcpu) |
3f10c846 JR |
3136 | { |
3137 | struct vcpu_svm *svm = to_svm(vcpu); | |
3138 | struct vmcb_control_area *control = &svm->vmcb->control; | |
3139 | struct vmcb_save_area *save = &svm->vmcb->save; | |
cc3ed80a | 3140 | struct vmcb_save_area *save01 = &svm->vmcb01.ptr->save; |
3f10c846 | 3141 | |
6f2f8453 PB |
3142 | if (!dump_invalid_vmcb) { |
3143 | pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n"); | |
3144 | return; | |
3145 | } | |
3146 | ||
18f63b15 JM |
3147 | pr_err("VMCB %p, last attempted VMRUN on CPU %d\n", |
3148 | svm->current_vmcb->ptr, vcpu->arch.last_vmentry_cpu); | |
3f10c846 | 3149 | pr_err("VMCB Control Area:\n"); |
03bfeeb9 BM |
3150 | pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff); |
3151 | pr_err("%-20s%04x\n", "cr_write:", control->intercepts[INTERCEPT_CR] >> 16); | |
30abaa88 BM |
3152 | pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff); |
3153 | pr_err("%-20s%04x\n", "dr_write:", control->intercepts[INTERCEPT_DR] >> 16); | |
9780d51d | 3154 | pr_err("%-20s%08x\n", "exceptions:", control->intercepts[INTERCEPT_EXCEPTION]); |
c62e2e94 BM |
3155 | pr_err("%-20s%08x %08x\n", "intercepts:", |
3156 | control->intercepts[INTERCEPT_WORD3], | |
3157 | control->intercepts[INTERCEPT_WORD4]); | |
ae8cc059 | 3158 | pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count); |
1d8fb44a BM |
3159 | pr_err("%-20s%d\n", "pause filter threshold:", |
3160 | control->pause_filter_thresh); | |
ae8cc059 JP |
3161 | pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa); |
3162 | pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa); | |
3163 | pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset); | |
3164 | pr_err("%-20s%d\n", "asid:", control->asid); | |
3165 | pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl); | |
3166 | pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl); | |
3167 | pr_err("%-20s%08x\n", "int_vector:", control->int_vector); | |
3168 | pr_err("%-20s%08x\n", "int_state:", control->int_state); | |
3169 | pr_err("%-20s%08x\n", "exit_code:", control->exit_code); | |
3170 | pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1); | |
3171 | pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2); | |
3172 | pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info); | |
3173 | pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err); | |
3174 | pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl); | |
3175 | pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3); | |
44a95dae | 3176 | pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar); |
291bd20d | 3177 | pr_err("%-20s%016llx\n", "ghcb:", control->ghcb_gpa); |
ae8cc059 JP |
3178 | pr_err("%-20s%08x\n", "event_inj:", control->event_inj); |
3179 | pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err); | |
0dc92119 | 3180 | pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext); |
ae8cc059 | 3181 | pr_err("%-20s%016llx\n", "next_rip:", control->next_rip); |
44a95dae SS |
3182 | pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page); |
3183 | pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id); | |
3184 | pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id); | |
376c6d28 | 3185 | pr_err("%-20s%016llx\n", "vmsa_pa:", control->vmsa_pa); |
3f10c846 | 3186 | pr_err("VMCB State Save Area:\n"); |
ae8cc059 JP |
3187 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", |
3188 | "es:", | |
3189 | save->es.selector, save->es.attrib, | |
3190 | save->es.limit, save->es.base); | |
3191 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", | |
3192 | "cs:", | |
3193 | save->cs.selector, save->cs.attrib, | |
3194 | save->cs.limit, save->cs.base); | |
3195 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", | |
3196 | "ss:", | |
3197 | save->ss.selector, save->ss.attrib, | |
3198 | save->ss.limit, save->ss.base); | |
3199 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", | |
3200 | "ds:", | |
3201 | save->ds.selector, save->ds.attrib, | |
3202 | save->ds.limit, save->ds.base); | |
3203 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", | |
3204 | "fs:", | |
cc3ed80a ML |
3205 | save01->fs.selector, save01->fs.attrib, |
3206 | save01->fs.limit, save01->fs.base); | |
ae8cc059 JP |
3207 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", |
3208 | "gs:", | |
cc3ed80a ML |
3209 | save01->gs.selector, save01->gs.attrib, |
3210 | save01->gs.limit, save01->gs.base); | |
ae8cc059 JP |
3211 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", |
3212 | "gdtr:", | |
3213 | save->gdtr.selector, save->gdtr.attrib, | |
3214 | save->gdtr.limit, save->gdtr.base); | |
3215 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", | |
3216 | "ldtr:", | |
cc3ed80a ML |
3217 | save01->ldtr.selector, save01->ldtr.attrib, |
3218 | save01->ldtr.limit, save01->ldtr.base); | |
ae8cc059 JP |
3219 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", |
3220 | "idtr:", | |
3221 | save->idtr.selector, save->idtr.attrib, | |
3222 | save->idtr.limit, save->idtr.base); | |
3223 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", | |
3224 | "tr:", | |
cc3ed80a ML |
3225 | save01->tr.selector, save01->tr.attrib, |
3226 | save01->tr.limit, save01->tr.base); | |
3f10c846 JR |
3227 | pr_err("cpl: %d efer: %016llx\n", |
3228 | save->cpl, save->efer); | |
ae8cc059 JP |
3229 | pr_err("%-15s %016llx %-13s %016llx\n", |
3230 | "cr0:", save->cr0, "cr2:", save->cr2); | |
3231 | pr_err("%-15s %016llx %-13s %016llx\n", | |
3232 | "cr3:", save->cr3, "cr4:", save->cr4); | |
3233 | pr_err("%-15s %016llx %-13s %016llx\n", | |
3234 | "dr6:", save->dr6, "dr7:", save->dr7); | |
3235 | pr_err("%-15s %016llx %-13s %016llx\n", | |
3236 | "rip:", save->rip, "rflags:", save->rflags); | |
3237 | pr_err("%-15s %016llx %-13s %016llx\n", | |
3238 | "rsp:", save->rsp, "rax:", save->rax); | |
3239 | pr_err("%-15s %016llx %-13s %016llx\n", | |
cc3ed80a | 3240 | "star:", save01->star, "lstar:", save01->lstar); |
ae8cc059 | 3241 | pr_err("%-15s %016llx %-13s %016llx\n", |
cc3ed80a | 3242 | "cstar:", save01->cstar, "sfmask:", save01->sfmask); |
ae8cc059 | 3243 | pr_err("%-15s %016llx %-13s %016llx\n", |
cc3ed80a ML |
3244 | "kernel_gs_base:", save01->kernel_gs_base, |
3245 | "sysenter_cs:", save01->sysenter_cs); | |
ae8cc059 | 3246 | pr_err("%-15s %016llx %-13s %016llx\n", |
cc3ed80a ML |
3247 | "sysenter_esp:", save01->sysenter_esp, |
3248 | "sysenter_eip:", save01->sysenter_eip); | |
ae8cc059 JP |
3249 | pr_err("%-15s %016llx %-13s %016llx\n", |
3250 | "gpat:", save->g_pat, "dbgctl:", save->dbgctl); | |
3251 | pr_err("%-15s %016llx %-13s %016llx\n", | |
3252 | "br_from:", save->br_from, "br_to:", save->br_to); | |
3253 | pr_err("%-15s %016llx %-13s %016llx\n", | |
3254 | "excp_from:", save->last_excp_from, | |
3255 | "excp_to:", save->last_excp_to); | |
3f10c846 JR |
3256 | } |
3257 | ||
e9093fd4 TL |
3258 | static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code) |
3259 | { | |
3260 | if (exit_code < ARRAY_SIZE(svm_exit_handlers) && | |
3261 | svm_exit_handlers[exit_code]) | |
3262 | return 0; | |
3263 | ||
3264 | vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%llx\n", exit_code); | |
3265 | dump_vmcb(vcpu); | |
3266 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
3267 | vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON; | |
3268 | vcpu->run->internal.ndata = 2; | |
3269 | vcpu->run->internal.data[0] = exit_code; | |
3270 | vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu; | |
3271 | ||
3272 | return -EINVAL; | |
3273 | } | |
3274 | ||
63129754 | 3275 | int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code) |
e9093fd4 | 3276 | { |
63129754 | 3277 | if (svm_handle_invalid_exit(vcpu, exit_code)) |
e9093fd4 TL |
3278 | return 0; |
3279 | ||
3280 | #ifdef CONFIG_RETPOLINE | |
3281 | if (exit_code == SVM_EXIT_MSR) | |
63129754 | 3282 | return msr_interception(vcpu); |
e9093fd4 | 3283 | else if (exit_code == SVM_EXIT_VINTR) |
63129754 | 3284 | return interrupt_window_interception(vcpu); |
e9093fd4 | 3285 | else if (exit_code == SVM_EXIT_INTR) |
63129754 | 3286 | return intr_interception(vcpu); |
e9093fd4 | 3287 | else if (exit_code == SVM_EXIT_HLT) |
5ff3a351 | 3288 | return kvm_emulate_halt(vcpu); |
e9093fd4 | 3289 | else if (exit_code == SVM_EXIT_NPF) |
63129754 | 3290 | return npf_interception(vcpu); |
e9093fd4 | 3291 | #endif |
63129754 | 3292 | return svm_exit_handlers[exit_code](vcpu); |
e9093fd4 TL |
3293 | } |
3294 | ||
235ba74f SC |
3295 | static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2, |
3296 | u32 *intr_info, u32 *error_code) | |
586f9607 AK |
3297 | { |
3298 | struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control; | |
3299 | ||
3300 | *info1 = control->exit_info_1; | |
3301 | *info2 = control->exit_info_2; | |
235ba74f SC |
3302 | *intr_info = control->exit_int_info; |
3303 | if ((*intr_info & SVM_EXITINTINFO_VALID) && | |
3304 | (*intr_info & SVM_EXITINTINFO_VALID_ERR)) | |
3305 | *error_code = control->exit_int_info_err; | |
3306 | else | |
3307 | *error_code = 0; | |
586f9607 AK |
3308 | } |
3309 | ||
404d5d7b | 3310 | static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) |
6aa8b732 | 3311 | { |
04d2cc77 | 3312 | struct vcpu_svm *svm = to_svm(vcpu); |
851ba692 | 3313 | struct kvm_run *kvm_run = vcpu->run; |
a2fa3e9f | 3314 | u32 exit_code = svm->vmcb->control.exit_code; |
6aa8b732 | 3315 | |
8b89fe1f PB |
3316 | trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM); |
3317 | ||
f1c6366e TL |
3318 | /* SEV-ES guests must use the CR write traps to track CR registers. */ |
3319 | if (!sev_es_guest(vcpu->kvm)) { | |
3320 | if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE)) | |
3321 | vcpu->arch.cr0 = svm->vmcb->save.cr0; | |
3322 | if (npt_enabled) | |
3323 | vcpu->arch.cr3 = svm->vmcb->save.cr3; | |
3324 | } | |
af9ca2d7 | 3325 | |
2030753d | 3326 | if (is_guest_mode(vcpu)) { |
410e4d57 JR |
3327 | int vmexit; |
3328 | ||
cc167bd7 | 3329 | trace_kvm_nested_vmexit(exit_code, vcpu, KVM_ISA_SVM); |
d8cabddf | 3330 | |
410e4d57 JR |
3331 | vmexit = nested_svm_exit_special(svm); |
3332 | ||
3333 | if (vmexit == NESTED_EXIT_CONTINUE) | |
3334 | vmexit = nested_svm_exit_handled(svm); | |
3335 | ||
3336 | if (vmexit == NESTED_EXIT_DONE) | |
cf74a78b | 3337 | return 1; |
cf74a78b AG |
3338 | } |
3339 | ||
04d2cc77 AK |
3340 | if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) { |
3341 | kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; | |
3342 | kvm_run->fail_entry.hardware_entry_failure_reason | |
3343 | = svm->vmcb->control.exit_code; | |
8a14fe4f | 3344 | kvm_run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu; |
3f10c846 | 3345 | dump_vmcb(vcpu); |
04d2cc77 AK |
3346 | return 0; |
3347 | } | |
3348 | ||
a2fa3e9f | 3349 | if (is_external_interrupt(svm->vmcb->control.exit_int_info) && |
709ddebf | 3350 | exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR && |
55c5e464 JR |
3351 | exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH && |
3352 | exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI) | |
6614c7d0 | 3353 | printk(KERN_ERR "%s: unexpected exit_int_info 0x%x " |
6aa8b732 | 3354 | "exit_code 0x%x\n", |
b8688d51 | 3355 | __func__, svm->vmcb->control.exit_int_info, |
6aa8b732 AK |
3356 | exit_code); |
3357 | ||
404d5d7b | 3358 | if (exit_fastpath != EXIT_FASTPATH_NONE) |
1e9e2622 | 3359 | return 1; |
404d5d7b | 3360 | |
63129754 | 3361 | return svm_invoke_exit_handler(vcpu, exit_code); |
6aa8b732 AK |
3362 | } |
3363 | ||
3364 | static void reload_tss(struct kvm_vcpu *vcpu) | |
3365 | { | |
73cd6e5f | 3366 | struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu); |
6aa8b732 | 3367 | |
0fe1e009 | 3368 | sd->tss_desc->type = 9; /* available 32/64-bit TSS */ |
6aa8b732 AK |
3369 | load_TR_desc(); |
3370 | } | |
3371 | ||
63129754 | 3372 | static void pre_svm_run(struct kvm_vcpu *vcpu) |
6aa8b732 | 3373 | { |
63129754 PB |
3374 | struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu); |
3375 | struct vcpu_svm *svm = to_svm(vcpu); | |
6aa8b732 | 3376 | |
af18fa77 | 3377 | /* |
44f1b558 SC |
3378 | * If the previous vmrun of the vmcb occurred on a different physical |
3379 | * cpu, then mark the vmcb dirty and assign a new asid. Hardware's | |
3380 | * vmcb clean bits are per logical CPU, as are KVM's asid assignments. | |
3381 | */ | |
63129754 | 3382 | if (unlikely(svm->current_vmcb->cpu != vcpu->cpu)) { |
193015ad | 3383 | svm->current_vmcb->asid_generation = 0; |
af18fa77 | 3384 | vmcb_mark_all_dirty(svm->vmcb); |
63129754 | 3385 | svm->current_vmcb->cpu = vcpu->cpu; |
af18fa77 CA |
3386 | } |
3387 | ||
63129754 PB |
3388 | if (sev_guest(vcpu->kvm)) |
3389 | return pre_sev_run(svm, vcpu->cpu); | |
70cd94e6 | 3390 | |
4b656b12 | 3391 | /* FIXME: handle wraparound of asid_generation */ |
193015ad | 3392 | if (svm->current_vmcb->asid_generation != sd->asid_generation) |
0fe1e009 | 3393 | new_asid(svm, sd); |
6aa8b732 AK |
3394 | } |
3395 | ||
95ba8273 GN |
3396 | static void svm_inject_nmi(struct kvm_vcpu *vcpu) |
3397 | { | |
3398 | struct vcpu_svm *svm = to_svm(vcpu); | |
3399 | ||
3400 | svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI; | |
3401 | vcpu->arch.hflags |= HF_NMI_MASK; | |
63129754 | 3402 | if (!sev_es_guest(vcpu->kvm)) |
4444dfe4 | 3403 | svm_set_intercept(svm, INTERCEPT_IRET); |
95ba8273 GN |
3404 | ++vcpu->stat.nmi_injections; |
3405 | } | |
6aa8b732 | 3406 | |
66fd3f7f | 3407 | static void svm_set_irq(struct kvm_vcpu *vcpu) |
2a8067f1 ED |
3408 | { |
3409 | struct vcpu_svm *svm = to_svm(vcpu); | |
3410 | ||
2af9194d | 3411 | BUG_ON(!(gif_set(svm))); |
cf74a78b | 3412 | |
9fb2d2b4 GN |
3413 | trace_kvm_inj_virq(vcpu->arch.interrupt.nr); |
3414 | ++vcpu->stat.irq_injections; | |
3415 | ||
219b65dc AG |
3416 | svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr | |
3417 | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR; | |
2a8067f1 ED |
3418 | } |
3419 | ||
b6a7cc35 | 3420 | static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) |
aaacfc9a JR |
3421 | { |
3422 | struct vcpu_svm *svm = to_svm(vcpu); | |
aaacfc9a | 3423 | |
f1c6366e TL |
3424 | /* |
3425 | * SEV-ES guests must always keep the CR intercepts cleared. CR | |
3426 | * tracking is done using the CR write traps. | |
3427 | */ | |
3428 | if (sev_es_guest(vcpu->kvm)) | |
3429 | return; | |
3430 | ||
01c3b2b5 | 3431 | if (nested_svm_virtualize_tpr(vcpu)) |
88ab24ad JR |
3432 | return; |
3433 | ||
830bd71f | 3434 | svm_clr_intercept(svm, INTERCEPT_CR8_WRITE); |
596f3142 | 3435 | |
95ba8273 | 3436 | if (irr == -1) |
aaacfc9a JR |
3437 | return; |
3438 | ||
95ba8273 | 3439 | if (tpr >= irr) |
830bd71f | 3440 | svm_set_intercept(svm, INTERCEPT_CR8_WRITE); |
95ba8273 | 3441 | } |
aaacfc9a | 3442 | |
cae96af1 | 3443 | bool svm_nmi_blocked(struct kvm_vcpu *vcpu) |
95ba8273 GN |
3444 | { |
3445 | struct vcpu_svm *svm = to_svm(vcpu); | |
3446 | struct vmcb *vmcb = svm->vmcb; | |
88c604b6 | 3447 | bool ret; |
9c3d370a | 3448 | |
cae96af1 | 3449 | if (!gif_set(svm)) |
bbdad0b5 PB |
3450 | return true; |
3451 | ||
cae96af1 PB |
3452 | if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm)) |
3453 | return false; | |
3454 | ||
3455 | ret = (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) || | |
63129754 | 3456 | (vcpu->arch.hflags & HF_NMI_MASK); |
924584cc JR |
3457 | |
3458 | return ret; | |
aaacfc9a JR |
3459 | } |
3460 | ||
c9d40913 | 3461 | static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection) |
cae96af1 PB |
3462 | { |
3463 | struct vcpu_svm *svm = to_svm(vcpu); | |
3464 | if (svm->nested.nested_run_pending) | |
c9d40913 | 3465 | return -EBUSY; |
cae96af1 | 3466 | |
c300ab9f PB |
3467 | /* An NMI must not be injected into L2 if it's supposed to VM-Exit. */ |
3468 | if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm)) | |
c9d40913 | 3469 | return -EBUSY; |
c300ab9f PB |
3470 | |
3471 | return !svm_nmi_blocked(vcpu); | |
cae96af1 PB |
3472 | } |
3473 | ||
3cfc3092 JK |
3474 | static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu) |
3475 | { | |
63129754 | 3476 | return !!(vcpu->arch.hflags & HF_NMI_MASK); |
3cfc3092 JK |
3477 | } |
3478 | ||
3479 | static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) | |
3480 | { | |
3481 | struct vcpu_svm *svm = to_svm(vcpu); | |
3482 | ||
3483 | if (masked) { | |
63129754 PB |
3484 | vcpu->arch.hflags |= HF_NMI_MASK; |
3485 | if (!sev_es_guest(vcpu->kvm)) | |
4444dfe4 | 3486 | svm_set_intercept(svm, INTERCEPT_IRET); |
3cfc3092 | 3487 | } else { |
63129754 PB |
3488 | vcpu->arch.hflags &= ~HF_NMI_MASK; |
3489 | if (!sev_es_guest(vcpu->kvm)) | |
4444dfe4 | 3490 | svm_clr_intercept(svm, INTERCEPT_IRET); |
3cfc3092 JK |
3491 | } |
3492 | } | |
3493 | ||
cae96af1 | 3494 | bool svm_interrupt_blocked(struct kvm_vcpu *vcpu) |
78646121 GN |
3495 | { |
3496 | struct vcpu_svm *svm = to_svm(vcpu); | |
3497 | struct vmcb *vmcb = svm->vmcb; | |
7fcdb510 | 3498 | |
fc6f7c03 | 3499 | if (!gif_set(svm)) |
cae96af1 | 3500 | return true; |
7fcdb510 | 3501 | |
63129754 | 3502 | if (sev_es_guest(vcpu->kvm)) { |
f1c6366e TL |
3503 | /* |
3504 | * SEV-ES guests to not expose RFLAGS. Use the VMCB interrupt mask | |
3505 | * bit to determine the state of the IF flag. | |
3506 | */ | |
3507 | if (!(vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK)) | |
3508 | return true; | |
3509 | } else if (is_guest_mode(vcpu)) { | |
fc6f7c03 | 3510 | /* As long as interrupts are being delivered... */ |
e9fd761a | 3511 | if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) |
4995a368 | 3512 | ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF) |
fc6f7c03 PB |
3513 | : !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF)) |
3514 | return true; | |
3515 | ||
3516 | /* ... vmexits aren't blocked by the interrupt shadow */ | |
3517 | if (nested_exit_on_intr(svm)) | |
3518 | return false; | |
3519 | } else { | |
3520 | if (!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF)) | |
3521 | return true; | |
3522 | } | |
3523 | ||
3524 | return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK); | |
cae96af1 PB |
3525 | } |
3526 | ||
c9d40913 | 3527 | static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection) |
cae96af1 PB |
3528 | { |
3529 | struct vcpu_svm *svm = to_svm(vcpu); | |
3530 | if (svm->nested.nested_run_pending) | |
c9d40913 | 3531 | return -EBUSY; |
cae96af1 | 3532 | |
c300ab9f PB |
3533 | /* |
3534 | * An IRQ must not be injected into L2 if it's supposed to VM-Exit, | |
3535 | * e.g. if the IRQ arrived asynchronously after checking nested events. | |
3536 | */ | |
3537 | if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm)) | |
c9d40913 | 3538 | return -EBUSY; |
c300ab9f PB |
3539 | |
3540 | return !svm_interrupt_blocked(vcpu); | |
78646121 GN |
3541 | } |
3542 | ||
b6a7cc35 | 3543 | static void svm_enable_irq_window(struct kvm_vcpu *vcpu) |
6aa8b732 | 3544 | { |
219b65dc | 3545 | struct vcpu_svm *svm = to_svm(vcpu); |
219b65dc | 3546 | |
e0231715 JR |
3547 | /* |
3548 | * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes | |
3549 | * 1, because that's a separate STGI/VMRUN intercept. The next time we | |
3550 | * get that intercept, this function will be called again though and | |
640bd6e5 JN |
3551 | * we'll get the vintr intercept. However, if the vGIF feature is |
3552 | * enabled, the STGI interception will not occur. Enable the irq | |
3553 | * window under the assumption that the hardware will set the GIF. | |
e0231715 | 3554 | */ |
b518ba9f | 3555 | if (vgif_enabled(svm) || gif_set(svm)) { |
f3515dc3 SS |
3556 | /* |
3557 | * IRQ window is not needed when AVIC is enabled, | |
3558 | * unless we have pending ExtINT since it cannot be injected | |
3559 | * via AVIC. In such case, we need to temporarily disable AVIC, | |
3560 | * and fallback to injecting IRQ via V_IRQ. | |
3561 | */ | |
3562 | svm_toggle_avic_for_irq_window(vcpu, false); | |
219b65dc | 3563 | svm_set_vintr(svm); |
219b65dc | 3564 | } |
85f455f7 ED |
3565 | } |
3566 | ||
b6a7cc35 | 3567 | static void svm_enable_nmi_window(struct kvm_vcpu *vcpu) |
c1150d8c | 3568 | { |
04d2cc77 | 3569 | struct vcpu_svm *svm = to_svm(vcpu); |
c1150d8c | 3570 | |
63129754 | 3571 | if ((vcpu->arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) == HF_NMI_MASK) |
c9a7953f | 3572 | return; /* IRET will cause a vm exit */ |
44c11430 | 3573 | |
640bd6e5 JN |
3574 | if (!gif_set(svm)) { |
3575 | if (vgif_enabled(svm)) | |
a284ba56 | 3576 | svm_set_intercept(svm, INTERCEPT_STGI); |
1a5e1852 | 3577 | return; /* STGI will cause a vm exit */ |
640bd6e5 | 3578 | } |
1a5e1852 | 3579 | |
e0231715 JR |
3580 | /* |
3581 | * Something prevents NMI from been injected. Single step over possible | |
3582 | * problem (IRET or exception injection or interrupt shadow) | |
3583 | */ | |
ab2f4d73 | 3584 | svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu); |
6be7d306 | 3585 | svm->nmi_singlestep = true; |
44c11430 | 3586 | svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); |
c1150d8c DL |
3587 | } |
3588 | ||
cbc94022 IE |
3589 | static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr) |
3590 | { | |
3591 | return 0; | |
3592 | } | |
3593 | ||
2ac52ab8 SC |
3594 | static int svm_set_identity_map_addr(struct kvm *kvm, u64 ident_addr) |
3595 | { | |
3596 | return 0; | |
3597 | } | |
3598 | ||
f55ac304 | 3599 | void svm_flush_tlb(struct kvm_vcpu *vcpu) |
d9e368d6 | 3600 | { |
38e5e92f JR |
3601 | struct vcpu_svm *svm = to_svm(vcpu); |
3602 | ||
4a41e43c SC |
3603 | /* |
3604 | * Flush only the current ASID even if the TLB flush was invoked via | |
3605 | * kvm_flush_remote_tlbs(). Although flushing remote TLBs requires all | |
3606 | * ASIDs to be flushed, KVM uses a single ASID for L1 and L2, and | |
3607 | * unconditionally does a TLB flush on both nested VM-Enter and nested | |
3608 | * VM-Exit (via kvm_mmu_reset_context()). | |
3609 | */ | |
38e5e92f JR |
3610 | if (static_cpu_has(X86_FEATURE_FLUSHBYASID)) |
3611 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; | |
3612 | else | |
193015ad | 3613 | svm->current_vmcb->asid_generation--; |
d9e368d6 AK |
3614 | } |
3615 | ||
faff8758 JS |
3616 | static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva) |
3617 | { | |
3618 | struct vcpu_svm *svm = to_svm(vcpu); | |
3619 | ||
3620 | invlpga(gva, svm->vmcb->control.asid); | |
3621 | } | |
3622 | ||
d7bf8221 JR |
3623 | static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu) |
3624 | { | |
3625 | struct vcpu_svm *svm = to_svm(vcpu); | |
3626 | ||
01c3b2b5 | 3627 | if (nested_svm_virtualize_tpr(vcpu)) |
88ab24ad JR |
3628 | return; |
3629 | ||
830bd71f | 3630 | if (!svm_is_intercept(svm, INTERCEPT_CR8_WRITE)) { |
d7bf8221 | 3631 | int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK; |
615d5193 | 3632 | kvm_set_cr8(vcpu, cr8); |
d7bf8221 JR |
3633 | } |
3634 | } | |
3635 | ||
649d6864 JR |
3636 | static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu) |
3637 | { | |
3638 | struct vcpu_svm *svm = to_svm(vcpu); | |
3639 | u64 cr8; | |
3640 | ||
01c3b2b5 | 3641 | if (nested_svm_virtualize_tpr(vcpu) || |
3bbf3565 | 3642 | kvm_vcpu_apicv_active(vcpu)) |
88ab24ad JR |
3643 | return; |
3644 | ||
649d6864 JR |
3645 | cr8 = kvm_get_cr8(vcpu); |
3646 | svm->vmcb->control.int_ctl &= ~V_TPR_MASK; | |
3647 | svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK; | |
3648 | } | |
3649 | ||
63129754 | 3650 | static void svm_complete_interrupts(struct kvm_vcpu *vcpu) |
9222be18 | 3651 | { |
63129754 | 3652 | struct vcpu_svm *svm = to_svm(vcpu); |
9222be18 GN |
3653 | u8 vector; |
3654 | int type; | |
3655 | u32 exitintinfo = svm->vmcb->control.exit_int_info; | |
66b7138f JK |
3656 | unsigned int3_injected = svm->int3_injected; |
3657 | ||
3658 | svm->int3_injected = 0; | |
9222be18 | 3659 | |
bd3d1ec3 AK |
3660 | /* |
3661 | * If we've made progress since setting HF_IRET_MASK, we've | |
3662 | * executed an IRET and can allow NMI injection. | |
3663 | */ | |
63129754 PB |
3664 | if ((vcpu->arch.hflags & HF_IRET_MASK) && |
3665 | (sev_es_guest(vcpu->kvm) || | |
3666 | kvm_rip_read(vcpu) != svm->nmi_iret_rip)) { | |
3667 | vcpu->arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK); | |
3668 | kvm_make_request(KVM_REQ_EVENT, vcpu); | |
3842d135 | 3669 | } |
44c11430 | 3670 | |
63129754 PB |
3671 | vcpu->arch.nmi_injected = false; |
3672 | kvm_clear_exception_queue(vcpu); | |
3673 | kvm_clear_interrupt_queue(vcpu); | |
9222be18 GN |
3674 | |
3675 | if (!(exitintinfo & SVM_EXITINTINFO_VALID)) | |
3676 | return; | |
3677 | ||
63129754 | 3678 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
3842d135 | 3679 | |
9222be18 GN |
3680 | vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK; |
3681 | type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK; | |
3682 | ||
3683 | switch (type) { | |
3684 | case SVM_EXITINTINFO_TYPE_NMI: | |
63129754 | 3685 | vcpu->arch.nmi_injected = true; |
9222be18 GN |
3686 | break; |
3687 | case SVM_EXITINTINFO_TYPE_EXEPT: | |
f1c6366e TL |
3688 | /* |
3689 | * Never re-inject a #VC exception. | |
3690 | */ | |
3691 | if (vector == X86_TRAP_VC) | |
3692 | break; | |
3693 | ||
66b7138f JK |
3694 | /* |
3695 | * In case of software exceptions, do not reinject the vector, | |
3696 | * but re-execute the instruction instead. Rewind RIP first | |
3697 | * if we emulated INT3 before. | |
3698 | */ | |
3699 | if (kvm_exception_is_soft(vector)) { | |
3700 | if (vector == BP_VECTOR && int3_injected && | |
63129754 PB |
3701 | kvm_is_linear_rip(vcpu, svm->int3_rip)) |
3702 | kvm_rip_write(vcpu, | |
3703 | kvm_rip_read(vcpu) - int3_injected); | |
9222be18 | 3704 | break; |
66b7138f | 3705 | } |
9222be18 GN |
3706 | if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) { |
3707 | u32 err = svm->vmcb->control.exit_int_info_err; | |
63129754 | 3708 | kvm_requeue_exception_e(vcpu, vector, err); |
9222be18 GN |
3709 | |
3710 | } else | |
63129754 | 3711 | kvm_requeue_exception(vcpu, vector); |
9222be18 GN |
3712 | break; |
3713 | case SVM_EXITINTINFO_TYPE_INTR: | |
63129754 | 3714 | kvm_queue_interrupt(vcpu, vector, false); |
9222be18 GN |
3715 | break; |
3716 | default: | |
3717 | break; | |
3718 | } | |
3719 | } | |
3720 | ||
b463a6f7 AK |
3721 | static void svm_cancel_injection(struct kvm_vcpu *vcpu) |
3722 | { | |
3723 | struct vcpu_svm *svm = to_svm(vcpu); | |
3724 | struct vmcb_control_area *control = &svm->vmcb->control; | |
3725 | ||
3726 | control->exit_int_info = control->event_inj; | |
3727 | control->exit_int_info_err = control->event_inj_err; | |
3728 | control->event_inj = 0; | |
63129754 | 3729 | svm_complete_interrupts(vcpu); |
b463a6f7 AK |
3730 | } |
3731 | ||
404d5d7b | 3732 | static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu) |
a9ab13ff | 3733 | { |
4e810adb | 3734 | if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR && |
a9ab13ff WL |
3735 | to_svm(vcpu)->vmcb->control.exit_info_1) |
3736 | return handle_fastpath_set_msr_irqoff(vcpu); | |
3737 | ||
3738 | return EXIT_FASTPATH_NONE; | |
3739 | } | |
3740 | ||
63129754 | 3741 | static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu) |
135961e0 | 3742 | { |
63129754 | 3743 | struct vcpu_svm *svm = to_svm(vcpu); |
d1788191 | 3744 | unsigned long vmcb_pa = svm->current_vmcb->pa; |
63129754 | 3745 | |
bc908e09 | 3746 | kvm_guest_enter_irqoff(); |
135961e0 | 3747 | |
63129754 | 3748 | if (sev_es_guest(vcpu->kvm)) { |
d1788191 | 3749 | __svm_sev_es_vcpu_run(vmcb_pa); |
16809ecd | 3750 | } else { |
e79b91bb MR |
3751 | struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu); |
3752 | ||
d1788191 SC |
3753 | /* |
3754 | * Use a single vmcb (vmcb01 because it's always valid) for | |
3755 | * context switching guest state via VMLOAD/VMSAVE, that way | |
3756 | * the state doesn't need to be copied between vmcb01 and | |
3757 | * vmcb02 when switching vmcbs for nested virtualization. | |
3758 | */ | |
cc3ed80a | 3759 | vmload(svm->vmcb01.pa); |
d1788191 | 3760 | __svm_vcpu_run(vmcb_pa, (unsigned long *)&vcpu->arch.regs); |
cc3ed80a | 3761 | vmsave(svm->vmcb01.pa); |
135961e0 | 3762 | |
e79b91bb | 3763 | vmload(__sme_page_pa(sd->save_area)); |
16809ecd | 3764 | } |
135961e0 | 3765 | |
bc908e09 | 3766 | kvm_guest_exit_irqoff(); |
135961e0 TG |
3767 | } |
3768 | ||
b95273f1 | 3769 | static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) |
6aa8b732 | 3770 | { |
a2fa3e9f | 3771 | struct vcpu_svm *svm = to_svm(vcpu); |
d9e368d6 | 3772 | |
d95df951 LB |
3773 | trace_kvm_entry(vcpu); |
3774 | ||
2041a06a JR |
3775 | svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; |
3776 | svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; | |
3777 | svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; | |
3778 | ||
a12713c2 LP |
3779 | /* |
3780 | * Disable singlestep if we're injecting an interrupt/exception. | |
3781 | * We don't want our modified rflags to be pushed on the stack where | |
3782 | * we might not be able to easily reset them if we disabled NMI | |
3783 | * singlestep later. | |
3784 | */ | |
3785 | if (svm->nmi_singlestep && svm->vmcb->control.event_inj) { | |
3786 | /* | |
3787 | * Event injection happens before external interrupts cause a | |
3788 | * vmexit and interrupts are disabled here, so smp_send_reschedule | |
3789 | * is enough to force an immediate vmexit. | |
3790 | */ | |
3791 | disable_nmi_singlestep(svm); | |
3792 | smp_send_reschedule(vcpu->cpu); | |
3793 | } | |
3794 | ||
63129754 | 3795 | pre_svm_run(vcpu); |
6aa8b732 | 3796 | |
649d6864 JR |
3797 | sync_lapic_to_cr8(vcpu); |
3798 | ||
7e8e6eed CA |
3799 | if (unlikely(svm->asid != svm->vmcb->control.asid)) { |
3800 | svm->vmcb->control.asid = svm->asid; | |
3801 | vmcb_mark_dirty(svm->vmcb, VMCB_ASID); | |
3802 | } | |
cda0ffdd | 3803 | svm->vmcb->save.cr2 = vcpu->arch.cr2; |
6aa8b732 | 3804 | |
1183646a VP |
3805 | svm_hv_update_vp_id(svm->vmcb, vcpu); |
3806 | ||
d67668e9 PB |
3807 | /* |
3808 | * Run with all-zero DR6 unless needed, so that we can get the exact cause | |
3809 | * of a #DB. | |
3810 | */ | |
63129754 | 3811 | if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) |
d67668e9 PB |
3812 | svm_set_dr6(svm, vcpu->arch.dr6); |
3813 | else | |
9a3ecd5e | 3814 | svm_set_dr6(svm, DR6_ACTIVE_LOW); |
d67668e9 | 3815 | |
04d2cc77 | 3816 | clgi(); |
139a12cf | 3817 | kvm_load_guest_xsave_state(vcpu); |
04d2cc77 | 3818 | |
010fd37f | 3819 | kvm_wait_lapic_expire(vcpu); |
b6c4bc65 | 3820 | |
b2ac58f9 KA |
3821 | /* |
3822 | * If this vCPU has touched SPEC_CTRL, restore the guest's value if | |
3823 | * it's non-zero. Since vmentry is serialising on affected CPUs, there | |
3824 | * is no need to worry about the conditional branch over the wrmsr | |
3825 | * being speculatively taken. | |
3826 | */ | |
d00b99c5 BM |
3827 | if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL)) |
3828 | x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl); | |
b2ac58f9 | 3829 | |
63129754 | 3830 | svm_vcpu_enter_exit(vcpu); |
15e6c22f | 3831 | |
b2ac58f9 KA |
3832 | /* |
3833 | * We do not use IBRS in the kernel. If this vCPU has used the | |
3834 | * SPEC_CTRL MSR it may have left it on; save the value and | |
3835 | * turn it off. This is much more efficient than blindly adding | |
3836 | * it to the atomic save/restore list. Especially as the former | |
3837 | * (Saving guest MSRs on vmexit) doesn't even exist in KVM. | |
3838 | * | |
3839 | * For non-nested case: | |
3840 | * If the L01 MSR bitmap does not intercept the MSR, then we need to | |
3841 | * save it. | |
3842 | * | |
3843 | * For nested case: | |
3844 | * If the L02 MSR bitmap does not intercept the MSR, then we need to | |
3845 | * save it. | |
3846 | */ | |
d00b99c5 BM |
3847 | if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL) && |
3848 | unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) | |
ecb586bd | 3849 | svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); |
b2ac58f9 | 3850 | |
63129754 | 3851 | if (!sev_es_guest(vcpu->kvm)) |
16809ecd | 3852 | reload_tss(vcpu); |
6aa8b732 | 3853 | |
d00b99c5 BM |
3854 | if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL)) |
3855 | x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl); | |
024d83ca | 3856 | |
63129754 | 3857 | if (!sev_es_guest(vcpu->kvm)) { |
16809ecd TL |
3858 | vcpu->arch.cr2 = svm->vmcb->save.cr2; |
3859 | vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; | |
3860 | vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; | |
3861 | vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; | |
3862 | } | |
13c34e07 | 3863 | |
3781c01c | 3864 | if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) |
63129754 | 3865 | kvm_before_interrupt(vcpu); |
3781c01c | 3866 | |
139a12cf | 3867 | kvm_load_host_xsave_state(vcpu); |
3781c01c JR |
3868 | stgi(); |
3869 | ||
3870 | /* Any pending NMI will happen here */ | |
3871 | ||
3872 | if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) | |
63129754 | 3873 | kvm_after_interrupt(vcpu); |
3781c01c | 3874 | |
d7bf8221 JR |
3875 | sync_cr8_to_lapic(vcpu); |
3876 | ||
a2fa3e9f | 3877 | svm->next_rip = 0; |
63129754 | 3878 | if (is_guest_mode(vcpu)) { |
9e8f0fbf | 3879 | nested_sync_control_from_vmcb02(svm); |
b93af02c KS |
3880 | |
3881 | /* Track VMRUNs that have made past consistency checking */ | |
3882 | if (svm->nested.nested_run_pending && | |
3883 | svm->vmcb->control.exit_code != SVM_EXIT_ERR) | |
3884 | ++vcpu->stat.nested_run; | |
3885 | ||
2d8a42be PB |
3886 | svm->nested.nested_run_pending = 0; |
3887 | } | |
9222be18 | 3888 | |
38e5e92f | 3889 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; |
e42c6828 | 3890 | vmcb_mark_all_clean(svm->vmcb); |
38e5e92f | 3891 | |
631bc487 GN |
3892 | /* if exit due to PF check for async PF */ |
3893 | if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) | |
63129754 | 3894 | vcpu->arch.apf.host_apf_flags = |
68fd66f1 | 3895 | kvm_read_and_reset_apf_flags(); |
631bc487 | 3896 | |
329675dd ML |
3897 | if (npt_enabled) |
3898 | kvm_register_clear_available(vcpu, VCPU_EXREG_PDPTR); | |
fe5913e4 JR |
3899 | |
3900 | /* | |
3901 | * We need to handle MC intercepts here before the vcpu has a chance to | |
3902 | * change the physical cpu | |
3903 | */ | |
3904 | if (unlikely(svm->vmcb->control.exit_code == | |
3905 | SVM_EXIT_EXCP_BASE + MC_VECTOR)) | |
63129754 | 3906 | svm_handle_mce(vcpu); |
8d28fec4 | 3907 | |
63129754 | 3908 | svm_complete_interrupts(vcpu); |
4e810adb WL |
3909 | |
3910 | if (is_guest_mode(vcpu)) | |
3911 | return EXIT_FASTPATH_NONE; | |
3912 | ||
3913 | return svm_exit_handlers_fastpath(vcpu); | |
6aa8b732 AK |
3914 | } |
3915 | ||
e83bc09c | 3916 | static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, |
2a40b900 | 3917 | int root_level) |
6aa8b732 | 3918 | { |
a2fa3e9f | 3919 | struct vcpu_svm *svm = to_svm(vcpu); |
689f3bf2 | 3920 | unsigned long cr3; |
a2fa3e9f | 3921 | |
689f3bf2 | 3922 | if (npt_enabled) { |
4a98623d | 3923 | svm->vmcb->control.nested_cr3 = __sme_set(root_hpa); |
06e7852c | 3924 | vmcb_mark_dirty(svm->vmcb, VMCB_NPT); |
1c97f0a0 | 3925 | |
1e0c7d40 VP |
3926 | hv_track_root_tdp(vcpu, root_hpa); |
3927 | ||
689f3bf2 | 3928 | /* Loading L2's CR3 is handled by enter_svm_guest_mode. */ |
978ce583 PB |
3929 | if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) |
3930 | return; | |
3931 | cr3 = vcpu->arch.cr3; | |
e83bc09c | 3932 | } else if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) { |
4a98623d | 3933 | cr3 = __sme_set(root_hpa) | kvm_get_active_pcid(vcpu); |
e83bc09c SC |
3934 | } else { |
3935 | /* PCID in the guest should be impossible with a 32-bit MMU. */ | |
3936 | WARN_ON_ONCE(kvm_get_active_pcid(vcpu)); | |
3937 | cr3 = root_hpa; | |
689f3bf2 | 3938 | } |
1c97f0a0 | 3939 | |
978ce583 | 3940 | svm->vmcb->save.cr3 = cr3; |
06e7852c | 3941 | vmcb_mark_dirty(svm->vmcb, VMCB_CR); |
1c97f0a0 JR |
3942 | } |
3943 | ||
6aa8b732 AK |
3944 | static int is_disabled(void) |
3945 | { | |
6031a61c JR |
3946 | u64 vm_cr; |
3947 | ||
3948 | rdmsrl(MSR_VM_CR, vm_cr); | |
3949 | if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE)) | |
3950 | return 1; | |
3951 | ||
6aa8b732 AK |
3952 | return 0; |
3953 | } | |
3954 | ||
102d8325 IM |
3955 | static void |
3956 | svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) | |
3957 | { | |
3958 | /* | |
3959 | * Patch in the VMMCALL instruction: | |
3960 | */ | |
3961 | hypercall[0] = 0x0f; | |
3962 | hypercall[1] = 0x01; | |
3963 | hypercall[2] = 0xd9; | |
102d8325 IM |
3964 | } |
3965 | ||
f257d6dc | 3966 | static int __init svm_check_processor_compat(void) |
002c7f7c | 3967 | { |
f257d6dc | 3968 | return 0; |
002c7f7c YS |
3969 | } |
3970 | ||
774ead3a AK |
3971 | static bool svm_cpu_has_accelerated_tpr(void) |
3972 | { | |
3973 | return false; | |
3974 | } | |
3975 | ||
5719455f TL |
3976 | /* |
3977 | * The kvm parameter can be NULL (module initialization, or invocation before | |
3978 | * VM creation). Be sure to check the kvm parameter before using it. | |
3979 | */ | |
3980 | static bool svm_has_emulated_msr(struct kvm *kvm, u32 index) | |
6d396b55 | 3981 | { |
e87555e5 VK |
3982 | switch (index) { |
3983 | case MSR_IA32_MCG_EXT_CTL: | |
95c5c7c7 | 3984 | case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: |
e87555e5 | 3985 | return false; |
5719455f TL |
3986 | case MSR_IA32_SMBASE: |
3987 | /* SEV-ES guests do not support SMM, so report false */ | |
3988 | if (kvm && sev_es_guest(kvm)) | |
3989 | return false; | |
3990 | break; | |
e87555e5 VK |
3991 | default: |
3992 | break; | |
3993 | } | |
3994 | ||
6d396b55 PB |
3995 | return true; |
3996 | } | |
3997 | ||
fc07e76a PB |
3998 | static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) |
3999 | { | |
4000 | return 0; | |
4001 | } | |
4002 | ||
7c1b761b | 4003 | static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) |
0e851880 | 4004 | { |
6092d3d3 | 4005 | struct vcpu_svm *svm = to_svm(vcpu); |
96308b06 | 4006 | struct kvm_cpuid_entry2 *best; |
6092d3d3 | 4007 | |
7204160e | 4008 | vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && |
96be4e06 | 4009 | boot_cpu_has(X86_FEATURE_XSAVE) && |
7204160e AL |
4010 | boot_cpu_has(X86_FEATURE_XSAVES); |
4011 | ||
6092d3d3 | 4012 | /* Update nrips enabled cache */ |
4eb87460 | 4013 | svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) && |
63129754 | 4014 | guest_cpuid_has(vcpu, X86_FEATURE_NRIPS); |
46781eae | 4015 | |
3b195ac9 | 4016 | svm_recalc_instruction_intercepts(vcpu, svm); |
4407a797 | 4017 | |
96308b06 BM |
4018 | /* For sev guests, the memory encryption bit is not reserved in CR3. */ |
4019 | if (sev_guest(vcpu->kvm)) { | |
4020 | best = kvm_find_cpuid_entry(vcpu, 0x8000001F, 0); | |
4021 | if (best) | |
ca29e145 | 4022 | vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f)); |
96308b06 BM |
4023 | } |
4024 | ||
adc2a237 ML |
4025 | if (kvm_vcpu_apicv_active(vcpu)) { |
4026 | /* | |
4027 | * AVIC does not work with an x2APIC mode guest. If the X2APIC feature | |
4028 | * is exposed to the guest, disable AVIC. | |
4029 | */ | |
4030 | if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC)) | |
4031 | kvm_request_apicv_update(vcpu->kvm, false, | |
4032 | APICV_INHIBIT_REASON_X2APIC); | |
46781eae | 4033 | |
adc2a237 ML |
4034 | /* |
4035 | * Currently, AVIC does not work with nested virtualization. | |
4036 | * So, we disable AVIC when cpuid for SVM is set in the L1 guest. | |
4037 | */ | |
4038 | if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM)) | |
4039 | kvm_request_apicv_update(vcpu->kvm, false, | |
4040 | APICV_INHIBIT_REASON_NESTED); | |
4041 | } | |
9a0bf054 | 4042 | |
adc2a237 ML |
4043 | if (guest_cpuid_is_intel(vcpu)) { |
4044 | /* | |
4045 | * We must intercept SYSENTER_EIP and SYSENTER_ESP | |
4046 | * accesses because the processor only stores 32 bits. | |
4047 | * For the same reason we cannot use virtual VMLOAD/VMSAVE. | |
4048 | */ | |
4049 | svm_set_intercept(svm, INTERCEPT_VMLOAD); | |
4050 | svm_set_intercept(svm, INTERCEPT_VMSAVE); | |
4051 | svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; | |
4052 | ||
4053 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 0, 0); | |
4054 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 0, 0); | |
4055 | } else { | |
4056 | /* | |
4057 | * If hardware supports Virtual VMLOAD VMSAVE then enable it | |
4058 | * in VMCB and clear intercepts to avoid #VMEXIT. | |
4059 | */ | |
4060 | if (vls) { | |
4061 | svm_clr_intercept(svm, INTERCEPT_VMLOAD); | |
4062 | svm_clr_intercept(svm, INTERCEPT_VMSAVE); | |
4063 | svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; | |
4064 | } | |
4065 | /* No need to intercept these MSRs */ | |
4066 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 1, 1); | |
4067 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 1, 1); | |
4068 | } | |
0e851880 SY |
4069 | } |
4070 | ||
f5f48ee1 SY |
4071 | static bool svm_has_wbinvd_exit(void) |
4072 | { | |
4073 | return true; | |
4074 | } | |
4075 | ||
8061252e | 4076 | #define PRE_EX(exit) { .exit_code = (exit), \ |
40e19b51 | 4077 | .stage = X86_ICPT_PRE_EXCEPT, } |
cfec82cb | 4078 | #define POST_EX(exit) { .exit_code = (exit), \ |
40e19b51 | 4079 | .stage = X86_ICPT_POST_EXCEPT, } |
d7eb8203 | 4080 | #define POST_MEM(exit) { .exit_code = (exit), \ |
40e19b51 | 4081 | .stage = X86_ICPT_POST_MEMACCESS, } |
cfec82cb | 4082 | |
09941fbb | 4083 | static const struct __x86_intercept { |
cfec82cb JR |
4084 | u32 exit_code; |
4085 | enum x86_intercept_stage stage; | |
cfec82cb JR |
4086 | } x86_intercept_map[] = { |
4087 | [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0), | |
4088 | [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0), | |
4089 | [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0), | |
4090 | [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0), | |
4091 | [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0), | |
3b88e41a JR |
4092 | [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0), |
4093 | [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0), | |
dee6bb70 JR |
4094 | [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ), |
4095 | [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ), | |
4096 | [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE), | |
4097 | [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE), | |
4098 | [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ), | |
4099 | [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ), | |
4100 | [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE), | |
4101 | [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE), | |
01de8b09 JR |
4102 | [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN), |
4103 | [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL), | |
4104 | [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD), | |
4105 | [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE), | |
4106 | [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI), | |
4107 | [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI), | |
4108 | [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT), | |
4109 | [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA), | |
d7eb8203 JR |
4110 | [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP), |
4111 | [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR), | |
4112 | [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT), | |
8061252e JR |
4113 | [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG), |
4114 | [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD), | |
4115 | [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD), | |
4116 | [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR), | |
4117 | [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC), | |
4118 | [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR), | |
4119 | [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC), | |
4120 | [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID), | |
4121 | [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM), | |
bf608f88 JR |
4122 | [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE), |
4123 | [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF), | |
4124 | [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF), | |
4125 | [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT), | |
4126 | [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET), | |
4127 | [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP), | |
4128 | [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT), | |
f6511935 JR |
4129 | [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO), |
4130 | [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO), | |
4131 | [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO), | |
4132 | [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO), | |
02d4160f | 4133 | [x86_intercept_xsetbv] = PRE_EX(SVM_EXIT_XSETBV), |
cfec82cb JR |
4134 | }; |
4135 | ||
8061252e | 4136 | #undef PRE_EX |
cfec82cb | 4137 | #undef POST_EX |
d7eb8203 | 4138 | #undef POST_MEM |
cfec82cb | 4139 | |
8a76d7f2 JR |
4140 | static int svm_check_intercept(struct kvm_vcpu *vcpu, |
4141 | struct x86_instruction_info *info, | |
21f1b8f2 SC |
4142 | enum x86_intercept_stage stage, |
4143 | struct x86_exception *exception) | |
8a76d7f2 | 4144 | { |
cfec82cb JR |
4145 | struct vcpu_svm *svm = to_svm(vcpu); |
4146 | int vmexit, ret = X86EMUL_CONTINUE; | |
4147 | struct __x86_intercept icpt_info; | |
4148 | struct vmcb *vmcb = svm->vmcb; | |
4149 | ||
4150 | if (info->intercept >= ARRAY_SIZE(x86_intercept_map)) | |
4151 | goto out; | |
4152 | ||
4153 | icpt_info = x86_intercept_map[info->intercept]; | |
4154 | ||
40e19b51 | 4155 | if (stage != icpt_info.stage) |
cfec82cb JR |
4156 | goto out; |
4157 | ||
4158 | switch (icpt_info.exit_code) { | |
4159 | case SVM_EXIT_READ_CR0: | |
4160 | if (info->intercept == x86_intercept_cr_read) | |
4161 | icpt_info.exit_code += info->modrm_reg; | |
4162 | break; | |
4163 | case SVM_EXIT_WRITE_CR0: { | |
4164 | unsigned long cr0, val; | |
cfec82cb JR |
4165 | |
4166 | if (info->intercept == x86_intercept_cr_write) | |
4167 | icpt_info.exit_code += info->modrm_reg; | |
4168 | ||
62baf44c JK |
4169 | if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 || |
4170 | info->intercept == x86_intercept_clts) | |
cfec82cb JR |
4171 | break; |
4172 | ||
c62e2e94 BM |
4173 | if (!(vmcb_is_intercept(&svm->nested.ctl, |
4174 | INTERCEPT_SELECTIVE_CR0))) | |
cfec82cb JR |
4175 | break; |
4176 | ||
4177 | cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK; | |
4178 | val = info->src_val & ~SVM_CR0_SELECTIVE_MASK; | |
4179 | ||
4180 | if (info->intercept == x86_intercept_lmsw) { | |
4181 | cr0 &= 0xfUL; | |
4182 | val &= 0xfUL; | |
4183 | /* lmsw can't clear PE - catch this here */ | |
4184 | if (cr0 & X86_CR0_PE) | |
4185 | val |= X86_CR0_PE; | |
4186 | } | |
4187 | ||
4188 | if (cr0 ^ val) | |
4189 | icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE; | |
4190 | ||
4191 | break; | |
4192 | } | |
3b88e41a JR |
4193 | case SVM_EXIT_READ_DR0: |
4194 | case SVM_EXIT_WRITE_DR0: | |
4195 | icpt_info.exit_code += info->modrm_reg; | |
4196 | break; | |
8061252e JR |
4197 | case SVM_EXIT_MSR: |
4198 | if (info->intercept == x86_intercept_wrmsr) | |
4199 | vmcb->control.exit_info_1 = 1; | |
4200 | else | |
4201 | vmcb->control.exit_info_1 = 0; | |
4202 | break; | |
bf608f88 JR |
4203 | case SVM_EXIT_PAUSE: |
4204 | /* | |
4205 | * We get this for NOP only, but pause | |
4206 | * is rep not, check this here | |
4207 | */ | |
4208 | if (info->rep_prefix != REPE_PREFIX) | |
4209 | goto out; | |
49a8afca | 4210 | break; |
f6511935 JR |
4211 | case SVM_EXIT_IOIO: { |
4212 | u64 exit_info; | |
4213 | u32 bytes; | |
4214 | ||
f6511935 JR |
4215 | if (info->intercept == x86_intercept_in || |
4216 | info->intercept == x86_intercept_ins) { | |
6cbc5f5a JK |
4217 | exit_info = ((info->src_val & 0xffff) << 16) | |
4218 | SVM_IOIO_TYPE_MASK; | |
f6511935 | 4219 | bytes = info->dst_bytes; |
6493f157 | 4220 | } else { |
6cbc5f5a | 4221 | exit_info = (info->dst_val & 0xffff) << 16; |
6493f157 | 4222 | bytes = info->src_bytes; |
f6511935 JR |
4223 | } |
4224 | ||
4225 | if (info->intercept == x86_intercept_outs || | |
4226 | info->intercept == x86_intercept_ins) | |
4227 | exit_info |= SVM_IOIO_STR_MASK; | |
4228 | ||
4229 | if (info->rep_prefix) | |
4230 | exit_info |= SVM_IOIO_REP_MASK; | |
4231 | ||
4232 | bytes = min(bytes, 4u); | |
4233 | ||
4234 | exit_info |= bytes << SVM_IOIO_SIZE_SHIFT; | |
4235 | ||
4236 | exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1); | |
4237 | ||
4238 | vmcb->control.exit_info_1 = exit_info; | |
4239 | vmcb->control.exit_info_2 = info->next_rip; | |
4240 | ||
4241 | break; | |
4242 | } | |
cfec82cb JR |
4243 | default: |
4244 | break; | |
4245 | } | |
4246 | ||
f104765b BD |
4247 | /* TODO: Advertise NRIPS to guest hypervisor unconditionally */ |
4248 | if (static_cpu_has(X86_FEATURE_NRIPS)) | |
4249 | vmcb->control.next_rip = info->next_rip; | |
cfec82cb JR |
4250 | vmcb->control.exit_code = icpt_info.exit_code; |
4251 | vmexit = nested_svm_exit_handled(svm); | |
4252 | ||
4253 | ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED | |
4254 | : X86EMUL_CONTINUE; | |
4255 | ||
4256 | out: | |
4257 | return ret; | |
8a76d7f2 JR |
4258 | } |
4259 | ||
a9ab13ff | 4260 | static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu) |
a547c6db | 4261 | { |
a547c6db YZ |
4262 | } |
4263 | ||
ae97a3b8 RK |
4264 | static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu) |
4265 | { | |
830f01b0 | 4266 | if (!kvm_pause_in_guest(vcpu->kvm)) |
8566ac8b | 4267 | shrink_ple_window(vcpu); |
ae97a3b8 RK |
4268 | } |
4269 | ||
74f16909 BP |
4270 | static void svm_setup_mce(struct kvm_vcpu *vcpu) |
4271 | { | |
4272 | /* [63:9] are reserved. */ | |
4273 | vcpu->arch.mcg_cap &= 0x1ff; | |
4274 | } | |
4275 | ||
cae96af1 | 4276 | bool svm_smi_blocked(struct kvm_vcpu *vcpu) |
72d7b374 | 4277 | { |
05cade71 LP |
4278 | struct vcpu_svm *svm = to_svm(vcpu); |
4279 | ||
4280 | /* Per APM Vol.2 15.22.2 "Response to SMI" */ | |
4281 | if (!gif_set(svm)) | |
cae96af1 PB |
4282 | return true; |
4283 | ||
4284 | return is_smm(vcpu); | |
4285 | } | |
4286 | ||
c9d40913 | 4287 | static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection) |
cae96af1 PB |
4288 | { |
4289 | struct vcpu_svm *svm = to_svm(vcpu); | |
4290 | if (svm->nested.nested_run_pending) | |
c9d40913 | 4291 | return -EBUSY; |
05cade71 | 4292 | |
c300ab9f PB |
4293 | /* An SMI must not be injected into L2 if it's supposed to VM-Exit. */ |
4294 | if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm)) | |
c9d40913 | 4295 | return -EBUSY; |
c300ab9f | 4296 | |
cae96af1 | 4297 | return !svm_smi_blocked(vcpu); |
72d7b374 LP |
4298 | } |
4299 | ||
ecc513e5 | 4300 | static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate) |
0234bf88 | 4301 | { |
05cade71 LP |
4302 | struct vcpu_svm *svm = to_svm(vcpu); |
4303 | int ret; | |
4304 | ||
4305 | if (is_guest_mode(vcpu)) { | |
4306 | /* FED8h - SVM Guest */ | |
4307 | put_smstate(u64, smstate, 0x7ed8, 1); | |
4308 | /* FEE0h - SVM Guest VMCB Physical Address */ | |
0dd16b5b | 4309 | put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa); |
05cade71 LP |
4310 | |
4311 | svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; | |
4312 | svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; | |
4313 | svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; | |
4314 | ||
4315 | ret = nested_svm_vmexit(svm); | |
4316 | if (ret) | |
4317 | return ret; | |
4318 | } | |
0234bf88 LP |
4319 | return 0; |
4320 | } | |
4321 | ||
ecc513e5 | 4322 | static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) |
0234bf88 | 4323 | { |
05cade71 | 4324 | struct vcpu_svm *svm = to_svm(vcpu); |
8c5fbf1a | 4325 | struct kvm_host_map map; |
59cd9bc5 | 4326 | int ret = 0; |
05cade71 | 4327 | |
3ebb5d26 ML |
4328 | if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) { |
4329 | u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0); | |
4330 | u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8); | |
0dd16b5b | 4331 | u64 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0); |
05cade71 | 4332 | |
3ebb5d26 ML |
4333 | if (guest) { |
4334 | if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM)) | |
4335 | return 1; | |
4336 | ||
4337 | if (!(saved_efer & EFER_SVME)) | |
4338 | return 1; | |
4339 | ||
63129754 | 4340 | if (kvm_vcpu_map(vcpu, |
0dd16b5b | 4341 | gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL) |
3ebb5d26 ML |
4342 | return 1; |
4343 | ||
2fcf4876 | 4344 | if (svm_allocate_nested(svm)) |
3ebb5d26 ML |
4345 | return 1; |
4346 | ||
63129754 PB |
4347 | ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, map.hva); |
4348 | kvm_vcpu_unmap(vcpu, &map, true); | |
3ebb5d26 | 4349 | } |
05cade71 | 4350 | } |
59cd9bc5 VK |
4351 | |
4352 | return ret; | |
0234bf88 LP |
4353 | } |
4354 | ||
b6a7cc35 | 4355 | static void svm_enable_smi_window(struct kvm_vcpu *vcpu) |
cc3d967f LP |
4356 | { |
4357 | struct vcpu_svm *svm = to_svm(vcpu); | |
4358 | ||
4359 | if (!gif_set(svm)) { | |
4360 | if (vgif_enabled(svm)) | |
a284ba56 | 4361 | svm_set_intercept(svm, INTERCEPT_STGI); |
cc3d967f | 4362 | /* STGI will cause a vm exit */ |
c9d40913 PB |
4363 | } else { |
4364 | /* We must be in SMM; RSM will cause a vmexit anyway. */ | |
cc3d967f | 4365 | } |
cc3d967f LP |
4366 | } |
4367 | ||
09e3e2a1 | 4368 | static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int insn_len) |
05d5a486 | 4369 | { |
09e3e2a1 SC |
4370 | bool smep, smap, is_user; |
4371 | unsigned long cr4; | |
e72436bc | 4372 | |
bc624d9f TL |
4373 | /* |
4374 | * When the guest is an SEV-ES guest, emulation is not possible. | |
4375 | */ | |
4376 | if (sev_es_guest(vcpu->kvm)) | |
4377 | return false; | |
4378 | ||
05d5a486 | 4379 | /* |
118154bd LA |
4380 | * Detect and workaround Errata 1096 Fam_17h_00_0Fh. |
4381 | * | |
4382 | * Errata: | |
4383 | * When CPU raise #NPF on guest data access and vCPU CR4.SMAP=1, it is | |
4384 | * possible that CPU microcode implementing DecodeAssist will fail | |
4385 | * to read bytes of instruction which caused #NPF. In this case, | |
4386 | * GuestIntrBytes field of the VMCB on a VMEXIT will incorrectly | |
4387 | * return 0 instead of the correct guest instruction bytes. | |
4388 | * | |
4389 | * This happens because CPU microcode reading instruction bytes | |
4390 | * uses a special opcode which attempts to read data using CPL=0 | |
d9f6e12f | 4391 | * privileges. The microcode reads CS:RIP and if it hits a SMAP |
118154bd LA |
4392 | * fault, it gives up and returns no instruction bytes. |
4393 | * | |
4394 | * Detection: | |
4395 | * We reach here in case CPU supports DecodeAssist, raised #NPF and | |
4396 | * returned 0 in GuestIntrBytes field of the VMCB. | |
4397 | * First, errata can only be triggered in case vCPU CR4.SMAP=1. | |
4398 | * Second, if vCPU CR4.SMEP=1, errata could only be triggered | |
4399 | * in case vCPU CPL==3 (Because otherwise guest would have triggered | |
4400 | * a SMEP fault instead of #NPF). | |
4401 | * Otherwise, vCPU CR4.SMEP=0, errata could be triggered by any vCPU CPL. | |
4402 | * As most guests enable SMAP if they have also enabled SMEP, use above | |
4403 | * logic in order to attempt minimize false-positive of detecting errata | |
4404 | * while still preserving all cases semantic correctness. | |
4405 | * | |
4406 | * Workaround: | |
4407 | * To determine what instruction the guest was executing, the hypervisor | |
4408 | * will have to decode the instruction at the instruction pointer. | |
05d5a486 SB |
4409 | * |
4410 | * In non SEV guest, hypervisor will be able to read the guest | |
4411 | * memory to decode the instruction pointer when insn_len is zero | |
4412 | * so we return true to indicate that decoding is possible. | |
4413 | * | |
4414 | * But in the SEV guest, the guest memory is encrypted with the | |
4415 | * guest specific key and hypervisor will not be able to decode the | |
4416 | * instruction pointer so we will not able to workaround it. Lets | |
4417 | * print the error and request to kill the guest. | |
4418 | */ | |
09e3e2a1 SC |
4419 | if (likely(!insn || insn_len)) |
4420 | return true; | |
4421 | ||
4422 | /* | |
4423 | * If RIP is invalid, go ahead with emulation which will cause an | |
4424 | * internal error exit. | |
4425 | */ | |
4426 | if (!kvm_vcpu_gfn_to_memslot(vcpu, kvm_rip_read(vcpu) >> PAGE_SHIFT)) | |
4427 | return true; | |
4428 | ||
4429 | cr4 = kvm_read_cr4(vcpu); | |
4430 | smep = cr4 & X86_CR4_SMEP; | |
4431 | smap = cr4 & X86_CR4_SMAP; | |
4432 | is_user = svm_get_cpl(vcpu) == 3; | |
118154bd | 4433 | if (smap && (!smep || is_user)) { |
05d5a486 SB |
4434 | if (!sev_guest(vcpu->kvm)) |
4435 | return true; | |
4436 | ||
118154bd | 4437 | pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n"); |
05d5a486 SB |
4438 | kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); |
4439 | } | |
4440 | ||
4441 | return false; | |
4442 | } | |
4443 | ||
4b9852f4 LA |
4444 | static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu) |
4445 | { | |
4446 | struct vcpu_svm *svm = to_svm(vcpu); | |
4447 | ||
4448 | /* | |
4449 | * TODO: Last condition latch INIT signals on vCPU when | |
4450 | * vCPU is in guest-mode and vmcb12 defines intercept on INIT. | |
33b22172 PB |
4451 | * To properly emulate the INIT intercept, |
4452 | * svm_check_nested_events() should call nested_svm_vmexit() | |
4453 | * if an INIT signal is pending. | |
4b9852f4 LA |
4454 | */ |
4455 | return !gif_set(svm) || | |
c62e2e94 | 4456 | (vmcb_is_intercept(&svm->vmcb->control, INTERCEPT_INIT)); |
4b9852f4 LA |
4457 | } |
4458 | ||
647daca2 TL |
4459 | static void svm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) |
4460 | { | |
4461 | if (!sev_es_guest(vcpu->kvm)) | |
4462 | return kvm_vcpu_deliver_sipi_vector(vcpu, vector); | |
4463 | ||
4464 | sev_vcpu_deliver_sipi_vector(vcpu, vector); | |
4465 | } | |
4466 | ||
eaf78265 JR |
4467 | static void svm_vm_destroy(struct kvm *kvm) |
4468 | { | |
4469 | avic_vm_destroy(kvm); | |
4470 | sev_vm_destroy(kvm); | |
4471 | } | |
4472 | ||
4473 | static int svm_vm_init(struct kvm *kvm) | |
4474 | { | |
830f01b0 WL |
4475 | if (!pause_filter_count || !pause_filter_thresh) |
4476 | kvm->arch.pause_in_guest = true; | |
4477 | ||
fdf513e3 | 4478 | if (enable_apicv) { |
eaf78265 JR |
4479 | int ret = avic_vm_init(kvm); |
4480 | if (ret) | |
4481 | return ret; | |
4482 | } | |
4483 | ||
eaf78265 JR |
4484 | return 0; |
4485 | } | |
4486 | ||
9c14ee21 | 4487 | static struct kvm_x86_ops svm_x86_ops __initdata = { |
dd58f3c9 | 4488 | .hardware_unsetup = svm_hardware_teardown, |
6aa8b732 AK |
4489 | .hardware_enable = svm_hardware_enable, |
4490 | .hardware_disable = svm_hardware_disable, | |
774ead3a | 4491 | .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr, |
bc226f07 | 4492 | .has_emulated_msr = svm_has_emulated_msr, |
6aa8b732 AK |
4493 | |
4494 | .vcpu_create = svm_create_vcpu, | |
4495 | .vcpu_free = svm_free_vcpu, | |
04d2cc77 | 4496 | .vcpu_reset = svm_vcpu_reset, |
6aa8b732 | 4497 | |
562b6b08 | 4498 | .vm_size = sizeof(struct kvm_svm), |
4e19c36f | 4499 | .vm_init = svm_vm_init, |
1654efcb | 4500 | .vm_destroy = svm_vm_destroy, |
44a95dae | 4501 | |
04d2cc77 | 4502 | .prepare_guest_switch = svm_prepare_guest_switch, |
6aa8b732 AK |
4503 | .vcpu_load = svm_vcpu_load, |
4504 | .vcpu_put = svm_vcpu_put, | |
8221c137 SS |
4505 | .vcpu_blocking = svm_vcpu_blocking, |
4506 | .vcpu_unblocking = svm_vcpu_unblocking, | |
6aa8b732 | 4507 | |
b6a7cc35 | 4508 | .update_exception_bitmap = svm_update_exception_bitmap, |
801e459a | 4509 | .get_msr_feature = svm_get_msr_feature, |
6aa8b732 AK |
4510 | .get_msr = svm_get_msr, |
4511 | .set_msr = svm_set_msr, | |
4512 | .get_segment_base = svm_get_segment_base, | |
4513 | .get_segment = svm_get_segment, | |
4514 | .set_segment = svm_set_segment, | |
2e4d2653 | 4515 | .get_cpl = svm_get_cpl, |
1747fb71 | 4516 | .get_cs_db_l_bits = kvm_get_cs_db_l_bits, |
6aa8b732 | 4517 | .set_cr0 = svm_set_cr0, |
c2fe3cd4 | 4518 | .is_valid_cr4 = svm_is_valid_cr4, |
6aa8b732 AK |
4519 | .set_cr4 = svm_set_cr4, |
4520 | .set_efer = svm_set_efer, | |
4521 | .get_idt = svm_get_idt, | |
4522 | .set_idt = svm_set_idt, | |
4523 | .get_gdt = svm_get_gdt, | |
4524 | .set_gdt = svm_set_gdt, | |
020df079 | 4525 | .set_dr7 = svm_set_dr7, |
facb0139 | 4526 | .sync_dirty_debug_regs = svm_sync_dirty_debug_regs, |
6de4f3ad | 4527 | .cache_reg = svm_cache_reg, |
6aa8b732 AK |
4528 | .get_rflags = svm_get_rflags, |
4529 | .set_rflags = svm_set_rflags, | |
be94f6b7 | 4530 | |
7780938c | 4531 | .tlb_flush_all = svm_flush_tlb, |
eeeb4f67 | 4532 | .tlb_flush_current = svm_flush_tlb, |
faff8758 | 4533 | .tlb_flush_gva = svm_flush_tlb_gva, |
72b38320 | 4534 | .tlb_flush_guest = svm_flush_tlb, |
6aa8b732 | 4535 | |
6aa8b732 | 4536 | .run = svm_vcpu_run, |
04d2cc77 | 4537 | .handle_exit = handle_exit, |
6aa8b732 | 4538 | .skip_emulated_instruction = skip_emulated_instruction, |
5ef8acbd | 4539 | .update_emulated_instruction = NULL, |
2809f5d2 GC |
4540 | .set_interrupt_shadow = svm_set_interrupt_shadow, |
4541 | .get_interrupt_shadow = svm_get_interrupt_shadow, | |
102d8325 | 4542 | .patch_hypercall = svm_patch_hypercall, |
2a8067f1 | 4543 | .set_irq = svm_set_irq, |
95ba8273 | 4544 | .set_nmi = svm_inject_nmi, |
298101da | 4545 | .queue_exception = svm_queue_exception, |
b463a6f7 | 4546 | .cancel_injection = svm_cancel_injection, |
78646121 | 4547 | .interrupt_allowed = svm_interrupt_allowed, |
95ba8273 | 4548 | .nmi_allowed = svm_nmi_allowed, |
3cfc3092 JK |
4549 | .get_nmi_mask = svm_get_nmi_mask, |
4550 | .set_nmi_mask = svm_set_nmi_mask, | |
b6a7cc35 JB |
4551 | .enable_nmi_window = svm_enable_nmi_window, |
4552 | .enable_irq_window = svm_enable_irq_window, | |
4553 | .update_cr8_intercept = svm_update_cr8_intercept, | |
8d860bbe | 4554 | .set_virtual_apic_mode = svm_set_virtual_apic_mode, |
d62caabb | 4555 | .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl, |
ef8efd7a | 4556 | .check_apicv_inhibit_reasons = svm_check_apicv_inhibit_reasons, |
2de9d0cc | 4557 | .pre_update_apicv_exec_ctrl = svm_pre_update_apicv_exec_ctrl, |
c7c9c56c | 4558 | .load_eoi_exitmap = svm_load_eoi_exitmap, |
44a95dae SS |
4559 | .hwapic_irr_update = svm_hwapic_irr_update, |
4560 | .hwapic_isr_update = svm_hwapic_isr_update, | |
fa59cc00 | 4561 | .sync_pir_to_irr = kvm_lapic_find_highest_irr, |
be8ca170 | 4562 | .apicv_post_state_restore = avic_post_state_restore, |
cbc94022 IE |
4563 | |
4564 | .set_tss_addr = svm_set_tss_addr, | |
2ac52ab8 | 4565 | .set_identity_map_addr = svm_set_identity_map_addr, |
4b12f0de | 4566 | .get_mt_mask = svm_get_mt_mask, |
229456fc | 4567 | |
586f9607 | 4568 | .get_exit_info = svm_get_exit_info, |
586f9607 | 4569 | |
7c1b761b | 4570 | .vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid, |
4e47c7a6 | 4571 | |
f5f48ee1 | 4572 | .has_wbinvd_exit = svm_has_wbinvd_exit, |
99e3e30a | 4573 | |
307a94c7 IS |
4574 | .get_l2_tsc_offset = svm_get_l2_tsc_offset, |
4575 | .get_l2_tsc_multiplier = svm_get_l2_tsc_multiplier, | |
edcfe540 | 4576 | .write_tsc_offset = svm_write_tsc_offset, |
1ab9287a | 4577 | .write_tsc_multiplier = svm_write_tsc_multiplier, |
1c97f0a0 | 4578 | |
727a7e27 | 4579 | .load_mmu_pgd = svm_load_mmu_pgd, |
8a76d7f2 JR |
4580 | |
4581 | .check_intercept = svm_check_intercept, | |
95b5a48c | 4582 | .handle_exit_irqoff = svm_handle_exit_irqoff, |
ae97a3b8 | 4583 | |
d264ee0c SC |
4584 | .request_immediate_exit = __kvm_request_immediate_exit, |
4585 | ||
ae97a3b8 | 4586 | .sched_in = svm_sched_in, |
25462f7f WH |
4587 | |
4588 | .pmu_ops = &amd_pmu_ops, | |
33b22172 PB |
4589 | .nested_ops = &svm_nested_ops, |
4590 | ||
340d3bc3 | 4591 | .deliver_posted_interrupt = svm_deliver_avic_intr, |
17e433b5 | 4592 | .dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt, |
411b44ba | 4593 | .update_pi_irte = svm_update_pi_irte, |
74f16909 | 4594 | .setup_mce = svm_setup_mce, |
0234bf88 | 4595 | |
72d7b374 | 4596 | .smi_allowed = svm_smi_allowed, |
ecc513e5 SC |
4597 | .enter_smm = svm_enter_smm, |
4598 | .leave_smm = svm_leave_smm, | |
b6a7cc35 | 4599 | .enable_smi_window = svm_enable_smi_window, |
1654efcb BS |
4600 | |
4601 | .mem_enc_op = svm_mem_enc_op, | |
1e80fdc0 BS |
4602 | .mem_enc_reg_region = svm_register_enc_region, |
4603 | .mem_enc_unreg_region = svm_unregister_enc_region, | |
57b119da | 4604 | |
54526d1f NT |
4605 | .vm_copy_enc_context_from = svm_vm_copy_asid_from, |
4606 | ||
09e3e2a1 | 4607 | .can_emulate_instruction = svm_can_emulate_instruction, |
4b9852f4 LA |
4608 | |
4609 | .apic_init_signal_blocked = svm_apic_init_signal_blocked, | |
fd6fa73d AG |
4610 | |
4611 | .msr_filter_changed = svm_msr_filter_changed, | |
f1c6366e | 4612 | .complete_emulated_msr = svm_complete_emulated_msr, |
647daca2 TL |
4613 | |
4614 | .vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector, | |
6aa8b732 AK |
4615 | }; |
4616 | ||
d008dfdb SC |
4617 | static struct kvm_x86_init_ops svm_init_ops __initdata = { |
4618 | .cpu_has_kvm_support = has_svm, | |
4619 | .disabled_by_bios = is_disabled, | |
4620 | .hardware_setup = svm_hardware_setup, | |
4621 | .check_processor_compatibility = svm_check_processor_compat, | |
4622 | ||
4623 | .runtime_ops = &svm_x86_ops, | |
6aa8b732 AK |
4624 | }; |
4625 | ||
4626 | static int __init svm_init(void) | |
4627 | { | |
d07f46f9 TL |
4628 | __unused_size_checks(); |
4629 | ||
d008dfdb | 4630 | return kvm_init(&svm_init_ops, sizeof(struct vcpu_svm), |
0ee75bea | 4631 | __alignof__(struct vcpu_svm), THIS_MODULE); |
6aa8b732 AK |
4632 | } |
4633 | ||
4634 | static void __exit svm_exit(void) | |
4635 | { | |
cb498ea2 | 4636 | kvm_exit(); |
6aa8b732 AK |
4637 | } |
4638 | ||
4639 | module_init(svm_init) | |
4640 | module_exit(svm_exit) |