arch/x86/kvm/svm/svm.c
1 #define pr_fmt(fmt) "SVM: " fmt
2
3 #include <linux/kvm_host.h>
4
5 #include "irq.h"
6 #include "mmu.h"
7 #include "kvm_cache_regs.h"
8 #include "x86.h"
9 #include "cpuid.h"
10 #include "pmu.h"
11
12 #include <linux/module.h>
13 #include <linux/mod_devicetable.h>
14 #include <linux/kernel.h>
15 #include <linux/vmalloc.h>
16 #include <linux/highmem.h>
17 #include <linux/amd-iommu.h>
18 #include <linux/sched.h>
19 #include <linux/trace_events.h>
20 #include <linux/slab.h>
21 #include <linux/hashtable.h>
22 #include <linux/objtool.h>
23 #include <linux/psp-sev.h>
24 #include <linux/file.h>
25 #include <linux/pagemap.h>
26 #include <linux/swap.h>
27 #include <linux/rwsem.h>
28 #include <linux/cc_platform.h>
29
30 #include <asm/apic.h>
31 #include <asm/perf_event.h>
32 #include <asm/tlbflush.h>
33 #include <asm/desc.h>
34 #include <asm/debugreg.h>
35 #include <asm/kvm_para.h>
36 #include <asm/irq_remapping.h>
37 #include <asm/spec-ctrl.h>
38 #include <asm/cpu_device_id.h>
39 #include <asm/traps.h>
40 #include <asm/fpu/api.h>
41
42 #include <asm/virtext.h>
43 #include "trace.h"
44
45 #include "svm.h"
46 #include "svm_ops.h"
47
48 #include "kvm_onhyperv.h"
49 #include "svm_onhyperv.h"
50
51 MODULE_AUTHOR("Qumranet");
52 MODULE_LICENSE("GPL");
53
54 #ifdef MODULE
55 static const struct x86_cpu_id svm_cpu_id[] = {
56         X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL),
57         {}
58 };
59 MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
60 #endif
61
62 #define SEG_TYPE_LDT 2
63 #define SEG_TYPE_BUSY_TSS16 3
64
65 static bool erratum_383_found __read_mostly;
66
67 u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
68
69 /*
70  * Set osvw_len to a higher value when updated Revision Guides
71  * are published and we know what the new status bits are.
72  */
73 static uint64_t osvw_len = 4, osvw_status;
74
75 static DEFINE_PER_CPU(u64, current_tsc_ratio);
76
77 #define X2APIC_MSR(x)   (APIC_BASE_MSR + (x >> 4))
78
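/*
 * For illustration: the x2APIC MSR index is the xAPIC MMIO register offset
 * divided by 16, rebased at APIC_BASE_MSR (0x800).  For example,
 * APIC_TASKPRI (offset 0x80) maps to MSR 0x808, and APIC_LVTT (offset
 * 0x320) maps to MSR 0x832, the register discussed in the comment below.
 */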
79 static const struct svm_direct_access_msrs {
80         u32 index;   /* Index of the MSR */
81         bool always; /* True if intercept is initially cleared */
82 } direct_access_msrs[MAX_DIRECT_ACCESS_MSRS] = {
83         { .index = MSR_STAR,                            .always = true  },
84         { .index = MSR_IA32_SYSENTER_CS,                .always = true  },
85         { .index = MSR_IA32_SYSENTER_EIP,               .always = false },
86         { .index = MSR_IA32_SYSENTER_ESP,               .always = false },
87 #ifdef CONFIG_X86_64
88         { .index = MSR_GS_BASE,                         .always = true  },
89         { .index = MSR_FS_BASE,                         .always = true  },
90         { .index = MSR_KERNEL_GS_BASE,                  .always = true  },
91         { .index = MSR_LSTAR,                           .always = true  },
92         { .index = MSR_CSTAR,                           .always = true  },
93         { .index = MSR_SYSCALL_MASK,                    .always = true  },
94 #endif
95         { .index = MSR_IA32_SPEC_CTRL,                  .always = false },
96         { .index = MSR_IA32_PRED_CMD,                   .always = false },
97         { .index = MSR_IA32_LASTBRANCHFROMIP,           .always = false },
98         { .index = MSR_IA32_LASTBRANCHTOIP,             .always = false },
99         { .index = MSR_IA32_LASTINTFROMIP,              .always = false },
100         { .index = MSR_IA32_LASTINTTOIP,                .always = false },
101         { .index = MSR_EFER,                            .always = false },
102         { .index = MSR_IA32_CR_PAT,                     .always = false },
103         { .index = MSR_AMD64_SEV_ES_GHCB,               .always = true  },
104         { .index = MSR_TSC_AUX,                         .always = false },
105         { .index = X2APIC_MSR(APIC_ID),                 .always = false },
106         { .index = X2APIC_MSR(APIC_LVR),                .always = false },
107         { .index = X2APIC_MSR(APIC_TASKPRI),            .always = false },
108         { .index = X2APIC_MSR(APIC_ARBPRI),             .always = false },
109         { .index = X2APIC_MSR(APIC_PROCPRI),            .always = false },
110         { .index = X2APIC_MSR(APIC_EOI),                .always = false },
111         { .index = X2APIC_MSR(APIC_RRR),                .always = false },
112         { .index = X2APIC_MSR(APIC_LDR),                .always = false },
113         { .index = X2APIC_MSR(APIC_DFR),                .always = false },
114         { .index = X2APIC_MSR(APIC_SPIV),               .always = false },
115         { .index = X2APIC_MSR(APIC_ISR),                .always = false },
116         { .index = X2APIC_MSR(APIC_TMR),                .always = false },
117         { .index = X2APIC_MSR(APIC_IRR),                .always = false },
118         { .index = X2APIC_MSR(APIC_ESR),                .always = false },
119         { .index = X2APIC_MSR(APIC_ICR),                .always = false },
120         { .index = X2APIC_MSR(APIC_ICR2),               .always = false },
121
122         /*
123          * Note:
124          * AMD does not virtualize the APIC TSC-deadline timer mode, but KVM
125          * emulates it. Setting bit 18 of the APIC LVTT register (MSR 0x832)
126          * causes the AVIC hardware to generate a #GP fault. Therefore, always
127          * intercept MSR 0x832 and do not set up a direct_access_msr entry.
128          */
129         { .index = X2APIC_MSR(APIC_LVTTHMR),            .always = false },
130         { .index = X2APIC_MSR(APIC_LVTPC),              .always = false },
131         { .index = X2APIC_MSR(APIC_LVT0),               .always = false },
132         { .index = X2APIC_MSR(APIC_LVT1),               .always = false },
133         { .index = X2APIC_MSR(APIC_LVTERR),             .always = false },
134         { .index = X2APIC_MSR(APIC_TMICT),              .always = false },
135         { .index = X2APIC_MSR(APIC_TMCCT),              .always = false },
136         { .index = X2APIC_MSR(APIC_TDCR),               .always = false },
137         { .index = MSR_INVALID,                         .always = false },
138 };
139
140 /*
141  * These 2 parameters are used to configure the controls for Pause-Loop Exiting:
142  * pause_filter_count: On processors that support Pause filtering (indicated
143  *      by CPUID Fn8000_000A_EDX), the VMCB provides a 16-bit pause filter
144  *      count value. On VMRUN this value is loaded into an internal counter.
145  *      Each time a pause instruction is executed, this counter is decremented
146  *      until it reaches zero at which time a #VMEXIT is generated if pause
147  *      intercept is enabled. Refer to  AMD APM Vol 2 Section 15.14.4 Pause
148  *      Intercept Filtering for more details.
149  *      This also indicates whether PLE logic is enabled.
150  *
151  * pause_filter_thresh: In addition, some processor families support advanced
152  *      pause filtering (indicated by CPUID Fn8000_000A_EDX), which places an upper
153  *      bound on the amount of time a guest is allowed to execute in a pause loop.
154  *      In this mode, a 16-bit pause filter threshold field is added in the
155  *      VMCB. The threshold value is a cycle count that is used to reset the
156  *      pause counter. As with simple pause filtering, VMRUN loads the pause
157  *      count value from VMCB into an internal counter. Then, on each pause
158  *      instruction the hardware checks the elapsed number of cycles since
159  *      the most recent pause instruction against the pause filter threshold.
160  *      If the elapsed cycle count is greater than the pause filter threshold,
161  *      then the internal pause count is reloaded from the VMCB and execution
162  *      continues. If the elapsed cycle count is less than the pause filter
163  *      threshold, then the internal pause count is decremented. If the count
164  *      value is less than zero and PAUSE intercept is enabled, a #VMEXIT is
165  *      triggered. If advanced pause filtering is supported and the pause
166  *      filter threshold field is set to zero, the filter will operate in the
167  *      simpler, count-only mode.
168  */
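/*
 * A minimal, illustrative model of the count-only mode described above;
 * the helper below is a hypothetical sketch, not KVM code, and is not
 * built.  VMRUN loads the counter from the VMCB, each PAUSE decrements
 * it, and exhausting it triggers a #VMEXIT when PAUSE is intercepted.
 */
#if 0	/* sketch only */
static bool ple_filter_pause(u16 *counter, bool pause_intercepted)
{
	if (*counter)
		--*counter;

	/* #VMEXIT once the count loaded at VMRUN is used up. */
	return pause_intercepted && *counter == 0;
}
#endif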
169
170 static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
171 module_param(pause_filter_thresh, ushort, 0444);
172
173 static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
174 module_param(pause_filter_count, ushort, 0444);
175
176 /* Default doubles per-vcpu window every exit. */
177 static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
178 module_param(pause_filter_count_grow, ushort, 0444);
179
180 /* Default resets per-vcpu window every exit to pause_filter_count. */
181 static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
182 module_param(pause_filter_count_shrink, ushort, 0444);
183
184 /* Default is to compute the maximum so we can never overflow. */
185 static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
186 module_param(pause_filter_count_max, ushort, 0444);
187
188 /*
189  * Use nested page tables by default.  Note, NPT may get forced off by
190  * svm_hardware_setup() if it's unsupported by hardware or the host kernel.
191  */
192 bool npt_enabled = true;
193 module_param_named(npt, npt_enabled, bool, 0444);
194
195 /* allow nested virtualization in KVM/SVM */
196 static int nested = true;
197 module_param(nested, int, S_IRUGO);
198
199 /* enable/disable Next RIP Save */
200 static int nrips = true;
201 module_param(nrips, int, 0444);
202
203 /* enable/disable Virtual VMLOAD VMSAVE */
204 static int vls = true;
205 module_param(vls, int, 0444);
206
207 /* enable/disable Virtual GIF */
208 int vgif = true;
209 module_param(vgif, int, 0444);
210
211 /* enable/disable LBR virtualization */
212 static int lbrv = true;
213 module_param(lbrv, int, 0444);
214
215 static int tsc_scaling = true;
216 module_param(tsc_scaling, int, 0444);
217
218 /*
219  * Enable/disable AVIC.  Because the defaults for APICv support differ
220  * between VMX and SVM, we cannot use module_param_named.
221  */
222 static bool avic;
223 module_param(avic, bool, 0444);
224
225 bool __read_mostly dump_invalid_vmcb;
226 module_param(dump_invalid_vmcb, bool, 0644);
227
228
229 bool intercept_smi = true;
230 module_param(intercept_smi, bool, 0444);
231
232
233 static bool svm_gp_erratum_intercept = true;
234
235 static u8 rsm_ins_bytes[] = "\x0f\xaa";
236
237 static unsigned long iopm_base;
238
239 struct kvm_ldttss_desc {
240         u16 limit0;
241         u16 base0;
242         unsigned base1:8, type:5, dpl:2, p:1;
243         unsigned limit1:4, zero0:3, g:1, base2:8;
244         u32 base3;
245         u32 zero1;
246 } __attribute__((packed));
247
248 DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
249
250 /*
251  * Only MSR_TSC_AUX is switched via the user return hook.  EFER is switched via
252  * the VMCB, and the SYSCALL/SYSENTER MSRs are handled by VMLOAD/VMSAVE.
253  *
254  * RDTSCP and RDPID are not used in the kernel, specifically to allow KVM to
255  * defer the restoration of TSC_AUX until the CPU returns to userspace.
256  */
257 static int tsc_aux_uret_slot __read_mostly = -1;
258
259 static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
260
261 #define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
262 #define MSRS_RANGE_SIZE 2048
263 #define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
264
265 u32 svm_msrpm_offset(u32 msr)
266 {
267         u32 offset;
268         int i;
269
270         for (i = 0; i < NUM_MSR_MAPS; i++) {
271                 if (msr < msrpm_ranges[i] ||
272                     msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
273                         continue;
274
275                 offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
276                 offset += (i * MSRS_RANGE_SIZE);       /* add range offset */
277
278                 /* Now we have the u8 offset - but need the u32 offset */
279                 return offset / 4;
280         }
281
282         /* MSR not in any range */
283         return MSR_INVALID;
284 }
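/*
 * Worked example: MSR_STAR (0xc0000081) falls in the 0xc0000000 range
 * (i = 1), so its byte offset is 0x81 / 4 + 1 * 2048 = 2080, i.e. u32
 * offset 520.  Within that u32, set_msr_interception_bitmap() below uses
 * bit 2 * (msr & 0x0f) = 2 for the read intercept and bit 3 for the write
 * intercept.
 */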
285
286 static void svm_flush_tlb_current(struct kvm_vcpu *vcpu);
287
288 static int get_npt_level(void)
289 {
290 #ifdef CONFIG_X86_64
291         return pgtable_l5_enabled() ? PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
292 #else
293         return PT32E_ROOT_LEVEL;
294 #endif
295 }
296
297 int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
298 {
299         struct vcpu_svm *svm = to_svm(vcpu);
300         u64 old_efer = vcpu->arch.efer;
301         vcpu->arch.efer = efer;
302
303         if (!npt_enabled) {
304                 /* Shadow paging assumes NX to be available.  */
305                 efer |= EFER_NX;
306
307                 if (!(efer & EFER_LMA))
308                         efer &= ~EFER_LME;
309         }
310
311         if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) {
312                 if (!(efer & EFER_SVME)) {
313                         svm_leave_nested(vcpu);
314                         svm_set_gif(svm, true);
315                         /* #GP intercept is still needed for vmware backdoor */
316                         if (!enable_vmware_backdoor)
317                                 clr_exception_intercept(svm, GP_VECTOR);
318
319                         /*
320                          * Free the nested guest state, unless we are in SMM.
321                          * In this case we will return to the nested guest
322                          * as soon as we leave SMM.
323                          */
324                         if (!is_smm(vcpu))
325                                 svm_free_nested(svm);
326
327                 } else {
328                         int ret = svm_allocate_nested(svm);
329
330                         if (ret) {
331                                 vcpu->arch.efer = old_efer;
332                                 return ret;
333                         }
334
335                         /*
336                          * Never intercept #GP for SEV guests, as KVM can't
337                          * decrypt guest memory to work around the erratum.
338                          */
339                         if (svm_gp_erratum_intercept && !sev_guest(vcpu->kvm))
340                                 set_exception_intercept(svm, GP_VECTOR);
341                 }
342         }
343
344         svm->vmcb->save.efer = efer | EFER_SVME;
345         vmcb_mark_dirty(svm->vmcb, VMCB_CR);
346         return 0;
347 }
348
349 static int is_external_interrupt(u32 info)
350 {
351         info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
352         return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
353 }
354
355 static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
356 {
357         struct vcpu_svm *svm = to_svm(vcpu);
358         u32 ret = 0;
359
360         if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
361                 ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
362         return ret;
363 }
364
365 static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
366 {
367         struct vcpu_svm *svm = to_svm(vcpu);
368
369         if (mask == 0)
370                 svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
371         else
372                 svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
373
374 }
375
376 static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
377                                            bool commit_side_effects)
378 {
379         struct vcpu_svm *svm = to_svm(vcpu);
380         unsigned long old_rflags;
381
382         /*
383          * SEV-ES does not expose the next RIP. The RIP update is controlled by
384          * the type of exit and the #VC handler in the guest.
385          */
386         if (sev_es_guest(vcpu->kvm))
387                 goto done;
388
389         if (nrips && svm->vmcb->control.next_rip != 0) {
390                 WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
391                 svm->next_rip = svm->vmcb->control.next_rip;
392         }
393
394         if (!svm->next_rip) {
395                 if (unlikely(!commit_side_effects))
396                         old_rflags = svm->vmcb->save.rflags;
397
398                 if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
399                         return 0;
400
401                 if (unlikely(!commit_side_effects))
402                         svm->vmcb->save.rflags = old_rflags;
403         } else {
404                 kvm_rip_write(vcpu, svm->next_rip);
405         }
406
407 done:
408         if (likely(commit_side_effects))
409                 svm_set_interrupt_shadow(vcpu, 0);
410
411         return 1;
412 }
413
414 static int svm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
415 {
416         return __svm_skip_emulated_instruction(vcpu, true);
417 }
418
419 static int svm_update_soft_interrupt_rip(struct kvm_vcpu *vcpu)
420 {
421         unsigned long rip, old_rip = kvm_rip_read(vcpu);
422         struct vcpu_svm *svm = to_svm(vcpu);
423
424         /*
425          * Due to architectural shortcomings, the CPU doesn't always provide
426          * NextRIP, e.g. if KVM intercepted an exception that occurred while
427          * the CPU was vectoring an INTO/INT3 in the guest.  Temporarily skip
428          * the instruction even if NextRIP is supported to acquire the next
429          * RIP so that it can be shoved into the NextRIP field, otherwise
430          * hardware will fail to advance guest RIP during event injection.
431          * Drop the exception/interrupt if emulation fails and effectively
432          * retry the instruction; it's the least awful option.  If NRIPS is
433          * in use, the skip must not commit any side effects such as clearing
434          * the interrupt shadow or RFLAGS.RF.
435          */
436         if (!__svm_skip_emulated_instruction(vcpu, !nrips))
437                 return -EIO;
438
439         rip = kvm_rip_read(vcpu);
440
441         /*
442          * Save the injection information, even when using next_rip, as the
443          * VMCB's next_rip will be lost (cleared on VM-Exit) if the injection
444          * doesn't complete due to a VM-Exit occurring while the CPU is
445          * vectoring the event.   Decoding the instruction isn't guaranteed to
446          * work as there may be no backing instruction, e.g. if the event is
447          * being injected by L1 for L2, or if the guest is patching INT3 into
448          * a different instruction.
449          */
450         svm->soft_int_injected = true;
451         svm->soft_int_csbase = svm->vmcb->save.cs.base;
452         svm->soft_int_old_rip = old_rip;
453         svm->soft_int_next_rip = rip;
454
455         if (nrips)
456                 kvm_rip_write(vcpu, old_rip);
457
458         if (static_cpu_has(X86_FEATURE_NRIPS))
459                 svm->vmcb->control.next_rip = rip;
460
461         return 0;
462 }
463
464 static void svm_inject_exception(struct kvm_vcpu *vcpu)
465 {
466         struct kvm_queued_exception *ex = &vcpu->arch.exception;
467         struct vcpu_svm *svm = to_svm(vcpu);
468
469         kvm_deliver_exception_payload(vcpu, ex);
470
471         if (kvm_exception_is_soft(ex->vector) &&
472             svm_update_soft_interrupt_rip(vcpu))
473                 return;
474
475         svm->vmcb->control.event_inj = ex->vector
476                 | SVM_EVTINJ_VALID
477                 | (ex->has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
478                 | SVM_EVTINJ_TYPE_EXEPT;
479         svm->vmcb->control.event_inj_err = ex->error_code;
480 }
481
482 static void svm_init_erratum_383(void)
483 {
484         u32 low, high;
485         int err;
486         u64 val;
487
488         if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
489                 return;
490
491         /* Use _safe variants to not break nested virtualization */
492         val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
493         if (err)
494                 return;
495
496         val |= (1ULL << 47);
497
498         low  = lower_32_bits(val);
499         high = upper_32_bits(val);
500
501         native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);
502
503         erratum_383_found = true;
504 }
505
506 static void svm_init_osvw(struct kvm_vcpu *vcpu)
507 {
508         /*
509          * Guests should see errata 400 and 415 as fixed (assuming that
510          * HLT and IO instructions are intercepted).
511          */
512         vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
513         vcpu->arch.osvw.status = osvw_status & ~(6ULL);
514
515         /*
516          * By increasing VCPU's osvw.length to 3 we are telling the guest that
517          * all osvw.status bits inside that length, including bit 0 (which is
518          * reserved for erratum 298), are valid. However, if host processor's
519          * osvw_len is 0 then osvw_status[0] carries no information. We need to
520          * be conservative here and therefore we tell the guest that erratum 298
521          * is present (because we really don't know).
522          */
523         if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
524                 vcpu->arch.osvw.status |= 1;
525 }
526
527 static int has_svm(void)
528 {
529         const char *msg;
530
531         if (!cpu_has_svm(&msg)) {
532                 printk(KERN_INFO "has_svm: %s\n", msg);
533                 return 0;
534         }
535
536         if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
537                 pr_info("KVM is unsupported when running as an SEV guest\n");
538                 return 0;
539         }
540
541         return 1;
542 }
543
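/*
 * Write MSR_AMD64_TSC_RATIO only if it differs from the value last written
 * on this CPU (cached in current_tsc_ratio), avoiding redundant WRMSRs on
 * the switch-to-guest path.
 */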
544 void __svm_write_tsc_multiplier(u64 multiplier)
545 {
546         preempt_disable();
547
548         if (multiplier == __this_cpu_read(current_tsc_ratio))
549                 goto out;
550
551         wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
552         __this_cpu_write(current_tsc_ratio, multiplier);
553 out:
554         preempt_enable();
555 }
556
557 static void svm_hardware_disable(void)
558 {
559         /* Make sure we clean up behind us */
560         if (tsc_scaling)
561                 __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
562
563         cpu_svm_disable();
564
565         amd_pmu_disable_virt();
566 }
567
568 static int svm_hardware_enable(void)
569 {
570
571         struct svm_cpu_data *sd;
572         uint64_t efer;
573         struct desc_struct *gdt;
574         int me = raw_smp_processor_id();
575
576         rdmsrl(MSR_EFER, efer);
577         if (efer & EFER_SVME)
578                 return -EBUSY;
579
580         if (!has_svm()) {
581                 pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
582                 return -EINVAL;
583         }
584         sd = per_cpu(svm_data, me);
585         if (!sd) {
586                 pr_err("%s: svm_data is NULL on %d\n", __func__, me);
587                 return -EINVAL;
588         }
589
590         sd->asid_generation = 1;
591         sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
592         sd->next_asid = sd->max_asid + 1;
593         sd->min_asid = max_sev_asid + 1;
594
595         gdt = get_current_gdt_rw();
596         sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
597
598         wrmsrl(MSR_EFER, efer | EFER_SVME);
599
600         wrmsrl(MSR_VM_HSAVE_PA, __sme_page_pa(sd->save_area));
601
602         if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
603                 /*
604                  * Set the default value, even if we don't use TSC scaling,
605                  * to avoid leaving a stale value in the MSR.
606                  */
607                 __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
608         }
609
610
611         /*
612          * Get OSVW bits.
613          *
614          * Note that it is possible to have a system with mixed processor
615          * revisions and therefore different OSVW bits. If bits are not the same
616          * on different processors then choose the worst case (i.e. if erratum
617          * is present on one processor and not on another then assume that the
618          * erratum is present everywhere).
619          */
620         if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
621                 uint64_t len, status = 0;
622                 int err;
623
624                 len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
625                 if (!err)
626                         status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
627                                                       &err);
628
629                 if (err)
630                         osvw_status = osvw_len = 0;
631                 else {
632                         if (len < osvw_len)
633                                 osvw_len = len;
634                         osvw_status |= status;
635                         osvw_status &= (1ULL << osvw_len) - 1;
636                 }
637         } else
638                 osvw_status = osvw_len = 0;
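	/*
	 * Worked example of the worst-case merge: starting from the module
	 * defaults (osvw_len = 4, osvw_status = 0), one CPU reporting
	 * len = 3, status = 0b010 followed by another reporting len = 2,
	 * status = 0b001 yields osvw_len = 2 and osvw_status = 0b011, i.e.
	 * every erratum bit seen on any CPU within the shortest valid length.
	 */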
639
640         svm_init_erratum_383();
641
642         amd_pmu_enable_virt();
643
644         return 0;
645 }
646
647 static void svm_cpu_uninit(int cpu)
648 {
649         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
650
651         if (!sd)
652                 return;
653
654         per_cpu(svm_data, cpu) = NULL;
655         kfree(sd->sev_vmcbs);
656         __free_page(sd->save_area);
657         kfree(sd);
658 }
659
660 static int svm_cpu_init(int cpu)
661 {
662         struct svm_cpu_data *sd;
663         int ret = -ENOMEM;
664
665         sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
666         if (!sd)
667                 return ret;
668         sd->cpu = cpu;
669         sd->save_area = alloc_page(GFP_KERNEL | __GFP_ZERO);
670         if (!sd->save_area)
671                 goto free_cpu_data;
672
673         ret = sev_cpu_init(sd);
674         if (ret)
675                 goto free_save_area;
676
677         per_cpu(svm_data, cpu) = sd;
678
679         return 0;
680
681 free_save_area:
682         __free_page(sd->save_area);
683 free_cpu_data:
684         kfree(sd);
685         return ret;
686
687 }
688
689 static int direct_access_msr_slot(u32 msr)
690 {
691         u32 i;
692
693         for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
694                 if (direct_access_msrs[i].index == msr)
695                         return i;
696
697         return -ENOENT;
698 }
699
700 static void set_shadow_msr_intercept(struct kvm_vcpu *vcpu, u32 msr, int read,
701                                      int write)
702 {
703         struct vcpu_svm *svm = to_svm(vcpu);
704         int slot = direct_access_msr_slot(msr);
705
706         if (slot == -ENOENT)
707                 return;
708
709         /* Set the shadow bitmaps to the desired intercept states */
710         if (read)
711                 set_bit(slot, svm->shadow_msr_intercept.read);
712         else
713                 clear_bit(slot, svm->shadow_msr_intercept.read);
714
715         if (write)
716                 set_bit(slot, svm->shadow_msr_intercept.write);
717         else
718                 clear_bit(slot, svm->shadow_msr_intercept.write);
719 }
720
721 static bool valid_msr_intercept(u32 index)
722 {
723         return direct_access_msr_slot(index) != -ENOENT;
724 }
725
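/*
 * Test the write-intercept bit for @msr in the currently active MSR
 * permission map (the merged nested msrpm when running L2, otherwise L1's
 * msrpm); a set bit means writes to the MSR will #VMEXIT.
 */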
726 static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
727 {
728         u8 bit_write;
729         unsigned long tmp;
730         u32 offset;
731         u32 *msrpm;
732
733         msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
734                                       to_svm(vcpu)->msrpm;
735
736         offset    = svm_msrpm_offset(msr);
737         bit_write = 2 * (msr & 0x0f) + 1;
738         tmp       = msrpm[offset];
739
740         BUG_ON(offset == MSR_INVALID);
741
742         return !!test_bit(bit_write,  &tmp);
743 }
744
745 static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
746                                         u32 msr, int read, int write)
747 {
748         struct vcpu_svm *svm = to_svm(vcpu);
749         u8 bit_read, bit_write;
750         unsigned long tmp;
751         u32 offset;
752
753         /*
754          * If this warning triggers, extend the direct_access_msrs list at
755          * the beginning of the file.
756          */
757         WARN_ON(!valid_msr_intercept(msr));
758
759         /* Enforce interception of MSRs that are not allowed by the MSR filter */
760         if (read && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
761                 read = 0;
762
763         if (write && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
764                 write = 0;
765
766         offset    = svm_msrpm_offset(msr);
767         bit_read  = 2 * (msr & 0x0f);
768         bit_write = 2 * (msr & 0x0f) + 1;
769         tmp       = msrpm[offset];
770
771         BUG_ON(offset == MSR_INVALID);
772
773         read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
774         write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);
775
776         msrpm[offset] = tmp;
777
778         svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
779         svm->nested.force_msr_bitmap_recalc = true;
780 }
781
782 void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
783                           int read, int write)
784 {
785         set_shadow_msr_intercept(vcpu, msr, read, write);
786         set_msr_interception_bitmap(vcpu, msrpm, msr, read, write);
787 }
788
789 u32 *svm_vcpu_alloc_msrpm(void)
790 {
791         unsigned int order = get_order(MSRPM_SIZE);
792         struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, order);
793         u32 *msrpm;
794
795         if (!pages)
796                 return NULL;
797
798         msrpm = page_address(pages);
799         memset(msrpm, 0xff, PAGE_SIZE * (1 << order));
800
801         return msrpm;
802 }
803
804 void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm)
805 {
806         int i;
807
808         for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
809                 if (!direct_access_msrs[i].always)
810                         continue;
811                 set_msr_interception(vcpu, msrpm, direct_access_msrs[i].index, 1, 1);
812         }
813 }
814
815 void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
816 {
817         int i;
818
819         if (intercept == svm->x2avic_msrs_intercepted)
820                 return;
821
822         if (avic_mode != AVIC_MODE_X2 ||
823             !apic_x2apic_mode(svm->vcpu.arch.apic))
824                 return;
825
826         for (i = 0; i < MAX_DIRECT_ACCESS_MSRS; i++) {
827                 int index = direct_access_msrs[i].index;
828
829                 if ((index < APIC_BASE_MSR) ||
830                     (index > APIC_BASE_MSR + 0xff))
831                         continue;
832                 set_msr_interception(&svm->vcpu, svm->msrpm, index,
833                                      !intercept, !intercept);
834         }
835
836         svm->x2avic_msrs_intercepted = intercept;
837 }
838
839 void svm_vcpu_free_msrpm(u32 *msrpm)
840 {
841         __free_pages(virt_to_page(msrpm), get_order(MSRPM_SIZE));
842 }
843
844 static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
845 {
846         struct vcpu_svm *svm = to_svm(vcpu);
847         u32 i;
848
849         /*
850          * Set intercept permissions for all direct access MSRs again. They
851          * will automatically get filtered through the MSR filter, so we are
852          * back in sync after this.
853          */
854         for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
855                 u32 msr = direct_access_msrs[i].index;
856                 u32 read = test_bit(i, svm->shadow_msr_intercept.read);
857                 u32 write = test_bit(i, svm->shadow_msr_intercept.write);
858
859                 set_msr_interception_bitmap(vcpu, svm->msrpm, msr, read, write);
860         }
861 }
862
863 static void add_msr_offset(u32 offset)
864 {
865         int i;
866
867         for (i = 0; i < MSRPM_OFFSETS; ++i) {
868
869                 /* Offset already in list? */
870                 if (msrpm_offsets[i] == offset)
871                         return;
872
873                 /* Slot used by another offset? */
874                 if (msrpm_offsets[i] != MSR_INVALID)
875                         continue;
876
877                 /* Add offset to list */
878                 msrpm_offsets[i] = offset;
879
880                 return;
881         }
882
883         /*
884          * If this BUG triggers, the msrpm_offsets table has overflowed. Just
885          * increase MSRPM_OFFSETS in this case.
886          */
887         BUG();
888 }
889
890 static void init_msrpm_offsets(void)
891 {
892         int i;
893
894         memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));
895
896         for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
897                 u32 offset;
898
899                 offset = svm_msrpm_offset(direct_access_msrs[i].index);
900                 BUG_ON(offset == MSR_INVALID);
901
902                 add_msr_offset(offset);
903         }
904 }
905
906 void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
907 {
908         to_vmcb->save.dbgctl            = from_vmcb->save.dbgctl;
909         to_vmcb->save.br_from           = from_vmcb->save.br_from;
910         to_vmcb->save.br_to             = from_vmcb->save.br_to;
911         to_vmcb->save.last_excp_from    = from_vmcb->save.last_excp_from;
912         to_vmcb->save.last_excp_to      = from_vmcb->save.last_excp_to;
913
914         vmcb_mark_dirty(to_vmcb, VMCB_LBR);
915 }
916
917 static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
918 {
919         struct vcpu_svm *svm = to_svm(vcpu);
920
921         svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
922         set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
923         set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
924         set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
925         set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
926
927         /* Move the LBR msrs to the vmcb02 so that the guest can see them. */
928         if (is_guest_mode(vcpu))
929                 svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr);
930 }
931
932 static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
933 {
934         struct vcpu_svm *svm = to_svm(vcpu);
935
936         svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
937         set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
938         set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
939         set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
940         set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
941
942         /*
943          * Move the LBR msrs back to the vmcb01 to avoid copying them
944          * on nested guest entries.
945          */
946         if (is_guest_mode(vcpu))
947                 svm_copy_lbrs(svm->vmcb01.ptr, svm->vmcb);
948 }
949
950 static int svm_get_lbr_msr(struct vcpu_svm *svm, u32 index)
951 {
952         /*
953          * If the LBR virtualization is disabled, the LBR msrs are always
954          * kept in the vmcb01 to avoid copying them on nested guest entries.
955          *
956          * If nested, and the LBR virtualization is enabled/disabled, the msrs
957          * are moved between the vmcb01 and vmcb02 as needed.
958          */
959         struct vmcb *vmcb =
960                 (svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) ?
961                         svm->vmcb : svm->vmcb01.ptr;
962
963         switch (index) {
964         case MSR_IA32_DEBUGCTLMSR:
965                 return vmcb->save.dbgctl;
966         case MSR_IA32_LASTBRANCHFROMIP:
967                 return vmcb->save.br_from;
968         case MSR_IA32_LASTBRANCHTOIP:
969                 return vmcb->save.br_to;
970         case MSR_IA32_LASTINTFROMIP:
971                 return vmcb->save.last_excp_from;
972         case MSR_IA32_LASTINTTOIP:
973                 return vmcb->save.last_excp_to;
974         default:
975                 KVM_BUG(false, svm->vcpu.kvm,
976                         "%s: Unknown MSR 0x%x", __func__, index);
977                 return 0;
978         }
979 }
980
981 void svm_update_lbrv(struct kvm_vcpu *vcpu)
982 {
983         struct vcpu_svm *svm = to_svm(vcpu);
984
985         bool enable_lbrv = svm_get_lbr_msr(svm, MSR_IA32_DEBUGCTLMSR) &
986                                            DEBUGCTLMSR_LBR;
987
988         bool current_enable_lbrv = !!(svm->vmcb->control.virt_ext &
989                                       LBR_CTL_ENABLE_MASK);
990
991         if (unlikely(is_guest_mode(vcpu) && svm->lbrv_enabled))
992                 if (unlikely(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))
993                         enable_lbrv = true;
994
995         if (enable_lbrv == current_enable_lbrv)
996                 return;
997
998         if (enable_lbrv)
999                 svm_enable_lbrv(vcpu);
1000         else
1001                 svm_disable_lbrv(vcpu);
1002 }
1003
1004 void disable_nmi_singlestep(struct vcpu_svm *svm)
1005 {
1006         svm->nmi_singlestep = false;
1007
1008         if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
1009                 /* Clear our flags if they were not set by the guest */
1010                 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
1011                         svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
1012                 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
1013                         svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
1014         }
1015 }
1016
1017 static void grow_ple_window(struct kvm_vcpu *vcpu)
1018 {
1019         struct vcpu_svm *svm = to_svm(vcpu);
1020         struct vmcb_control_area *control = &svm->vmcb->control;
1021         int old = control->pause_filter_count;
1022
1023         if (kvm_pause_in_guest(vcpu->kvm))
1024                 return;
1025
1026         control->pause_filter_count = __grow_ple_window(old,
1027                                                         pause_filter_count,
1028                                                         pause_filter_count_grow,
1029                                                         pause_filter_count_max);
1030
1031         if (control->pause_filter_count != old) {
1032                 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1033                 trace_kvm_ple_window_update(vcpu->vcpu_id,
1034                                             control->pause_filter_count, old);
1035         }
1036 }
1037
1038 static void shrink_ple_window(struct kvm_vcpu *vcpu)
1039 {
1040         struct vcpu_svm *svm = to_svm(vcpu);
1041         struct vmcb_control_area *control = &svm->vmcb->control;
1042         int old = control->pause_filter_count;
1043
1044         if (kvm_pause_in_guest(vcpu->kvm))
1045                 return;
1046
1047         control->pause_filter_count =
1048                                 __shrink_ple_window(old,
1049                                                     pause_filter_count,
1050                                                     pause_filter_count_shrink,
1051                                                     pause_filter_count);
1052         if (control->pause_filter_count != old) {
1053                 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1054                 trace_kvm_ple_window_update(vcpu->vcpu_id,
1055                                             control->pause_filter_count, old);
1056         }
1057 }
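/*
 * With the module defaults documented above, __grow_ple_window() doubles
 * the vCPU's pause_filter_count on each grow (capped at
 * pause_filter_count_max), and the shrink modifier of zero makes
 * __shrink_ple_window() reset the window straight back to
 * pause_filter_count.
 */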
1058
1059 static void svm_hardware_unsetup(void)
1060 {
1061         int cpu;
1062
1063         sev_hardware_unsetup();
1064
1065         for_each_possible_cpu(cpu)
1066                 svm_cpu_uninit(cpu);
1067
1068         __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT),
1069                      get_order(IOPM_SIZE));
1070         iopm_base = 0;
1071 }
1072
1073 static void init_seg(struct vmcb_seg *seg)
1074 {
1075         seg->selector = 0;
1076         seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
1077                       SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
1078         seg->limit = 0xffff;
1079         seg->base = 0;
1080 }
1081
1082 static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
1083 {
1084         seg->selector = 0;
1085         seg->attrib = SVM_SELECTOR_P_MASK | type;
1086         seg->limit = 0xffff;
1087         seg->base = 0;
1088 }
1089
1090 static u64 svm_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
1091 {
1092         struct vcpu_svm *svm = to_svm(vcpu);
1093
1094         return svm->nested.ctl.tsc_offset;
1095 }
1096
1097 static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
1098 {
1099         struct vcpu_svm *svm = to_svm(vcpu);
1100
1101         return svm->tsc_ratio_msr;
1102 }
1103
1104 static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1105 {
1106         struct vcpu_svm *svm = to_svm(vcpu);
1107
1108         svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset;
1109         svm->vmcb->control.tsc_offset = offset;
1110         vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1111 }
1112
1113 static void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
1114 {
1115         __svm_write_tsc_multiplier(multiplier);
1116 }
1117
1118
1119 /* Evaluate instruction intercepts that depend on guest CPUID features. */
1120 static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
1121                                               struct vcpu_svm *svm)
1122 {
1123         /*
1124          * Intercept INVPCID if shadow paging is enabled to sync/free shadow
1125          * roots, or if INVPCID is disabled in the guest to inject #UD.
1126          */
1127         if (kvm_cpu_cap_has(X86_FEATURE_INVPCID)) {
1128                 if (!npt_enabled ||
1129                     !guest_cpuid_has(&svm->vcpu, X86_FEATURE_INVPCID))
1130                         svm_set_intercept(svm, INTERCEPT_INVPCID);
1131                 else
1132                         svm_clr_intercept(svm, INTERCEPT_INVPCID);
1133         }
1134
1135         if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP)) {
1136                 if (guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
1137                         svm_clr_intercept(svm, INTERCEPT_RDTSCP);
1138                 else
1139                         svm_set_intercept(svm, INTERCEPT_RDTSCP);
1140         }
1141 }
1142
1143 static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu)
1144 {
1145         struct vcpu_svm *svm = to_svm(vcpu);
1146
1147         if (guest_cpuid_is_intel(vcpu)) {
1148                 /*
1149                  * We must intercept SYSENTER_EIP and SYSENTER_ESP
1150                  * accesses because the processor only stores 32 bits.
1151                  * For the same reason we cannot use virtual VMLOAD/VMSAVE.
1152                  */
1153                 svm_set_intercept(svm, INTERCEPT_VMLOAD);
1154                 svm_set_intercept(svm, INTERCEPT_VMSAVE);
1155                 svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
1156
1157                 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 0, 0);
1158                 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 0, 0);
1159
1160                 svm->v_vmload_vmsave_enabled = false;
1161         } else {
1162                 /*
1163                  * If hardware supports Virtual VMLOAD VMSAVE then enable it
1164                  * in VMCB and clear intercepts to avoid #VMEXIT.
1165                  */
1166                 if (vls) {
1167                         svm_clr_intercept(svm, INTERCEPT_VMLOAD);
1168                         svm_clr_intercept(svm, INTERCEPT_VMSAVE);
1169                         svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
1170                 }
1171                 /* No need to intercept these MSRs */
1172                 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
1173                 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
1174         }
1175 }
1176
1177 static void init_vmcb(struct kvm_vcpu *vcpu)
1178 {
1179         struct vcpu_svm *svm = to_svm(vcpu);
1180         struct vmcb *vmcb = svm->vmcb01.ptr;
1181         struct vmcb_control_area *control = &vmcb->control;
1182         struct vmcb_save_area *save = &vmcb->save;
1183
1184         svm_set_intercept(svm, INTERCEPT_CR0_READ);
1185         svm_set_intercept(svm, INTERCEPT_CR3_READ);
1186         svm_set_intercept(svm, INTERCEPT_CR4_READ);
1187         svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
1188         svm_set_intercept(svm, INTERCEPT_CR3_WRITE);
1189         svm_set_intercept(svm, INTERCEPT_CR4_WRITE);
1190         if (!kvm_vcpu_apicv_active(vcpu))
1191                 svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
1192
1193         set_dr_intercepts(svm);
1194
1195         set_exception_intercept(svm, PF_VECTOR);
1196         set_exception_intercept(svm, UD_VECTOR);
1197         set_exception_intercept(svm, MC_VECTOR);
1198         set_exception_intercept(svm, AC_VECTOR);
1199         set_exception_intercept(svm, DB_VECTOR);
1200         /*
1201          * Guest access to VMware backdoor ports could legitimately
1202          * trigger #GP because of the TSS I/O permission bitmap.
1203          * We intercept those #GPs and allow access to the ports anyway,
1204          * as VMware does.  Don't intercept #GP for SEV guests as KVM can't
1205          * decrypt guest memory to decode the faulting instruction.
1206          */
1207         if (enable_vmware_backdoor && !sev_guest(vcpu->kvm))
1208                 set_exception_intercept(svm, GP_VECTOR);
1209
1210         svm_set_intercept(svm, INTERCEPT_INTR);
1211         svm_set_intercept(svm, INTERCEPT_NMI);
1212
1213         if (intercept_smi)
1214                 svm_set_intercept(svm, INTERCEPT_SMI);
1215
1216         svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
1217         svm_set_intercept(svm, INTERCEPT_RDPMC);
1218         svm_set_intercept(svm, INTERCEPT_CPUID);
1219         svm_set_intercept(svm, INTERCEPT_INVD);
1220         svm_set_intercept(svm, INTERCEPT_INVLPG);
1221         svm_set_intercept(svm, INTERCEPT_INVLPGA);
1222         svm_set_intercept(svm, INTERCEPT_IOIO_PROT);
1223         svm_set_intercept(svm, INTERCEPT_MSR_PROT);
1224         svm_set_intercept(svm, INTERCEPT_TASK_SWITCH);
1225         svm_set_intercept(svm, INTERCEPT_SHUTDOWN);
1226         svm_set_intercept(svm, INTERCEPT_VMRUN);
1227         svm_set_intercept(svm, INTERCEPT_VMMCALL);
1228         svm_set_intercept(svm, INTERCEPT_VMLOAD);
1229         svm_set_intercept(svm, INTERCEPT_VMSAVE);
1230         svm_set_intercept(svm, INTERCEPT_STGI);
1231         svm_set_intercept(svm, INTERCEPT_CLGI);
1232         svm_set_intercept(svm, INTERCEPT_SKINIT);
1233         svm_set_intercept(svm, INTERCEPT_WBINVD);
1234         svm_set_intercept(svm, INTERCEPT_XSETBV);
1235         svm_set_intercept(svm, INTERCEPT_RDPRU);
1236         svm_set_intercept(svm, INTERCEPT_RSM);
1237
1238         if (!kvm_mwait_in_guest(vcpu->kvm)) {
1239                 svm_set_intercept(svm, INTERCEPT_MONITOR);
1240                 svm_set_intercept(svm, INTERCEPT_MWAIT);
1241         }
1242
1243         if (!kvm_hlt_in_guest(vcpu->kvm))
1244                 svm_set_intercept(svm, INTERCEPT_HLT);
1245
1246         control->iopm_base_pa = __sme_set(iopm_base);
1247         control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
1248         control->int_ctl = V_INTR_MASKING_MASK;
1249
1250         init_seg(&save->es);
1251         init_seg(&save->ss);
1252         init_seg(&save->ds);
1253         init_seg(&save->fs);
1254         init_seg(&save->gs);
1255
1256         save->cs.selector = 0xf000;
1257         save->cs.base = 0xffff0000;
1258         /* Executable/Readable Code Segment */
1259         save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
1260                 SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
1261         save->cs.limit = 0xffff;
1262
1263         save->gdtr.base = 0;
1264         save->gdtr.limit = 0xffff;
1265         save->idtr.base = 0;
1266         save->idtr.limit = 0xffff;
1267
1268         init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
1269         init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
1270
1271         if (npt_enabled) {
1272                 /* Set up the VMCB for Nested Paging */
1273                 control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
1274                 svm_clr_intercept(svm, INTERCEPT_INVLPG);
1275                 clr_exception_intercept(svm, PF_VECTOR);
1276                 svm_clr_intercept(svm, INTERCEPT_CR3_READ);
1277                 svm_clr_intercept(svm, INTERCEPT_CR3_WRITE);
1278                 save->g_pat = vcpu->arch.pat;
1279                 save->cr3 = 0;
1280         }
1281         svm->current_vmcb->asid_generation = 0;
1282         svm->asid = 0;
1283
1284         svm->nested.vmcb12_gpa = INVALID_GPA;
1285         svm->nested.last_vmcb12_gpa = INVALID_GPA;
1286
1287         if (!kvm_pause_in_guest(vcpu->kvm)) {
1288                 control->pause_filter_count = pause_filter_count;
1289                 if (pause_filter_thresh)
1290                         control->pause_filter_thresh = pause_filter_thresh;
1291                 svm_set_intercept(svm, INTERCEPT_PAUSE);
1292         } else {
1293                 svm_clr_intercept(svm, INTERCEPT_PAUSE);
1294         }
1295
1296         svm_recalc_instruction_intercepts(vcpu, svm);
1297
1298         /*
1299          * If the host supports V_SPEC_CTRL then disable the interception
1300          * of MSR_IA32_SPEC_CTRL.
1301          */
1302         if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
1303                 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
1304
1305         if (kvm_vcpu_apicv_active(vcpu))
1306                 avic_init_vmcb(svm, vmcb);
1307
1308         if (vgif) {
1309                 svm_clr_intercept(svm, INTERCEPT_STGI);
1310                 svm_clr_intercept(svm, INTERCEPT_CLGI);
1311                 svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
1312         }
1313
1314         if (sev_guest(vcpu->kvm))
1315                 sev_init_vmcb(svm);
1316
1317         svm_hv_init_vmcb(vmcb);
1318         init_vmcb_after_set_cpuid(vcpu);
1319
1320         vmcb_mark_all_dirty(vmcb);
1321
1322         enable_gif(svm);
1323 }
1324
1325 static void __svm_vcpu_reset(struct kvm_vcpu *vcpu)
1326 {
1327         struct vcpu_svm *svm = to_svm(vcpu);
1328
1329         svm_vcpu_init_msrpm(vcpu, svm->msrpm);
1330
1331         svm_init_osvw(vcpu);
1332         vcpu->arch.microcode_version = 0x01000065;
1333         svm->tsc_ratio_msr = kvm_caps.default_tsc_scaling_ratio;
1334
1335         if (sev_es_guest(vcpu->kvm))
1336                 sev_es_vcpu_reset(svm);
1337 }
1338
1339 static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
1340 {
1341         struct vcpu_svm *svm = to_svm(vcpu);
1342
1343         svm->spec_ctrl = 0;
1344         svm->virt_spec_ctrl = 0;
1345
1346         init_vmcb(vcpu);
1347
1348         if (!init_event)
1349                 __svm_vcpu_reset(vcpu);
1350 }
1351
1352 void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb)
1353 {
1354         svm->current_vmcb = target_vmcb;
1355         svm->vmcb = target_vmcb->ptr;
1356 }
1357
1358 static int svm_vcpu_create(struct kvm_vcpu *vcpu)
1359 {
1360         struct vcpu_svm *svm;
1361         struct page *vmcb01_page;
1362         struct page *vmsa_page = NULL;
1363         int err;
1364
1365         BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
1366         svm = to_svm(vcpu);
1367
1368         err = -ENOMEM;
1369         vmcb01_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
1370         if (!vmcb01_page)
1371                 goto out;
1372
1373         if (sev_es_guest(vcpu->kvm)) {
1374                 /*
1375                  * SEV-ES guests require a separate VMSA page used to contain
1376                  * the encrypted register state of the guest.
1377                  */
1378                 vmsa_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
1379                 if (!vmsa_page)
1380                         goto error_free_vmcb_page;
1381
1382                 /*
1383                  * SEV-ES guests maintain an encrypted version of their FPU
1384                  * state which is restored and saved on VMRUN and VMEXIT.
1385                  * Mark vcpu->arch.guest_fpu->fpstate as scratch so it won't
1386                  * do xsave/xrstor on it.
1387                  */
1388                 fpstate_set_confidential(&vcpu->arch.guest_fpu);
1389         }
1390
1391         err = avic_init_vcpu(svm);
1392         if (err)
1393                 goto error_free_vmsa_page;
1394
1395         svm->msrpm = svm_vcpu_alloc_msrpm();
1396         if (!svm->msrpm) {
1397                 err = -ENOMEM;
1398                 goto error_free_vmsa_page;
1399         }
1400
1401         svm->x2avic_msrs_intercepted = true;
1402
1403         svm->vmcb01.ptr = page_address(vmcb01_page);
1404         svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);
1405         svm_switch_vmcb(svm, &svm->vmcb01);
1406
1407         if (vmsa_page)
1408                 svm->sev_es.vmsa = page_address(vmsa_page);
1409
1410         svm->guest_state_loaded = false;
1411
1412         return 0;
1413
1414 error_free_vmsa_page:
1415         if (vmsa_page)
1416                 __free_page(vmsa_page);
1417 error_free_vmcb_page:
1418         __free_page(vmcb01_page);
1419 out:
1420         return err;
1421 }
1422
1423 static void svm_clear_current_vmcb(struct vmcb *vmcb)
1424 {
1425         int i;
1426
1427         for_each_online_cpu(i)
1428                 cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
1429 }
1430
1431 static void svm_vcpu_free(struct kvm_vcpu *vcpu)
1432 {
1433         struct vcpu_svm *svm = to_svm(vcpu);
1434
1435         /*
1436          * The vmcb page can be recycled, causing a false negative in
1437          * svm_vcpu_load(). So, ensure that no logical CPU has this
1438          * vmcb page recorded as its current vmcb.
1439          */
1440         svm_clear_current_vmcb(svm->vmcb);
1441
1442         svm_free_nested(svm);
1443
1444         sev_free_vcpu(vcpu);
1445
1446         __free_page(pfn_to_page(__sme_clr(svm->vmcb01.pa) >> PAGE_SHIFT));
1447         __free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
1448 }
1449
1450 static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
1451 {
1452         struct vcpu_svm *svm = to_svm(vcpu);
1453         struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
1454
1455         if (sev_es_guest(vcpu->kvm))
1456                 sev_es_unmap_ghcb(svm);
1457
1458         if (svm->guest_state_loaded)
1459                 return;
1460
1461         /*
1462          * Save additional host state that will be restored on VMEXIT (sev-es)
1463          * or subsequent vmload of host save area.
1464          */
1465         vmsave(__sme_page_pa(sd->save_area));
1466         if (sev_es_guest(vcpu->kvm)) {
1467                 struct sev_es_save_area *hostsa;
1468                 hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
1469
1470                 sev_es_prepare_switch_to_guest(hostsa);
1471         }
1472
1473         if (tsc_scaling)
1474                 __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
1475
1476         if (likely(tsc_aux_uret_slot >= 0))
1477                 kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);
1478
1479         svm->guest_state_loaded = true;
1480 }
1481
1482 static void svm_prepare_host_switch(struct kvm_vcpu *vcpu)
1483 {
1484         to_svm(vcpu)->guest_state_loaded = false;
1485 }
1486
1487 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1488 {
1489         struct vcpu_svm *svm = to_svm(vcpu);
1490         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
1491
1492         if (sd->current_vmcb != svm->vmcb) {
1493                 sd->current_vmcb = svm->vmcb;
1494                 indirect_branch_prediction_barrier();
1495         }
1496         if (kvm_vcpu_apicv_active(vcpu))
1497                 avic_vcpu_load(vcpu, cpu);
1498 }
1499
1500 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
1501 {
1502         if (kvm_vcpu_apicv_active(vcpu))
1503                 avic_vcpu_put(vcpu);
1504
1505         svm_prepare_host_switch(vcpu);
1506
1507         ++vcpu->stat.host_state_reload;
1508 }
1509
1510 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
1511 {
1512         struct vcpu_svm *svm = to_svm(vcpu);
1513         unsigned long rflags = svm->vmcb->save.rflags;
1514
1515         if (svm->nmi_singlestep) {
1516                 /* Hide our flags if they were not set by the guest */
1517                 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
1518                         rflags &= ~X86_EFLAGS_TF;
1519                 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
1520                         rflags &= ~X86_EFLAGS_RF;
1521         }
1522         return rflags;
1523 }
1524
1525 static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1526 {
1527         if (to_svm(vcpu)->nmi_singlestep)
1528                 rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
1529
1530         /*
1531          * Any change of EFLAGS.VM is accompanied by a reload of SS
1532          * (caused by either a task switch or an inter-privilege IRET),
1533          * so we do not need to update the CPL here.
1534          */
1535         to_svm(vcpu)->vmcb->save.rflags = rflags;
1536 }
1537
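/*
 * Return whether the guest can currently take interrupts.  For SEV-ES
 * guests RFLAGS is not accessible to KVM, so the guest interrupt mask
 * reported in the VMCB's int_state is used instead of RFLAGS.IF.
 */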
1538 static bool svm_get_if_flag(struct kvm_vcpu *vcpu)
1539 {
1540         struct vmcb *vmcb = to_svm(vcpu)->vmcb;
1541
1542         return sev_es_guest(vcpu->kvm)
1543                 ? vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK
1544                 : kvm_get_rflags(vcpu) & X86_EFLAGS_IF;
1545 }
1546
1547 static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
1548 {
1549         kvm_register_mark_available(vcpu, reg);
1550
1551         switch (reg) {
1552         case VCPU_EXREG_PDPTR:
1553                 /*
1554                  * When !npt_enabled, mmu->pdptrs[] is already available since
1555                  * it is always updated per SDM when moving to CRs.
1556                  */
1557                 if (npt_enabled)
1558                         load_pdptrs(vcpu, kvm_read_cr3(vcpu));
1559                 break;
1560         default:
1561                 KVM_BUG_ON(1, vcpu->kvm);
1562         }
1563 }
1564
1565 static void svm_set_vintr(struct vcpu_svm *svm)
1566 {
1567         struct vmcb_control_area *control;
1568
1569         /*
1570          * The following fields are ignored when AVIC is enabled
1571          */
1572         WARN_ON(kvm_vcpu_apicv_activated(&svm->vcpu));
1573
1574         svm_set_intercept(svm, INTERCEPT_VINTR);
1575
1576         /*
1577          * This is just a dummy VINTR to actually cause a vmexit to happen.
1578          * Actual injection of virtual interrupts happens through EVENTINJ.
1579          */
1580         control = &svm->vmcb->control;
1581         control->int_vector = 0x0;
1582         control->int_ctl &= ~V_INTR_PRIO_MASK;
1583         control->int_ctl |= V_IRQ_MASK |
1584                 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
1585         vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
1586 }
1587
1588 static void svm_clear_vintr(struct vcpu_svm *svm)
1589 {
1590         svm_clr_intercept(svm, INTERCEPT_VINTR);
1591
1592         /* Drop int_ctl fields related to VINTR injection.  */
1593         svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
1594         if (is_guest_mode(&svm->vcpu)) {
1595                 svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
1596
1597                 WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
1598                         (svm->nested.ctl.int_ctl & V_TPR_MASK));
1599
1600                 svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
1601                         V_IRQ_INJECTION_BITS_MASK;
1602
1603                 svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
1604         }
1605
1606         vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
1607 }
1608
1609 static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
1610 {
1611         struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1612         struct vmcb_save_area *save01 = &to_svm(vcpu)->vmcb01.ptr->save;
1613
1614         switch (seg) {
1615         case VCPU_SREG_CS: return &save->cs;
1616         case VCPU_SREG_DS: return &save->ds;
1617         case VCPU_SREG_ES: return &save->es;
1618         case VCPU_SREG_FS: return &save01->fs;
1619         case VCPU_SREG_GS: return &save01->gs;
1620         case VCPU_SREG_SS: return &save->ss;
1621         case VCPU_SREG_TR: return &save01->tr;
1622         case VCPU_SREG_LDTR: return &save01->ldtr;
1623         }
1624         BUG();
1625         return NULL;
1626 }
1627
1628 static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
1629 {
1630         struct vmcb_seg *s = svm_seg(vcpu, seg);
1631
1632         return s->base;
1633 }
1634
1635 static void svm_get_segment(struct kvm_vcpu *vcpu,
1636                             struct kvm_segment *var, int seg)
1637 {
1638         struct vmcb_seg *s = svm_seg(vcpu, seg);
1639
1640         var->base = s->base;
1641         var->limit = s->limit;
1642         var->selector = s->selector;
1643         var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
1644         var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
1645         var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
1646         var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
1647         var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
1648         var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
1649         var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
1650
1651         /*
1652          * AMD CPUs circa 2014 track the G bit for all segments except CS.
1653          * However, the SVM spec states that the G bit is not observed by the
1654          * CPU, and some VMware virtual CPUs drop the G bit for all segments.
1655          * So let's synthesize a legal G bit for all segments; this helps when
1656          * running KVM nested. It also helps cross-vendor migration, because
1657          * Intel's vmentry has a check on the 'G' bit.
1658          */
1659         var->g = s->limit > 0xfffff;
1660
1661         /*
1662          * AMD's VMCB does not have an explicit unusable field, so emulate it
1663          * for cross-vendor migration purposes by treating "not present" as unusable.
1664          */
1665         var->unusable = !var->present;
1666
1667         switch (seg) {
1668         case VCPU_SREG_TR:
1669                 /*
1670                  * Work around a bug where the busy flag in the tr selector
1671                  * isn't exposed
1672                  */
1673                 var->type |= 0x2;
1674                 break;
1675         case VCPU_SREG_DS:
1676         case VCPU_SREG_ES:
1677         case VCPU_SREG_FS:
1678         case VCPU_SREG_GS:
1679                 /*
1680                  * The accessed bit must always be set in the segment
1681                  * descriptor cache: although it can be cleared in the
1682                  * descriptor itself, the cached bit always remains 1. Since
1683                  * Intel has a check on this, set it here to support
1684                  * cross-vendor migration.
1685                  */
1686                 if (!var->unusable)
1687                         var->type |= 0x1;
1688                 break;
1689         case VCPU_SREG_SS:
1690                 /*
1691                  * On AMD CPUs sometimes the DB bit in the segment
1692                  * descriptor is left as 1, although the whole segment has
1693                  * been made unusable. Clear it here to pass an Intel VMX
1694                  * entry check when cross vendor migrating.
1695                  */
1696                 if (var->unusable)
1697                         var->db = 0;
1698                 /* This is symmetric with svm_set_segment() */
1699                 var->dpl = to_svm(vcpu)->vmcb->save.cpl;
1700                 break;
1701         }
1702 }
1703
1704 static int svm_get_cpl(struct kvm_vcpu *vcpu)
1705 {
1706         struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1707
1708         return save->cpl;
1709 }
1710
1711 static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
1712 {
1713         struct kvm_segment cs;
1714
1715         svm_get_segment(vcpu, &cs, VCPU_SREG_CS);
1716         *db = cs.db;
1717         *l = cs.l;
1718 }
1719
1720 static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1721 {
1722         struct vcpu_svm *svm = to_svm(vcpu);
1723
1724         dt->size = svm->vmcb->save.idtr.limit;
1725         dt->address = svm->vmcb->save.idtr.base;
1726 }
1727
1728 static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1729 {
1730         struct vcpu_svm *svm = to_svm(vcpu);
1731
1732         svm->vmcb->save.idtr.limit = dt->size;
1733         svm->vmcb->save.idtr.base = dt->address;
1734         vmcb_mark_dirty(svm->vmcb, VMCB_DT);
1735 }
1736
1737 static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1738 {
1739         struct vcpu_svm *svm = to_svm(vcpu);
1740
1741         dt->size = svm->vmcb->save.gdtr.limit;
1742         dt->address = svm->vmcb->save.gdtr.base;
1743 }
1744
1745 static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1746 {
1747         struct vcpu_svm *svm = to_svm(vcpu);
1748
1749         svm->vmcb->save.gdtr.limit = dt->size;
1750         svm->vmcb->save.gdtr.base = dt->address;
1751         vmcb_mark_dirty(svm->vmcb, VMCB_DT);
1752 }
1753
1754 static void sev_post_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
1755 {
1756         struct vcpu_svm *svm = to_svm(vcpu);
1757
1758         /*
1759          * For guests that don't set guest_state_protected, the cr3 update is
1760          * handled via kvm_mmu_load() while entering the guest. For guests
1761          * that do (SEV-ES/SEV-SNP), the cr3 update needs to be written to
1762          * VMCB save area now, since the save area will become the initial
1763          * contents of the VMSA, and future VMCB save area updates won't be
1764          * seen.
1765          */
1766         if (sev_es_guest(vcpu->kvm)) {
1767                 svm->vmcb->save.cr3 = cr3;
1768                 vmcb_mark_dirty(svm->vmcb, VMCB_CR);
1769         }
1770 }
1771
1772 void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1773 {
1774         struct vcpu_svm *svm = to_svm(vcpu);
1775         u64 hcr0 = cr0;
1776         bool old_paging = is_paging(vcpu);
1777
1778 #ifdef CONFIG_X86_64
1779         if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) {
1780                 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
1781                         vcpu->arch.efer |= EFER_LMA;
1782                         svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
1783                 }
1784
1785                 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
1786                         vcpu->arch.efer &= ~EFER_LMA;
1787                         svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
1788                 }
1789         }
1790 #endif
1791         vcpu->arch.cr0 = cr0;
1792
1793         if (!npt_enabled) {
1794                 hcr0 |= X86_CR0_PG | X86_CR0_WP;
1795                 if (old_paging != is_paging(vcpu))
1796                         svm_set_cr4(vcpu, kvm_read_cr4(vcpu));
1797         }
1798
1799         /*
1800          * Re-enable caching here because the QEMU BIOS does not
1801          * do it; otherwise there is a noticeable delay at
1802          * reboot.
1803          */
1804         if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
1805                 hcr0 &= ~(X86_CR0_CD | X86_CR0_NW);
1806
1807         svm->vmcb->save.cr0 = hcr0;
1808         vmcb_mark_dirty(svm->vmcb, VMCB_CR);
1809
1810         /*
1811          * SEV-ES guests must always keep the CR intercepts cleared. CR
1812          * tracking is done using the CR write traps.
1813          */
1814         if (sev_es_guest(vcpu->kvm))
1815                 return;
1816
1817         if (hcr0 == cr0) {
1818                 /* Selective CR0 write remains on.  */
1819                 svm_clr_intercept(svm, INTERCEPT_CR0_READ);
1820                 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
1821         } else {
1822                 svm_set_intercept(svm, INTERCEPT_CR0_READ);
1823                 svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
1824         }
1825 }
1826
1827 static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1828 {
1829         return true;
1830 }
1831
1832 void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1833 {
1834         unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
1835         unsigned long old_cr4 = vcpu->arch.cr4;
1836
1837         if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
1838                 svm_flush_tlb_current(vcpu);
1839
1840         vcpu->arch.cr4 = cr4;
1841         if (!npt_enabled) {
1842                 cr4 |= X86_CR4_PAE;
1843
1844                 if (!is_paging(vcpu))
1845                         cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
1846         }
1847         cr4 |= host_cr4_mce;
1848         to_svm(vcpu)->vmcb->save.cr4 = cr4;
1849         vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
1850
1851         if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
1852                 kvm_update_cpuid_runtime(vcpu);
1853 }
1854
1855 static void svm_set_segment(struct kvm_vcpu *vcpu,
1856                             struct kvm_segment *var, int seg)
1857 {
1858         struct vcpu_svm *svm = to_svm(vcpu);
1859         struct vmcb_seg *s = svm_seg(vcpu, seg);
1860
1861         s->base = var->base;
1862         s->limit = var->limit;
1863         s->selector = var->selector;
1864         s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
1865         s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
1866         s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
1867         s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
1868         s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
1869         s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
1870         s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
1871         s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
1872
1873         /*
1874          * This is always accurate, except if SYSRET returned to a segment
1875          * with SS.DPL != 3.  Intel does not have this quirk, and always
1876          * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
1877          * would entail passing the CPL to userspace and back.
1878          */
1879         if (seg == VCPU_SREG_SS)
1880                 /* This is symmetric with svm_get_segment() */
1881                 svm->vmcb->save.cpl = (var->dpl & 3);
1882
1883         vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
1884 }
1885
1886 static void svm_update_exception_bitmap(struct kvm_vcpu *vcpu)
1887 {
1888         struct vcpu_svm *svm = to_svm(vcpu);
1889
1890         clr_exception_intercept(svm, BP_VECTOR);
1891
1892         if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
1893                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
1894                         set_exception_intercept(svm, BP_VECTOR);
1895         }
1896 }
1897
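/*
 * Hand out a fresh ASID for this vCPU; when the per-CPU ASID space is
 * exhausted, bump the generation, restart from min_asid and request a
 * flush of all ASIDs on the next VMRUN.
 */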
1898 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
1899 {
1900         if (sd->next_asid > sd->max_asid) {
1901                 ++sd->asid_generation;
1902                 sd->next_asid = sd->min_asid;
1903                 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
1904                 vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
1905         }
1906
1907         svm->current_vmcb->asid_generation = sd->asid_generation;
1908         svm->asid = sd->next_asid++;
1909 }
1910
1911 static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value)
1912 {
1913         struct vmcb *vmcb = svm->vmcb;
1914
1915         if (svm->vcpu.arch.guest_state_protected)
1916                 return;
1917
1918         if (unlikely(value != vmcb->save.dr6)) {
1919                 vmcb->save.dr6 = value;
1920                 vmcb_mark_dirty(vmcb, VMCB_DR);
1921         }
1922 }
1923
1924 static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
1925 {
1926         struct vcpu_svm *svm = to_svm(vcpu);
1927
1928         if (vcpu->arch.guest_state_protected)
1929                 return;
1930
1931         get_debugreg(vcpu->arch.db[0], 0);
1932         get_debugreg(vcpu->arch.db[1], 1);
1933         get_debugreg(vcpu->arch.db[2], 2);
1934         get_debugreg(vcpu->arch.db[3], 3);
1935         /*
1936          * We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here,
1937          * because db_interception might need it.  We can do it before vmentry.
1938          */
1939         vcpu->arch.dr6 = svm->vmcb->save.dr6;
1940         vcpu->arch.dr7 = svm->vmcb->save.dr7;
1941         vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
1942         set_dr_intercepts(svm);
1943 }
1944
1945 static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
1946 {
1947         struct vcpu_svm *svm = to_svm(vcpu);
1948
1949         if (vcpu->arch.guest_state_protected)
1950                 return;
1951
1952         svm->vmcb->save.dr7 = value;
1953         vmcb_mark_dirty(svm->vmcb, VMCB_DR);
1954 }
1955
1956 static int pf_interception(struct kvm_vcpu *vcpu)
1957 {
1958         struct vcpu_svm *svm = to_svm(vcpu);
1959
1960         u64 fault_address = svm->vmcb->control.exit_info_2;
1961         u64 error_code = svm->vmcb->control.exit_info_1;
1962
1963         return kvm_handle_page_fault(vcpu, error_code, fault_address,
1964                         static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
1965                         svm->vmcb->control.insn_bytes : NULL,
1966                         svm->vmcb->control.insn_len);
1967 }
1968
1969 static int npf_interception(struct kvm_vcpu *vcpu)
1970 {
1971         struct vcpu_svm *svm = to_svm(vcpu);
1972
1973         u64 fault_address = svm->vmcb->control.exit_info_2;
1974         u64 error_code = svm->vmcb->control.exit_info_1;
1975
1976         trace_kvm_page_fault(vcpu, fault_address, error_code);
1977         return kvm_mmu_page_fault(vcpu, fault_address, error_code,
1978                         static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
1979                         svm->vmcb->control.insn_bytes : NULL,
1980                         svm->vmcb->control.insn_len);
1981 }
1982
1983 static int db_interception(struct kvm_vcpu *vcpu)
1984 {
1985         struct kvm_run *kvm_run = vcpu->run;
1986         struct vcpu_svm *svm = to_svm(vcpu);
1987
1988         if (!(vcpu->guest_debug &
1989               (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
1990                 !svm->nmi_singlestep) {
1991                 u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW;
1992                 kvm_queue_exception_p(vcpu, DB_VECTOR, payload);
1993                 return 1;
1994         }
1995
1996         if (svm->nmi_singlestep) {
1997                 disable_nmi_singlestep(svm);
1998                 /* Make sure we check for pending NMIs upon entry */
1999                 kvm_make_request(KVM_REQ_EVENT, vcpu);
2000         }
2001
2002         if (vcpu->guest_debug &
2003             (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
2004                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2005                 kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6;
2006                 kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7;
2007                 kvm_run->debug.arch.pc =
2008                         svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2009                 kvm_run->debug.arch.exception = DB_VECTOR;
2010                 return 0;
2011         }
2012
2013         return 1;
2014 }
2015
2016 static int bp_interception(struct kvm_vcpu *vcpu)
2017 {
2018         struct vcpu_svm *svm = to_svm(vcpu);
2019         struct kvm_run *kvm_run = vcpu->run;
2020
2021         kvm_run->exit_reason = KVM_EXIT_DEBUG;
2022         kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2023         kvm_run->debug.arch.exception = BP_VECTOR;
2024         return 0;
2025 }
2026
2027 static int ud_interception(struct kvm_vcpu *vcpu)
2028 {
2029         return handle_ud(vcpu);
2030 }
2031
2032 static int ac_interception(struct kvm_vcpu *vcpu)
2033 {
2034         kvm_queue_exception_e(vcpu, AC_VECTOR, 0);
2035         return 1;
2036 }
2037
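/*
 * Detect the machine-check signature of AMD erratum 383 (MC0 status
 * 0xb600000000010015).  On a match, clear the MCi_STATUS and MCG_STATUS
 * registers and flush the TLB to evict the multi-match entries.
 */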
2038 static bool is_erratum_383(void)
2039 {
2040         int err, i;
2041         u64 value;
2042
2043         if (!erratum_383_found)
2044                 return false;
2045
2046         value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
2047         if (err)
2048                 return false;
2049
2050         /* Bit 62 may or may not be set for this mce */
2051         value &= ~(1ULL << 62);
2052
2053         if (value != 0xb600000000010015ULL)
2054                 return false;
2055
2056         /* Clear MCi_STATUS registers */
2057         for (i = 0; i < 6; ++i)
2058                 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
2059
2060         value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
2061         if (!err) {
2062                 u32 low, high;
2063
2064                 value &= ~(1ULL << 2);
2065                 low    = lower_32_bits(value);
2066                 high   = upper_32_bits(value);
2067
2068                 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
2069         }
2070
2071         /* Flush tlb to evict multi-match entries */
2072         __flush_tlb_all();
2073
2074         return true;
2075 }
2076
2077 static void svm_handle_mce(struct kvm_vcpu *vcpu)
2078 {
2079         if (is_erratum_383()) {
2080                 /*
2081                  * Erratum 383 triggered. Guest state is corrupt so kill the
2082                  * guest.
2083                  */
2084                 pr_err("KVM: Guest triggered AMD Erratum 383\n");
2085
2086                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2087
2088                 return;
2089         }
2090
2091         /*
2092          * On an #MC intercept the MCE handler is not called automatically in
2093          * the host. So do it by hand here.
2094          */
2095         kvm_machine_check();
2096 }
2097
2098 static int mc_interception(struct kvm_vcpu *vcpu)
2099 {
2100         return 1;
2101 }
2102
2103 static int shutdown_interception(struct kvm_vcpu *vcpu)
2104 {
2105         struct kvm_run *kvm_run = vcpu->run;
2106         struct vcpu_svm *svm = to_svm(vcpu);
2107
2108         /*
2109          * The VM save area has already been encrypted so it
2110          * cannot be reinitialized - just terminate.
2111          */
2112         if (sev_es_guest(vcpu->kvm))
2113                 return -EINVAL;
2114
2115         /*
2116          * VMCB is undefined after a SHUTDOWN intercept.  INIT the vCPU to put
2117          * the VMCB in a known good state.  Unfortunately, KVM doesn't have
2118          * KVM_MP_STATE_SHUTDOWN and can't add it without potentially breaking
2119          * userspace.  From a platform view, INIT is acceptable behavior, as
2120          * there exist bare-metal platforms that automatically INIT the CPU
2121          * in response to shutdown.
2122          */
2123         clear_page(svm->vmcb);
2124         kvm_vcpu_reset(vcpu, true);
2125
2126         kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2127         return 0;
2128 }
2129
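/*
 * Decode the IOIO exit information and forward the access: string I/O goes
 * through the instruction emulator (or the SEV-ES string I/O helper),
 * everything else takes the fast PIO path.
 */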
2130 static int io_interception(struct kvm_vcpu *vcpu)
2131 {
2132         struct vcpu_svm *svm = to_svm(vcpu);
2133         u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
2134         int size, in, string;
2135         unsigned port;
2136
2137         ++vcpu->stat.io_exits;
2138         string = (io_info & SVM_IOIO_STR_MASK) != 0;
2139         in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
2140         port = io_info >> 16;
2141         size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
2142
2143         if (string) {
2144                 if (sev_es_guest(vcpu->kvm))
2145                         return sev_es_string_io(svm, size, port, in);
2146                 else
2147                         return kvm_emulate_instruction(vcpu, 0);
2148         }
2149
2150         svm->next_rip = svm->vmcb->control.exit_info_2;
2151
2152         return kvm_fast_pio(vcpu, size, port, in);
2153 }
2154
2155 static int nmi_interception(struct kvm_vcpu *vcpu)
2156 {
2157         return 1;
2158 }
2159
2160 static int smi_interception(struct kvm_vcpu *vcpu)
2161 {
2162         return 1;
2163 }
2164
2165 static int intr_interception(struct kvm_vcpu *vcpu)
2166 {
2167         ++vcpu->stat.irq_exits;
2168         return 1;
2169 }
2170
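/*
 * Common handler for VMLOAD and VMSAVE: map the VMCB addressed by RAX and
 * copy the VMLOAD/VMSAVE state between it and the current VMCB, in the
 * direction selected by @vmload.
 */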
2171 static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
2172 {
2173         struct vcpu_svm *svm = to_svm(vcpu);
2174         struct vmcb *vmcb12;
2175         struct kvm_host_map map;
2176         int ret;
2177
2178         if (nested_svm_check_permissions(vcpu))
2179                 return 1;
2180
2181         ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
2182         if (ret) {
2183                 if (ret == -EINVAL)
2184                         kvm_inject_gp(vcpu, 0);
2185                 return 1;
2186         }
2187
2188         vmcb12 = map.hva;
2189
2190         ret = kvm_skip_emulated_instruction(vcpu);
2191
2192         if (vmload) {
2193                 svm_copy_vmloadsave_state(svm->vmcb, vmcb12);
2194                 svm->sysenter_eip_hi = 0;
2195                 svm->sysenter_esp_hi = 0;
2196         } else {
2197                 svm_copy_vmloadsave_state(vmcb12, svm->vmcb);
2198         }
2199
2200         kvm_vcpu_unmap(vcpu, &map, true);
2201
2202         return ret;
2203 }
2204
2205 static int vmload_interception(struct kvm_vcpu *vcpu)
2206 {
2207         return vmload_vmsave_interception(vcpu, true);
2208 }
2209
2210 static int vmsave_interception(struct kvm_vcpu *vcpu)
2211 {
2212         return vmload_vmsave_interception(vcpu, false);
2213 }
2214
2215 static int vmrun_interception(struct kvm_vcpu *vcpu)
2216 {
2217         if (nested_svm_check_permissions(vcpu))
2218                 return 1;
2219
2220         return nested_svm_vmrun(vcpu);
2221 }
2222
2223 enum {
2224         NONE_SVM_INSTR,
2225         SVM_INSTR_VMRUN,
2226         SVM_INSTR_VMLOAD,
2227         SVM_INSTR_VMSAVE,
2228 };
2229
2230 /* Return NONE_SVM_INSTR if not SVM instrs, otherwise return decode result */
2231 static int svm_instr_opcode(struct kvm_vcpu *vcpu)
2232 {
2233         struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
2234
2235         if (ctxt->b != 0x1 || ctxt->opcode_len != 2)
2236                 return NONE_SVM_INSTR;
2237
2238         switch (ctxt->modrm) {
2239         case 0xd8: /* VMRUN */
2240                 return SVM_INSTR_VMRUN;
2241         case 0xda: /* VMLOAD */
2242                 return SVM_INSTR_VMLOAD;
2243         case 0xdb: /* VMSAVE */
2244                 return SVM_INSTR_VMSAVE;
2245         default:
2246                 break;
2247         }
2248
2249         return NONE_SVM_INSTR;
2250 }
2251
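/*
 * Emulate VMRUN/VMLOAD/VMSAVE after a #GP intercept: if L2 is active,
 * reflect the corresponding exit into L1, otherwise invoke the regular
 * intercept handler for the instruction.
 */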
2252 static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode)
2253 {
2254         const int guest_mode_exit_codes[] = {
2255                 [SVM_INSTR_VMRUN] = SVM_EXIT_VMRUN,
2256                 [SVM_INSTR_VMLOAD] = SVM_EXIT_VMLOAD,
2257                 [SVM_INSTR_VMSAVE] = SVM_EXIT_VMSAVE,
2258         };
2259         int (*const svm_instr_handlers[])(struct kvm_vcpu *vcpu) = {
2260                 [SVM_INSTR_VMRUN] = vmrun_interception,
2261                 [SVM_INSTR_VMLOAD] = vmload_interception,
2262                 [SVM_INSTR_VMSAVE] = vmsave_interception,
2263         };
2264         struct vcpu_svm *svm = to_svm(vcpu);
2265         int ret;
2266
2267         if (is_guest_mode(vcpu)) {
2268                 /* Returns '1' or -errno on failure, '0' on success. */
2269                 ret = nested_svm_simple_vmexit(svm, guest_mode_exit_codes[opcode]);
2270                 if (ret)
2271                         return ret;
2272                 return 1;
2273         }
2274         return svm_instr_handlers[opcode](vcpu);
2275 }
2276
2277 /*
2278  * #GP handling code. Note that #GP can be triggered under the following two
2279  * cases:
2280  *   1) SVM VM-related instructions (VMRUN/VMSAVE/VMLOAD) that trigger #GP on
2281  *      some AMD CPUs when the EAX of these instructions points into a reserved
2282  *      memory region (e.g. SMM memory on the host).
2283  *   2) VMware backdoor
2284  */
2285 static int gp_interception(struct kvm_vcpu *vcpu)
2286 {
2287         struct vcpu_svm *svm = to_svm(vcpu);
2288         u32 error_code = svm->vmcb->control.exit_info_1;
2289         int opcode;
2290
2291         /* Both #GP cases have zero error_code */
2292         if (error_code)
2293                 goto reinject;
2294
2295         /* Decode the instruction for usage later */
2296         if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
2297                 goto reinject;
2298
2299         opcode = svm_instr_opcode(vcpu);
2300
2301         if (opcode == NONE_SVM_INSTR) {
2302                 if (!enable_vmware_backdoor)
2303                         goto reinject;
2304
2305                 /*
2306                  * VMware backdoor emulation on #GP interception only handles
2307                  * IN{S}, OUT{S}, and RDPMC.
2308                  */
2309                 if (!is_guest_mode(vcpu))
2310                         return kvm_emulate_instruction(vcpu,
2311                                 EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
2312         } else {
2313                 /* All SVM instructions expect page aligned RAX */
2314                 if (svm->vmcb->save.rax & ~PAGE_MASK)
2315                         goto reinject;
2316
2317                 return emulate_svm_instr(vcpu, opcode);
2318         }
2319
2320 reinject:
2321         kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
2322         return 1;
2323 }
2324
2325 void svm_set_gif(struct vcpu_svm *svm, bool value)
2326 {
2327         if (value) {
2328                 /*
2329                  * If VGIF is enabled, the STGI intercept is only added to
2330                  * detect the opening of the SMI/NMI window; remove it now.
2331                  * Likewise, clear the VINTR intercept, we will set it
2332                  * again while processing KVM_REQ_EVENT if needed.
2333                  */
2334                 if (vgif)
2335                         svm_clr_intercept(svm, INTERCEPT_STGI);
2336                 if (svm_is_intercept(svm, INTERCEPT_VINTR))
2337                         svm_clear_vintr(svm);
2338
2339                 enable_gif(svm);
2340                 if (svm->vcpu.arch.smi_pending ||
2341                     svm->vcpu.arch.nmi_pending ||
2342                     kvm_cpu_has_injectable_intr(&svm->vcpu) ||
2343                     kvm_apic_has_pending_init_or_sipi(&svm->vcpu))
2344                         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
2345         } else {
2346                 disable_gif(svm);
2347
2348                 /*
2349                  * After a CLGI no interrupts should come.  But if vGIF is
2350                  * in use, we still rely on the VINTR intercept (rather than
2351                  * STGI) to detect an open interrupt window.
2352                  */
2353                 if (!vgif)
2354                         svm_clear_vintr(svm);
2355         }
2356 }
2357
2358 static int stgi_interception(struct kvm_vcpu *vcpu)
2359 {
2360         int ret;
2361
2362         if (nested_svm_check_permissions(vcpu))
2363                 return 1;
2364
2365         ret = kvm_skip_emulated_instruction(vcpu);
2366         svm_set_gif(to_svm(vcpu), true);
2367         return ret;
2368 }
2369
2370 static int clgi_interception(struct kvm_vcpu *vcpu)
2371 {
2372         int ret;
2373
2374         if (nested_svm_check_permissions(vcpu))
2375                 return 1;
2376
2377         ret = kvm_skip_emulated_instruction(vcpu);
2378         svm_set_gif(to_svm(vcpu), false);
2379         return ret;
2380 }
2381
2382 static int invlpga_interception(struct kvm_vcpu *vcpu)
2383 {
2384         gva_t gva = kvm_rax_read(vcpu);
2385         u32 asid = kvm_rcx_read(vcpu);
2386
2387         /* FIXME: Handle an address size prefix. */
2388         if (!is_long_mode(vcpu))
2389                 gva = (u32)gva;
2390
2391         trace_kvm_invlpga(to_svm(vcpu)->vmcb->save.rip, asid, gva);
2392
2393         /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
2394         kvm_mmu_invlpg(vcpu, gva);
2395
2396         return kvm_skip_emulated_instruction(vcpu);
2397 }
2398
2399 static int skinit_interception(struct kvm_vcpu *vcpu)
2400 {
2401         trace_kvm_skinit(to_svm(vcpu)->vmcb->save.rip, kvm_rax_read(vcpu));
2402
2403         kvm_queue_exception(vcpu, UD_VECTOR);
2404         return 1;
2405 }
2406
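/*
 * Emulate a task switch intercept: recover the TSS selector and the switch
 * reason from the exit information, clear any event that was being
 * delivered through a task gate, and let the common emulator perform the
 * switch itself.
 */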
2407 static int task_switch_interception(struct kvm_vcpu *vcpu)
2408 {
2409         struct vcpu_svm *svm = to_svm(vcpu);
2410         u16 tss_selector;
2411         int reason;
2412         int int_type = svm->vmcb->control.exit_int_info &
2413                 SVM_EXITINTINFO_TYPE_MASK;
2414         int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
2415         uint32_t type =
2416                 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
2417         uint32_t idt_v =
2418                 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
2419         bool has_error_code = false;
2420         u32 error_code = 0;
2421
2422         tss_selector = (u16)svm->vmcb->control.exit_info_1;
2423
2424         if (svm->vmcb->control.exit_info_2 &
2425             (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
2426                 reason = TASK_SWITCH_IRET;
2427         else if (svm->vmcb->control.exit_info_2 &
2428                  (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
2429                 reason = TASK_SWITCH_JMP;
2430         else if (idt_v)
2431                 reason = TASK_SWITCH_GATE;
2432         else
2433                 reason = TASK_SWITCH_CALL;
2434
2435         if (reason == TASK_SWITCH_GATE) {
2436                 switch (type) {
2437                 case SVM_EXITINTINFO_TYPE_NMI:
2438                         vcpu->arch.nmi_injected = false;
2439                         break;
2440                 case SVM_EXITINTINFO_TYPE_EXEPT:
2441                         if (svm->vmcb->control.exit_info_2 &
2442                             (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
2443                                 has_error_code = true;
2444                                 error_code =
2445                                         (u32)svm->vmcb->control.exit_info_2;
2446                         }
2447                         kvm_clear_exception_queue(vcpu);
2448                         break;
2449                 case SVM_EXITINTINFO_TYPE_INTR:
2450                 case SVM_EXITINTINFO_TYPE_SOFT:
2451                         kvm_clear_interrupt_queue(vcpu);
2452                         break;
2453                 default:
2454                         break;
2455                 }
2456         }
2457
2458         if (reason != TASK_SWITCH_GATE ||
2459             int_type == SVM_EXITINTINFO_TYPE_SOFT ||
2460             (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
2461              (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) {
2462                 if (!svm_skip_emulated_instruction(vcpu))
2463                         return 0;
2464         }
2465
2466         if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
2467                 int_vec = -1;
2468
2469         return kvm_task_switch(vcpu, tss_selector, int_vec, reason,
2470                                has_error_code, error_code);
2471 }
2472
2473 static int iret_interception(struct kvm_vcpu *vcpu)
2474 {
2475         struct vcpu_svm *svm = to_svm(vcpu);
2476
2477         ++vcpu->stat.nmi_window_exits;
2478         vcpu->arch.hflags |= HF_IRET_MASK;
2479         if (!sev_es_guest(vcpu->kvm)) {
2480                 svm_clr_intercept(svm, INTERCEPT_IRET);
2481                 svm->nmi_iret_rip = kvm_rip_read(vcpu);
2482         }
2483         kvm_make_request(KVM_REQ_EVENT, vcpu);
2484         return 1;
2485 }
2486
2487 static int invlpg_interception(struct kvm_vcpu *vcpu)
2488 {
2489         if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2490                 return kvm_emulate_instruction(vcpu, 0);
2491
2492         kvm_mmu_invlpg(vcpu, to_svm(vcpu)->vmcb->control.exit_info_1);
2493         return kvm_skip_emulated_instruction(vcpu);
2494 }
2495
2496 static int emulate_on_interception(struct kvm_vcpu *vcpu)
2497 {
2498         return kvm_emulate_instruction(vcpu, 0);
2499 }
2500
2501 static int rsm_interception(struct kvm_vcpu *vcpu)
2502 {
2503         return kvm_emulate_instruction_from_buffer(vcpu, rsm_ins_bytes, 2);
2504 }
2505
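/*
 * If L1 intercepts selective CR0 writes, a write by L2 that changes any
 * CR0 bit outside of SVM_CR0_SELECTIVE_MASK must be reflected to L1 as a
 * CR0_SEL_WRITE exit.  Returns true if the exit was forwarded to L1.
 */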
2506 static bool check_selective_cr0_intercepted(struct kvm_vcpu *vcpu,
2507                                             unsigned long val)
2508 {
2509         struct vcpu_svm *svm = to_svm(vcpu);
2510         unsigned long cr0 = vcpu->arch.cr0;
2511         bool ret = false;
2512
2513         if (!is_guest_mode(vcpu) ||
2514             (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0))))
2515                 return false;
2516
2517         cr0 &= ~SVM_CR0_SELECTIVE_MASK;
2518         val &= ~SVM_CR0_SELECTIVE_MASK;
2519
2520         if (cr0 ^ val) {
2521                 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
2522                 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
2523         }
2524
2525         return ret;
2526 }
2527
2528 #define CR_VALID (1ULL << 63)
2529
2530 static int cr_interception(struct kvm_vcpu *vcpu)
2531 {
2532         struct vcpu_svm *svm = to_svm(vcpu);
2533         int reg, cr;
2534         unsigned long val;
2535         int err;
2536
2537         if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2538                 return emulate_on_interception(vcpu);
2539
2540         if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
2541                 return emulate_on_interception(vcpu);
2542
2543         reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2544         if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
2545                 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
2546         else
2547                 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
2548
2549         err = 0;
2550         if (cr >= 16) { /* mov to cr */
2551                 cr -= 16;
2552                 val = kvm_register_read(vcpu, reg);
2553                 trace_kvm_cr_write(cr, val);
2554                 switch (cr) {
2555                 case 0:
2556                         if (!check_selective_cr0_intercepted(vcpu, val))
2557                                 err = kvm_set_cr0(vcpu, val);
2558                         else
2559                                 return 1;
2560
2561                         break;
2562                 case 3:
2563                         err = kvm_set_cr3(vcpu, val);
2564                         break;
2565                 case 4:
2566                         err = kvm_set_cr4(vcpu, val);
2567                         break;
2568                 case 8:
2569                         err = kvm_set_cr8(vcpu, val);
2570                         break;
2571                 default:
2572                         WARN(1, "unhandled write to CR%d", cr);
2573                         kvm_queue_exception(vcpu, UD_VECTOR);
2574                         return 1;
2575                 }
2576         } else { /* mov from cr */
2577                 switch (cr) {
2578                 case 0:
2579                         val = kvm_read_cr0(vcpu);
2580                         break;
2581                 case 2:
2582                         val = vcpu->arch.cr2;
2583                         break;
2584                 case 3:
2585                         val = kvm_read_cr3(vcpu);
2586                         break;
2587                 case 4:
2588                         val = kvm_read_cr4(vcpu);
2589                         break;
2590                 case 8:
2591                         val = kvm_get_cr8(vcpu);
2592                         break;
2593                 default:
2594                         WARN(1, "unhandled read from CR%d", cr);
2595                         kvm_queue_exception(vcpu, UD_VECTOR);
2596                         return 1;
2597                 }
2598                 kvm_register_write(vcpu, reg, val);
2599                 trace_kvm_cr_read(cr, val);
2600         }
2601         return kvm_complete_insn_gp(vcpu, err);
2602 }
2603
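/*
 * Handle CR0/CR4/CR8 write traps (used by SEV-ES guests): the new value
 * arrives in exit_info_1, so update KVM's view of the register and run the
 * corresponding post-set handling.
 */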
2604 static int cr_trap(struct kvm_vcpu *vcpu)
2605 {
2606         struct vcpu_svm *svm = to_svm(vcpu);
2607         unsigned long old_value, new_value;
2608         unsigned int cr;
2609         int ret = 0;
2610
2611         new_value = (unsigned long)svm->vmcb->control.exit_info_1;
2612
2613         cr = svm->vmcb->control.exit_code - SVM_EXIT_CR0_WRITE_TRAP;
2614         switch (cr) {
2615         case 0:
2616                 old_value = kvm_read_cr0(vcpu);
2617                 svm_set_cr0(vcpu, new_value);
2618
2619                 kvm_post_set_cr0(vcpu, old_value, new_value);
2620                 break;
2621         case 4:
2622                 old_value = kvm_read_cr4(vcpu);
2623                 svm_set_cr4(vcpu, new_value);
2624
2625                 kvm_post_set_cr4(vcpu, old_value, new_value);
2626                 break;
2627         case 8:
2628                 ret = kvm_set_cr8(vcpu, new_value);
2629                 break;
2630         default:
2631                 WARN(1, "unhandled CR%d write trap", cr);
2632                 kvm_queue_exception(vcpu, UD_VECTOR);
2633                 return 1;
2634         }
2635
2636         return kvm_complete_insn_gp(vcpu, ret);
2637 }
2638
2639 static int dr_interception(struct kvm_vcpu *vcpu)
2640 {
2641         struct vcpu_svm *svm = to_svm(vcpu);
2642         int reg, dr;
2643         unsigned long val;
2644         int err = 0;
2645
2646         if (vcpu->guest_debug == 0) {
2647                 /*
2648                  * No more DR vmexits; force a reload of the debug registers
2649                  * and reenter on this instruction.  The next vmexit will
2650                  * retrieve the full state of the debug registers.
2651                  */
2652                 clr_dr_intercepts(svm);
2653                 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
2654                 return 1;
2655         }
2656
2657         if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
2658                 return emulate_on_interception(vcpu);
2659
2660         reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2661         dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
2662         if (dr >= 16) { /* mov to DRn  */
2663                 dr -= 16;
2664                 val = kvm_register_read(vcpu, reg);
2665                 err = kvm_set_dr(vcpu, dr, val);
2666         } else {
2667                 kvm_get_dr(vcpu, dr, &val);
2668                 kvm_register_write(vcpu, reg, val);
2669         }
2670
2671         return kvm_complete_insn_gp(vcpu, err);
2672 }
2673
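/*
 * CR8 writes are handled like any other CR write, but without an in-kernel
 * LAPIC a write that lowers the TPR must exit to userspace (KVM_EXIT_SET_TPR)
 * so the userspace APIC can reevaluate pending interrupts.
 */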
2674 static int cr8_write_interception(struct kvm_vcpu *vcpu)
2675 {
2676         int r;
2677
2678         u8 cr8_prev = kvm_get_cr8(vcpu);
2679         /* instruction emulation calls kvm_set_cr8() */
2680         r = cr_interception(vcpu);
2681         if (lapic_in_kernel(vcpu))
2682                 return r;
2683         if (cr8_prev <= kvm_get_cr8(vcpu))
2684                 return r;
2685         vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
2686         return 0;
2687 }
2688
2689 static int efer_trap(struct kvm_vcpu *vcpu)
2690 {
2691         struct msr_data msr_info;
2692         int ret;
2693
2694         /*
2695          * Clear the EFER_SVME bit from EFER. The SVM code always sets this
2696          * bit in svm_set_efer(), but __kvm_valid_efer() checks it against
2697          * whether the guest has X86_FEATURE_SVM - this avoids a failure if
2698          * the guest doesn't have X86_FEATURE_SVM.
2699          */
2700         msr_info.host_initiated = false;
2701         msr_info.index = MSR_EFER;
2702         msr_info.data = to_svm(vcpu)->vmcb->control.exit_info_1 & ~EFER_SVME;
2703         ret = kvm_set_msr_common(vcpu, &msr_info);
2704
2705         return kvm_complete_insn_gp(vcpu, ret);
2706 }
2707
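/*
 * Report the value KVM supports for feature MSRs: LFENCE serialization in
 * MSR_F10H_DECFG when the host has it, and zero (no extra capabilities)
 * for MSR_IA32_PERF_CAPABILITIES.
 */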
2708 static int svm_get_msr_feature(struct kvm_msr_entry *msr)
2709 {
2710         msr->data = 0;
2711
2712         switch (msr->index) {
2713         case MSR_F10H_DECFG:
2714                 if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
2715                         msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
2716                 break;
2717         case MSR_IA32_PERF_CAPABILITIES:
2718                 return 0;
2719         default:
2720                 return KVM_MSR_RET_INVALID;
2721         }
2722
2723         return 0;
2724 }
2725
2726 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2727 {
2728         struct vcpu_svm *svm = to_svm(vcpu);
2729
2730         switch (msr_info->index) {
2731         case MSR_AMD64_TSC_RATIO:
2732                 if (!msr_info->host_initiated && !svm->tsc_scaling_enabled)
2733                         return 1;
2734                 msr_info->data = svm->tsc_ratio_msr;
2735                 break;
2736         case MSR_STAR:
2737                 msr_info->data = svm->vmcb01.ptr->save.star;
2738                 break;
2739 #ifdef CONFIG_X86_64
2740         case MSR_LSTAR:
2741                 msr_info->data = svm->vmcb01.ptr->save.lstar;
2742                 break;
2743         case MSR_CSTAR:
2744                 msr_info->data = svm->vmcb01.ptr->save.cstar;
2745                 break;
2746         case MSR_KERNEL_GS_BASE:
2747                 msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base;
2748                 break;
2749         case MSR_SYSCALL_MASK:
2750                 msr_info->data = svm->vmcb01.ptr->save.sfmask;
2751                 break;
2752 #endif
2753         case MSR_IA32_SYSENTER_CS:
2754                 msr_info->data = svm->vmcb01.ptr->save.sysenter_cs;
2755                 break;
2756         case MSR_IA32_SYSENTER_EIP:
2757                 msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip;
2758                 if (guest_cpuid_is_intel(vcpu))
2759                         msr_info->data |= (u64)svm->sysenter_eip_hi << 32;
2760                 break;
2761         case MSR_IA32_SYSENTER_ESP:
2762                 msr_info->data = svm->vmcb01.ptr->save.sysenter_esp;
2763                 if (guest_cpuid_is_intel(vcpu))
2764                         msr_info->data |= (u64)svm->sysenter_esp_hi << 32;
2765                 break;
2766         case MSR_TSC_AUX:
2767                 msr_info->data = svm->tsc_aux;
2768                 break;
2769         case MSR_IA32_DEBUGCTLMSR:
2770         case MSR_IA32_LASTBRANCHFROMIP:
2771         case MSR_IA32_LASTBRANCHTOIP:
2772         case MSR_IA32_LASTINTFROMIP:
2773         case MSR_IA32_LASTINTTOIP:
2774                 msr_info->data = svm_get_lbr_msr(svm, msr_info->index);
2775                 break;
2776         case MSR_VM_HSAVE_PA:
2777                 msr_info->data = svm->nested.hsave_msr;
2778                 break;
2779         case MSR_VM_CR:
2780                 msr_info->data = svm->nested.vm_cr_msr;
2781                 break;
2782         case MSR_IA32_SPEC_CTRL:
2783                 if (!msr_info->host_initiated &&
2784                     !guest_has_spec_ctrl_msr(vcpu))
2785                         return 1;
2786
2787                 if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
2788                         msr_info->data = svm->vmcb->save.spec_ctrl;
2789                 else
2790                         msr_info->data = svm->spec_ctrl;
2791                 break;
2792         case MSR_AMD64_VIRT_SPEC_CTRL:
2793                 if (!msr_info->host_initiated &&
2794                     !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
2795                         return 1;
2796
2797                 msr_info->data = svm->virt_spec_ctrl;
2798                 break;
2799         case MSR_F15H_IC_CFG: {
2800
2801                 int family, model;
2802
2803                 family = guest_cpuid_family(vcpu);
2804                 model  = guest_cpuid_model(vcpu);
2805
2806                 if (family < 0 || model < 0)
2807                         return kvm_get_msr_common(vcpu, msr_info);
2808
2809                 msr_info->data = 0;
2810
2811                 if (family == 0x15 &&
2812                     (model >= 0x2 && model < 0x20))
2813                         msr_info->data = 0x1E;
2814                 }
2815                 break;
2816         case MSR_F10H_DECFG:
2817                 msr_info->data = svm->msr_decfg;
2818                 break;
2819         default:
2820                 return kvm_get_msr_common(vcpu, msr_info);
2821         }
2822         return 0;
2823 }
2824
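/*
 * Completion callback for emulated MSR accesses.  For SEV-ES guests a
 * failed access is reported back through the GHCB as an injected #GP
 * instead of queueing the exception directly.
 */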
2825 static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
2826 {
2827         struct vcpu_svm *svm = to_svm(vcpu);
2828         if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->sev_es.ghcb))
2829                 return kvm_complete_insn_gp(vcpu, err);
2830
2831         ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 1);
2832         ghcb_set_sw_exit_info_2(svm->sev_es.ghcb,
2833                                 X86_TRAP_GP |
2834                                 SVM_EVTINJ_TYPE_EXEPT |
2835                                 SVM_EVTINJ_VALID);
2836         return 1;
2837 }
2838
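/*
 * Emulate writes to MSR_VM_CR: reserved bits are rejected, SVM_LOCK and
 * SVM_DIS become sticky once SVM_DIS is set, and setting SVM_DIS while
 * EFER.SVME is enabled fails.
 */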
2839 static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
2840 {
2841         struct vcpu_svm *svm = to_svm(vcpu);
2842         int svm_dis, chg_mask;
2843
2844         if (data & ~SVM_VM_CR_VALID_MASK)
2845                 return 1;
2846
2847         chg_mask = SVM_VM_CR_VALID_MASK;
2848
2849         if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
2850                 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
2851
2852         svm->nested.vm_cr_msr &= ~chg_mask;
2853         svm->nested.vm_cr_msr |= (data & chg_mask);
2854
2855         svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
2856
2857         /* check for svm_disable while efer.svme is set */
2858         if (svm_dis && (vcpu->arch.efer & EFER_SVME))
2859                 return 1;
2860
2861         return 0;
2862 }
2863
2864 static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
2865 {
2866         struct vcpu_svm *svm = to_svm(vcpu);
2867         int r;
2868
2869         u32 ecx = msr->index;
2870         u64 data = msr->data;
2871         switch (ecx) {
2872         case MSR_AMD64_TSC_RATIO:
2873
2874                 if (!svm->tsc_scaling_enabled) {
2875
2876                         if (!msr->host_initiated)
2877                                 return 1;
2878                         /*
2879                          * In case TSC scaling is not enabled, always
2880                          * leave this MSR at the default value.
2881                          *
2882                          * Due to a bug, QEMU 6.2.0 tries to set
2883                          * this MSR to 0 if TSC scaling is not enabled.
2884                          * Ignore that value as well.
2885                          */
2886                         if (data != 0 && data != svm->tsc_ratio_msr)
2887                                 return 1;
2888                         break;
2889                 }
2890
2891                 if (data & SVM_TSC_RATIO_RSVD)
2892                         return 1;
2893
2894                 svm->tsc_ratio_msr = data;
2895
2896                 if (svm->tsc_scaling_enabled && is_guest_mode(vcpu))
2897                         nested_svm_update_tsc_ratio_msr(vcpu);
2898
2899                 break;
2900         case MSR_IA32_CR_PAT:
2901                 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
2902                         return 1;
2903                 vcpu->arch.pat = data;
2904                 svm->vmcb01.ptr->save.g_pat = data;
2905                 if (is_guest_mode(vcpu))
2906                         nested_vmcb02_compute_g_pat(svm);
2907                 vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
2908                 break;
2909         case MSR_IA32_SPEC_CTRL:
2910                 if (!msr->host_initiated &&
2911                     !guest_has_spec_ctrl_msr(vcpu))
2912                         return 1;
2913
2914                 if (kvm_spec_ctrl_test_value(data))
2915                         return 1;
2916
2917                 if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
2918                         svm->vmcb->save.spec_ctrl = data;
2919                 else
2920                         svm->spec_ctrl = data;
2921                 if (!data)
2922                         break;
2923
2924                 /*
2925                  * For non-nested:
2926                  * When it's written (to non-zero) for the first time, pass
2927                  * it through.
2928                  *
2929                  * For nested:
2930                  * The handling of the MSR bitmap for L2 guests is done in
2931                  * nested_svm_vmrun_msrpm.
2932                  * We update the L1 MSR bit as well since it will end up
2933                  * touching the MSR anyway now.
2934                  */
2935                 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
2936                 break;
2937         case MSR_IA32_PRED_CMD:
2938                 if (!msr->host_initiated &&
2939                     !guest_has_pred_cmd_msr(vcpu))
2940                         return 1;
2941
2942                 if (data & ~PRED_CMD_IBPB)
2943                         return 1;
2944                 if (!boot_cpu_has(X86_FEATURE_IBPB))
2945                         return 1;
2946                 if (!data)
2947                         break;
2948
2949                 wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
2950                 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
2951                 break;
2952         case MSR_AMD64_VIRT_SPEC_CTRL:
2953                 if (!msr->host_initiated &&
2954                     !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
2955                         return 1;
2956
2957                 if (data & ~SPEC_CTRL_SSBD)
2958                         return 1;
2959
2960                 svm->virt_spec_ctrl = data;
2961                 break;
2962         case MSR_STAR:
2963                 svm->vmcb01.ptr->save.star = data;
2964                 break;
2965 #ifdef CONFIG_X86_64
2966         case MSR_LSTAR:
2967                 svm->vmcb01.ptr->save.lstar = data;
2968                 break;
2969         case MSR_CSTAR:
2970                 svm->vmcb01.ptr->save.cstar = data;
2971                 break;
2972         case MSR_KERNEL_GS_BASE:
2973                 svm->vmcb01.ptr->save.kernel_gs_base = data;
2974                 break;
2975         case MSR_SYSCALL_MASK:
2976                 svm->vmcb01.ptr->save.sfmask = data;
2977                 break;
2978 #endif
2979         case MSR_IA32_SYSENTER_CS:
2980                 svm->vmcb01.ptr->save.sysenter_cs = data;
2981                 break;
2982         case MSR_IA32_SYSENTER_EIP:
2983                 svm->vmcb01.ptr->save.sysenter_eip = (u32)data;
2984                 /*
2985                  * We only intercept the MSR_IA32_SYSENTER_{EIP|ESP} MSRs
2986                  * when we spoof an Intel vendor ID (for cross-vendor migration).
2987                  * In this case we use this intercept to track the high
2988                  * 32-bit part of these MSRs to support Intel's
2989                  * implementation of SYSENTER/SYSEXIT.
2990                  */
2991                 svm->sysenter_eip_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
2992                 break;
2993         case MSR_IA32_SYSENTER_ESP:
2994                 svm->vmcb01.ptr->save.sysenter_esp = (u32)data;
2995                 svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
2996                 break;
2997         case MSR_TSC_AUX:
2998                 /*
2999                  * TSC_AUX is usually changed only during boot and never read
3000                  * directly.  Intercept TSC_AUX instead of exposing it to the
3001                  * guest via direct_access_msrs, and switch it via user return.
3002                  */
3003                 preempt_disable();
3004                 r = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull);
3005                 preempt_enable();
3006                 if (r)
3007                         return 1;
3008
3009                 svm->tsc_aux = data;
3010                 break;
3011         case MSR_IA32_DEBUGCTLMSR:
3012                 if (!lbrv) {
3013                         vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
3014                                     __func__, data);
3015                         break;
3016                 }
3017                 if (data & DEBUGCTL_RESERVED_BITS)
3018                         return 1;
3019
3020                 if (svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK)
3021                         svm->vmcb->save.dbgctl = data;
3022                 else
3023                         svm->vmcb01.ptr->save.dbgctl = data;
3024
3025                 svm_update_lbrv(vcpu);
3026
3027                 break;
3028         case MSR_VM_HSAVE_PA:
3029                 /*
3030                  * Old kernels did not validate the value written to
3031                  * MSR_VM_HSAVE_PA.  Allow KVM_SET_MSR to set an invalid
3032                  * value to allow live migrating buggy or malicious guests
3033                  * originating from those kernels.
3034                  */
3035                 if (!msr->host_initiated && !page_address_valid(vcpu, data))
3036                         return 1;
3037
3038                 svm->nested.hsave_msr = data & PAGE_MASK;
3039                 break;
3040         case MSR_VM_CR:
3041                 return svm_set_vm_cr(vcpu, data);
3042         case MSR_VM_IGNNE:
3043                 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
3044                 break;
3045         case MSR_F10H_DECFG: {
3046                 struct kvm_msr_entry msr_entry;
3047
3048                 msr_entry.index = msr->index;
3049                 if (svm_get_msr_feature(&msr_entry))
3050                         return 1;
3051
3052                 /* Check the supported bits */
3053                 if (data & ~msr_entry.data)
3054                         return 1;
3055
3056                 /* Don't allow the guest to change a bit, #GP */
3057                 if (!msr->host_initiated && (data ^ msr_entry.data))
3058                         return 1;
3059
3060                 svm->msr_decfg = data;
3061                 break;
3062         }
3063         default:
3064                 return kvm_set_msr_common(vcpu, msr);
3065         }
3066         return 0;
3067 }
3068
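/*
 * For an MSR intercept, EXITINFO1 is 1 for WRMSR and 0 for RDMSR, so simply
 * dispatch to the common RDMSR/WRMSR emulation helpers based on that bit.
 */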
3069 static int msr_interception(struct kvm_vcpu *vcpu)
3070 {
3071         if (to_svm(vcpu)->vmcb->control.exit_info_1)
3072                 return kvm_emulate_wrmsr(vcpu);
3073         else
3074                 return kvm_emulate_rdmsr(vcpu);
3075 }
3076
3077 static int interrupt_window_interception(struct kvm_vcpu *vcpu)
3078 {
3079         kvm_make_request(KVM_REQ_EVENT, vcpu);
3080         svm_clear_vintr(to_svm(vcpu));
3081
3082         /*
3083          * If not running nested, for AVIC, the only reason to end up here
3084          * is ExtINTs.  In this case AVIC was temporarily disabled in order
3085          * to request the IRQ window, and it has to be re-enabled.
3086          *
3087          * If running nested, still remove the VM wide AVIC inhibit to
3088          * support the case in which the interrupt window was requested
3089          * while the vCPU was not running nested.
3090          *
3091          * vCPUs that are still running nested will keep AVIC inhibited
3092          * due to the per-vCPU AVIC inhibition.
3093          */
3094         kvm_clear_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);
3095
3096         ++vcpu->stat.irq_window_exits;
3097         return 1;
3098 }
3099
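/*
 * A PAUSE intercept usually means the vCPU is spinning on a lock.  Grow the
 * pause-filter window to reduce future exits, and let kvm_vcpu_on_spin()
 * try to yield to a vCPU that might be holding the lock.
 */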
3100 static int pause_interception(struct kvm_vcpu *vcpu)
3101 {
3102         bool in_kernel;
3103         /*
3104          * CPL is not made available for an SEV-ES guest, therefore
3105          * vcpu->arch.preempted_in_kernel can never be true.  Just
3106          * set in_kernel to false as well.
3107          */
3108         in_kernel = !sev_es_guest(vcpu->kvm) && svm_get_cpl(vcpu) == 0;
3109
3110         grow_ple_window(vcpu);
3111
3112         kvm_vcpu_on_spin(vcpu, in_kernel);
3113         return kvm_skip_emulated_instruction(vcpu);
3114 }
3115
3116 static int invpcid_interception(struct kvm_vcpu *vcpu)
3117 {
3118         struct vcpu_svm *svm = to_svm(vcpu);
3119         unsigned long type;
3120         gva_t gva;
3121
3122         if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
3123                 kvm_queue_exception(vcpu, UD_VECTOR);
3124                 return 1;
3125         }
3126
3127         /*
3128          * For an INVPCID intercept:
3129          * EXITINFO1 provides the linear address of the memory operand.
3130          * EXITINFO2 provides the contents of the register operand.
3131          */
3132         type = svm->vmcb->control.exit_info_2;
3133         gva = svm->vmcb->control.exit_info_1;
3134
3135         return kvm_handle_invpcid(vcpu, type, gva);
3136 }
3137
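/*
 * Dispatch table indexed by SVM exit code.  Exit codes without an entry here
 * (or beyond the end of the array) are treated as unexpected exits and are
 * reported to userspace via svm_handle_invalid_exit().
 */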
3138 static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
3139         [SVM_EXIT_READ_CR0]                     = cr_interception,
3140         [SVM_EXIT_READ_CR3]                     = cr_interception,
3141         [SVM_EXIT_READ_CR4]                     = cr_interception,
3142         [SVM_EXIT_READ_CR8]                     = cr_interception,
3143         [SVM_EXIT_CR0_SEL_WRITE]                = cr_interception,
3144         [SVM_EXIT_WRITE_CR0]                    = cr_interception,
3145         [SVM_EXIT_WRITE_CR3]                    = cr_interception,
3146         [SVM_EXIT_WRITE_CR4]                    = cr_interception,
3147         [SVM_EXIT_WRITE_CR8]                    = cr8_write_interception,
3148         [SVM_EXIT_READ_DR0]                     = dr_interception,
3149         [SVM_EXIT_READ_DR1]                     = dr_interception,
3150         [SVM_EXIT_READ_DR2]                     = dr_interception,
3151         [SVM_EXIT_READ_DR3]                     = dr_interception,
3152         [SVM_EXIT_READ_DR4]                     = dr_interception,
3153         [SVM_EXIT_READ_DR5]                     = dr_interception,
3154         [SVM_EXIT_READ_DR6]                     = dr_interception,
3155         [SVM_EXIT_READ_DR7]                     = dr_interception,
3156         [SVM_EXIT_WRITE_DR0]                    = dr_interception,
3157         [SVM_EXIT_WRITE_DR1]                    = dr_interception,
3158         [SVM_EXIT_WRITE_DR2]                    = dr_interception,
3159         [SVM_EXIT_WRITE_DR3]                    = dr_interception,
3160         [SVM_EXIT_WRITE_DR4]                    = dr_interception,
3161         [SVM_EXIT_WRITE_DR5]                    = dr_interception,
3162         [SVM_EXIT_WRITE_DR6]                    = dr_interception,
3163         [SVM_EXIT_WRITE_DR7]                    = dr_interception,
3164         [SVM_EXIT_EXCP_BASE + DB_VECTOR]        = db_interception,
3165         [SVM_EXIT_EXCP_BASE + BP_VECTOR]        = bp_interception,
3166         [SVM_EXIT_EXCP_BASE + UD_VECTOR]        = ud_interception,
3167         [SVM_EXIT_EXCP_BASE + PF_VECTOR]        = pf_interception,
3168         [SVM_EXIT_EXCP_BASE + MC_VECTOR]        = mc_interception,
3169         [SVM_EXIT_EXCP_BASE + AC_VECTOR]        = ac_interception,
3170         [SVM_EXIT_EXCP_BASE + GP_VECTOR]        = gp_interception,
3171         [SVM_EXIT_INTR]                         = intr_interception,
3172         [SVM_EXIT_NMI]                          = nmi_interception,
3173         [SVM_EXIT_SMI]                          = smi_interception,
3174         [SVM_EXIT_VINTR]                        = interrupt_window_interception,
3175         [SVM_EXIT_RDPMC]                        = kvm_emulate_rdpmc,
3176         [SVM_EXIT_CPUID]                        = kvm_emulate_cpuid,
3177         [SVM_EXIT_IRET]                         = iret_interception,
3178         [SVM_EXIT_INVD]                         = kvm_emulate_invd,
3179         [SVM_EXIT_PAUSE]                        = pause_interception,
3180         [SVM_EXIT_HLT]                          = kvm_emulate_halt,
3181         [SVM_EXIT_INVLPG]                       = invlpg_interception,
3182         [SVM_EXIT_INVLPGA]                      = invlpga_interception,
3183         [SVM_EXIT_IOIO]                         = io_interception,
3184         [SVM_EXIT_MSR]                          = msr_interception,
3185         [SVM_EXIT_TASK_SWITCH]                  = task_switch_interception,
3186         [SVM_EXIT_SHUTDOWN]                     = shutdown_interception,
3187         [SVM_EXIT_VMRUN]                        = vmrun_interception,
3188         [SVM_EXIT_VMMCALL]                      = kvm_emulate_hypercall,
3189         [SVM_EXIT_VMLOAD]                       = vmload_interception,
3190         [SVM_EXIT_VMSAVE]                       = vmsave_interception,
3191         [SVM_EXIT_STGI]                         = stgi_interception,
3192         [SVM_EXIT_CLGI]                         = clgi_interception,
3193         [SVM_EXIT_SKINIT]                       = skinit_interception,
3194         [SVM_EXIT_RDTSCP]                       = kvm_handle_invalid_op,
3195         [SVM_EXIT_WBINVD]                       = kvm_emulate_wbinvd,
3196         [SVM_EXIT_MONITOR]                      = kvm_emulate_monitor,
3197         [SVM_EXIT_MWAIT]                        = kvm_emulate_mwait,
3198         [SVM_EXIT_XSETBV]                       = kvm_emulate_xsetbv,
3199         [SVM_EXIT_RDPRU]                        = kvm_handle_invalid_op,
3200         [SVM_EXIT_EFER_WRITE_TRAP]              = efer_trap,
3201         [SVM_EXIT_CR0_WRITE_TRAP]               = cr_trap,
3202         [SVM_EXIT_CR4_WRITE_TRAP]               = cr_trap,
3203         [SVM_EXIT_CR8_WRITE_TRAP]               = cr_trap,
3204         [SVM_EXIT_INVPCID]                      = invpcid_interception,
3205         [SVM_EXIT_NPF]                          = npf_interception,
3206         [SVM_EXIT_RSM]                          = rsm_interception,
3207         [SVM_EXIT_AVIC_INCOMPLETE_IPI]          = avic_incomplete_ipi_interception,
3208         [SVM_EXIT_AVIC_UNACCELERATED_ACCESS]    = avic_unaccelerated_access_interception,
3209         [SVM_EXIT_VMGEXIT]                      = sev_handle_vmgexit,
3210 };
3211
3212 static void dump_vmcb(struct kvm_vcpu *vcpu)
3213 {
3214         struct vcpu_svm *svm = to_svm(vcpu);
3215         struct vmcb_control_area *control = &svm->vmcb->control;
3216         struct vmcb_save_area *save = &svm->vmcb->save;
3217         struct vmcb_save_area *save01 = &svm->vmcb01.ptr->save;
3218
3219         if (!dump_invalid_vmcb) {
3220                 pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
3221                 return;
3222         }
3223
3224         pr_err("VMCB %p, last attempted VMRUN on CPU %d\n",
3225                svm->current_vmcb->ptr, vcpu->arch.last_vmentry_cpu);
3226         pr_err("VMCB Control Area:\n");
3227         pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff);
3228         pr_err("%-20s%04x\n", "cr_write:", control->intercepts[INTERCEPT_CR] >> 16);
3229         pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff);
3230         pr_err("%-20s%04x\n", "dr_write:", control->intercepts[INTERCEPT_DR] >> 16);
3231         pr_err("%-20s%08x\n", "exceptions:", control->intercepts[INTERCEPT_EXCEPTION]);
3232         pr_err("%-20s%08x %08x\n", "intercepts:",
3233                control->intercepts[INTERCEPT_WORD3],
3234                control->intercepts[INTERCEPT_WORD4]);
3235         pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
3236         pr_err("%-20s%d\n", "pause filter threshold:",
3237                control->pause_filter_thresh);
3238         pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
3239         pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
3240         pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
3241         pr_err("%-20s%d\n", "asid:", control->asid);
3242         pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
3243         pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
3244         pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
3245         pr_err("%-20s%08x\n", "int_state:", control->int_state);
3246         pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
3247         pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
3248         pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
3249         pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
3250         pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
3251         pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
3252         pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
3253         pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
3254         pr_err("%-20s%016llx\n", "ghcb:", control->ghcb_gpa);
3255         pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
3256         pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
3257         pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
3258         pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
3259         pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
3260         pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
3261         pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
3262         pr_err("%-20s%016llx\n", "vmsa_pa:", control->vmsa_pa);
3263         pr_err("VMCB State Save Area:\n");
3264         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3265                "es:",
3266                save->es.selector, save->es.attrib,
3267                save->es.limit, save->es.base);
3268         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3269                "cs:",
3270                save->cs.selector, save->cs.attrib,
3271                save->cs.limit, save->cs.base);
3272         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3273                "ss:",
3274                save->ss.selector, save->ss.attrib,
3275                save->ss.limit, save->ss.base);
3276         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3277                "ds:",
3278                save->ds.selector, save->ds.attrib,
3279                save->ds.limit, save->ds.base);
3280         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3281                "fs:",
3282                save01->fs.selector, save01->fs.attrib,
3283                save01->fs.limit, save01->fs.base);
3284         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3285                "gs:",
3286                save01->gs.selector, save01->gs.attrib,
3287                save01->gs.limit, save01->gs.base);
3288         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3289                "gdtr:",
3290                save->gdtr.selector, save->gdtr.attrib,
3291                save->gdtr.limit, save->gdtr.base);
3292         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3293                "ldtr:",
3294                save01->ldtr.selector, save01->ldtr.attrib,
3295                save01->ldtr.limit, save01->ldtr.base);
3296         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3297                "idtr:",
3298                save->idtr.selector, save->idtr.attrib,
3299                save->idtr.limit, save->idtr.base);
3300         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3301                "tr:",
3302                save01->tr.selector, save01->tr.attrib,
3303                save01->tr.limit, save01->tr.base);
3304         pr_err("vmpl: %d   cpl:  %d               efer:          %016llx\n",
3305                save->vmpl, save->cpl, save->efer);
3306         pr_err("%-15s %016llx %-13s %016llx\n",
3307                "cr0:", save->cr0, "cr2:", save->cr2);
3308         pr_err("%-15s %016llx %-13s %016llx\n",
3309                "cr3:", save->cr3, "cr4:", save->cr4);
3310         pr_err("%-15s %016llx %-13s %016llx\n",
3311                "dr6:", save->dr6, "dr7:", save->dr7);
3312         pr_err("%-15s %016llx %-13s %016llx\n",
3313                "rip:", save->rip, "rflags:", save->rflags);
3314         pr_err("%-15s %016llx %-13s %016llx\n",
3315                "rsp:", save->rsp, "rax:", save->rax);
3316         pr_err("%-15s %016llx %-13s %016llx\n",
3317                "star:", save01->star, "lstar:", save01->lstar);
3318         pr_err("%-15s %016llx %-13s %016llx\n",
3319                "cstar:", save01->cstar, "sfmask:", save01->sfmask);
3320         pr_err("%-15s %016llx %-13s %016llx\n",
3321                "kernel_gs_base:", save01->kernel_gs_base,
3322                "sysenter_cs:", save01->sysenter_cs);
3323         pr_err("%-15s %016llx %-13s %016llx\n",
3324                "sysenter_esp:", save01->sysenter_esp,
3325                "sysenter_eip:", save01->sysenter_eip);
3326         pr_err("%-15s %016llx %-13s %016llx\n",
3327                "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
3328         pr_err("%-15s %016llx %-13s %016llx\n",
3329                "br_from:", save->br_from, "br_to:", save->br_to);
3330         pr_err("%-15s %016llx %-13s %016llx\n",
3331                "excp_from:", save->last_excp_from,
3332                "excp_to:", save->last_excp_to);
3333 }
3334
3335 static bool svm_check_exit_valid(u64 exit_code)
3336 {
3337         return (exit_code < ARRAY_SIZE(svm_exit_handlers) &&
3338                 svm_exit_handlers[exit_code]);
3339 }
3340
3341 static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code)
3342 {
3343         vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%llx\n", exit_code);
3344         dump_vmcb(vcpu);
3345         vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3346         vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
3347         vcpu->run->internal.ndata = 2;
3348         vcpu->run->internal.data[0] = exit_code;
3349         vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
3350         return 0;
3351 }
3352
3353 int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code)
3354 {
3355         if (!svm_check_exit_valid(exit_code))
3356                 return svm_handle_invalid_exit(vcpu, exit_code);
3357
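        /*
         * With retpolines enabled, the indirect call through the handler
         * table is relatively expensive, so dispatch the most common exit
         * reasons with direct calls instead.
         */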
3358 #ifdef CONFIG_RETPOLINE
3359         if (exit_code == SVM_EXIT_MSR)
3360                 return msr_interception(vcpu);
3361         else if (exit_code == SVM_EXIT_VINTR)
3362                 return interrupt_window_interception(vcpu);
3363         else if (exit_code == SVM_EXIT_INTR)
3364                 return intr_interception(vcpu);
3365         else if (exit_code == SVM_EXIT_HLT)
3366                 return kvm_emulate_halt(vcpu);
3367         else if (exit_code == SVM_EXIT_NPF)
3368                 return npf_interception(vcpu);
3369 #endif
3370         return svm_exit_handlers[exit_code](vcpu);
3371 }
3372
3373 static void svm_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
3374                               u64 *info1, u64 *info2,
3375                               u32 *intr_info, u32 *error_code)
3376 {
3377         struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
3378
3379         *reason = control->exit_code;
3380         *info1 = control->exit_info_1;
3381         *info2 = control->exit_info_2;
3382         *intr_info = control->exit_int_info;
3383         if ((*intr_info & SVM_EXITINTINFO_VALID) &&
3384             (*intr_info & SVM_EXITINTINFO_VALID_ERR))
3385                 *error_code = control->exit_int_info_err;
3386         else
3387                 *error_code = 0;
3388 }
3389
3390 static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
3391 {
3392         struct vcpu_svm *svm = to_svm(vcpu);
3393         struct kvm_run *kvm_run = vcpu->run;
3394         u32 exit_code = svm->vmcb->control.exit_code;
3395
3396         trace_kvm_exit(vcpu, KVM_ISA_SVM);
3397
3398         /* SEV-ES guests must use the CR write traps to track CR registers. */
3399         if (!sev_es_guest(vcpu->kvm)) {
3400                 if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE))
3401                         vcpu->arch.cr0 = svm->vmcb->save.cr0;
3402                 if (npt_enabled)
3403                         vcpu->arch.cr3 = svm->vmcb->save.cr3;
3404         }
3405
3406         if (is_guest_mode(vcpu)) {
3407                 int vmexit;
3408
3409                 trace_kvm_nested_vmexit(vcpu, KVM_ISA_SVM);
3410
3411                 vmexit = nested_svm_exit_special(svm);
3412
3413                 if (vmexit == NESTED_EXIT_CONTINUE)
3414                         vmexit = nested_svm_exit_handled(svm);
3415
3416                 if (vmexit == NESTED_EXIT_DONE)
3417                         return 1;
3418         }
3419
3420         if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
3421                 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
3422                 kvm_run->fail_entry.hardware_entry_failure_reason
3423                         = svm->vmcb->control.exit_code;
3424                 kvm_run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
3425                 dump_vmcb(vcpu);
3426                 return 0;
3427         }
3428
3429         if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
3430             exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
3431             exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
3432             exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
3433                 printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
3434                        "exit_code 0x%x\n",
3435                        __func__, svm->vmcb->control.exit_int_info,
3436                        exit_code);
3437
3438         if (exit_fastpath != EXIT_FASTPATH_NONE)
3439                 return 1;
3440
3441         return svm_invoke_exit_handler(vcpu, exit_code);
3442 }
3443
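/*
 * Reload the host TR after running the guest.  The TSS descriptor type is
 * reset to "available" first because LTR faults on a descriptor that is
 * still marked busy.
 */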
3444 static void reload_tss(struct kvm_vcpu *vcpu)
3445 {
3446         struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
3447
3448         sd->tss_desc->type = 9; /* available 32/64-bit TSS */
3449         load_TR_desc();
3450 }
3451
3452 static void pre_svm_run(struct kvm_vcpu *vcpu)
3453 {
3454         struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
3455         struct vcpu_svm *svm = to_svm(vcpu);
3456
3457         /*
3458          * If the previous vmrun of the vmcb occurred on a different physical
3459          * cpu, then mark the vmcb dirty and assign a new asid.  Hardware's
3460          * vmcb clean bits are per logical CPU, as are KVM's asid assignments.
3461          */
3462         if (unlikely(svm->current_vmcb->cpu != vcpu->cpu)) {
3463                 svm->current_vmcb->asid_generation = 0;
3464                 vmcb_mark_all_dirty(svm->vmcb);
3465                 svm->current_vmcb->cpu = vcpu->cpu;
3466         }
3467
3468         if (sev_guest(vcpu->kvm))
3469                 return pre_sev_run(svm, vcpu->cpu);
3470
3471         /* FIXME: handle wraparound of asid_generation */
3472         if (svm->current_vmcb->asid_generation != sd->asid_generation)
3473                 new_asid(svm, sd);
3474 }
3475
3476 static void svm_inject_nmi(struct kvm_vcpu *vcpu)
3477 {
3478         struct vcpu_svm *svm = to_svm(vcpu);
3479
3480         svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
3481
3482         if (svm->nmi_l1_to_l2)
3483                 return;
3484
3485         vcpu->arch.hflags |= HF_NMI_MASK;
3486         if (!sev_es_guest(vcpu->kvm))
3487                 svm_set_intercept(svm, INTERCEPT_IRET);
3488         ++vcpu->stat.nmi_injections;
3489 }
3490
3491 static void svm_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
3492 {
3493         struct vcpu_svm *svm = to_svm(vcpu);
3494         u32 type;
3495
3496         if (vcpu->arch.interrupt.soft) {
3497                 if (svm_update_soft_interrupt_rip(vcpu))
3498                         return;
3499
3500                 type = SVM_EVTINJ_TYPE_SOFT;
3501         } else {
3502                 type = SVM_EVTINJ_TYPE_INTR;
3503         }
3504
3505         trace_kvm_inj_virq(vcpu->arch.interrupt.nr,
3506                            vcpu->arch.interrupt.soft, reinjected);
3507         ++vcpu->stat.irq_injections;
3508
3509         svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
3510                                        SVM_EVTINJ_VALID | type;
3511 }
3512
3513 void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
3514                                      int trig_mode, int vector)
3515 {
3516         /*
3517          * apic->apicv_active must be read after vcpu->mode.
3518          * Pairs with smp_store_release in vcpu_enter_guest.
3519          */
3520         bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE);
3521
3522         /* Note, this is called iff the local APIC is in-kernel. */
3523         if (!READ_ONCE(vcpu->arch.apic->apicv_active)) {
3524                 /* Process the interrupt via kvm_check_and_inject_events(). */
3525                 kvm_make_request(KVM_REQ_EVENT, vcpu);
3526                 kvm_vcpu_kick(vcpu);
3527                 return;
3528         }
3529
3530         trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector);
3531         if (in_guest_mode) {
3532                 /*
3533                  * Signal the doorbell to tell hardware to inject the IRQ.  If
3534                  * the vCPU exits the guest before the doorbell chimes, hardware
3535                  * will automatically process AVIC interrupts at the next VMRUN.
3536                  */
3537                 avic_ring_doorbell(vcpu);
3538         } else {
3539                 /*
3540                  * Wake the vCPU if it was blocking.  KVM will then detect the
3541                  * pending IRQ when checking if the vCPU has a wake event.
3542                  */
3543                 kvm_vcpu_wake_up(vcpu);
3544         }
3545 }
3546
3547 static void svm_deliver_interrupt(struct kvm_lapic *apic,  int delivery_mode,
3548                                   int trig_mode, int vector)
3549 {
3550         kvm_lapic_set_irr(vector, apic);
3551
3552         /*
3553          * Pairs with the smp_mb_*() after setting vcpu->mode in
3554          * vcpu_enter_guest() to ensure the write to the vIRR is ordered before
3555          * the read of vcpu->mode.  This guarantees that either VMRUN will see
3556          * and process the new vIRR entry, or that svm_complete_interrupt_delivery()
3557          * will signal the doorbell if the vCPU has already entered the guest.
3558          */
3559         smp_mb__after_atomic();
3560         svm_complete_interrupt_delivery(apic->vcpu, delivery_mode, trig_mode, vector);
3561 }
3562
3563 static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
3564 {
3565         struct vcpu_svm *svm = to_svm(vcpu);
3566
3567         /*
3568          * SEV-ES guests must always keep the CR intercepts cleared. CR
3569          * tracking is done using the CR write traps.
3570          */
3571         if (sev_es_guest(vcpu->kvm))
3572                 return;
3573
3574         if (nested_svm_virtualize_tpr(vcpu))
3575                 return;
3576
3577         svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
3578
3579         if (irr == -1)
3580                 return;
3581
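        /*
         * Re-arm the CR8 write intercept only while the highest pending
         * interrupt is blocked by the current TPR, so that KVM is notified
         * when the guest lowers its TPR and the interrupt can be delivered.
         */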
3582         if (tpr >= irr)
3583                 svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
3584 }
3585
3586 bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
3587 {
3588         struct vcpu_svm *svm = to_svm(vcpu);
3589         struct vmcb *vmcb = svm->vmcb;
3590         bool ret;
3591
3592         if (!gif_set(svm))
3593                 return true;
3594
3595         if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
3596                 return false;
3597
3598         ret = (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
3599               (vcpu->arch.hflags & HF_NMI_MASK);
3600
3601         return ret;
3602 }
3603
3604 static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
3605 {
3606         struct vcpu_svm *svm = to_svm(vcpu);
3607         if (svm->nested.nested_run_pending)
3608                 return -EBUSY;
3609
3610         if (svm_nmi_blocked(vcpu))
3611                 return 0;
3612
3613         /* An NMI must not be injected into L2 if it's supposed to VM-Exit.  */
3614         if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
3615                 return -EBUSY;
3616         return 1;
3617 }
3618
3619 static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
3620 {
3621         return !!(vcpu->arch.hflags & HF_NMI_MASK);
3622 }
3623
3624 static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
3625 {
3626         struct vcpu_svm *svm = to_svm(vcpu);
3627
3628         if (masked) {
3629                 vcpu->arch.hflags |= HF_NMI_MASK;
3630                 if (!sev_es_guest(vcpu->kvm))
3631                         svm_set_intercept(svm, INTERCEPT_IRET);
3632         } else {
3633                 vcpu->arch.hflags &= ~HF_NMI_MASK;
3634                 if (!sev_es_guest(vcpu->kvm))
3635                         svm_clr_intercept(svm, INTERCEPT_IRET);
3636         }
3637 }
3638
3639 bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
3640 {
3641         struct vcpu_svm *svm = to_svm(vcpu);
3642         struct vmcb *vmcb = svm->vmcb;
3643
3644         if (!gif_set(svm))
3645                 return true;
3646
3647         if (is_guest_mode(vcpu)) {
3648                 /* As long as interrupts are being delivered...  */
3649                 if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
3650                     ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF)
3651                     : !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
3652                         return true;
3653
3654                 /* ... vmexits aren't blocked by the interrupt shadow  */
3655                 if (nested_exit_on_intr(svm))
3656                         return false;
3657         } else {
3658                 if (!svm_get_if_flag(vcpu))
3659                         return true;
3660         }
3661
3662         return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK);
3663 }
3664
3665 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
3666 {
3667         struct vcpu_svm *svm = to_svm(vcpu);
3668
3669         if (svm->nested.nested_run_pending)
3670                 return -EBUSY;
3671
3672         if (svm_interrupt_blocked(vcpu))
3673                 return 0;
3674
3675         /*
3676          * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
3677          * e.g. if the IRQ arrived asynchronously after checking nested events.
3678          */
3679         if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm))
3680                 return -EBUSY;
3681
3682         return 1;
3683 }
3684
3685 static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
3686 {
3687         struct vcpu_svm *svm = to_svm(vcpu);
3688
3689         /*
3690          * If GIF=0 we can't rely on the CPU to tell us when GIF becomes 1,
3691          * because that's a separate STGI/VMRUN intercept.  The next time we
3692          * get that intercept, this function will be called again and we'll
3693          * request the VINTR intercept then.  However, if the vGIF feature is
3694          * enabled, the STGI interception will not occur.  Enable the IRQ
3695          * window under the assumption that the hardware will set the GIF.
3696          */
3697         if (vgif || gif_set(svm)) {
3698                 /*
3699                  * An IRQ window is not needed when AVIC is enabled,
3700                  * unless there is a pending ExtINT, which cannot be injected
3701                  * via AVIC.  In that case, KVM needs to temporarily disable
3702                  * AVIC and fall back to injecting the IRQ via V_IRQ.
3703                  *
3704                  * If running nested, AVIC is already locally inhibited
3705                  * on this vCPU, therefore there is no need to request
3706                  * the VM wide AVIC inhibition.
3707                  */
3708                 if (!is_guest_mode(vcpu))
3709                         kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);
3710
3711                 svm_set_vintr(svm);
3712         }
3713 }
3714
3715 static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
3716 {
3717         struct vcpu_svm *svm = to_svm(vcpu);
3718
3719         if ((vcpu->arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) == HF_NMI_MASK)
3720                 return; /* IRET will cause a vm exit */
3721
3722         if (!gif_set(svm)) {
3723                 if (vgif)
3724                         svm_set_intercept(svm, INTERCEPT_STGI);
3725                 return; /* STGI will cause a vm exit */
3726         }
3727
3728         /*
3729          * Something prevents the NMI from being injected.  Single step over
3730          * the possible blocker (IRET, exception injection or interrupt shadow).
3731          */
3732         svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
3733         svm->nmi_singlestep = true;
3734         svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
3735 }
3736
3737 static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
3738 {
3739         struct vcpu_svm *svm = to_svm(vcpu);
3740
3741         /*
3742          * Flush only the current ASID even if the TLB flush was invoked via
3743          * kvm_flush_remote_tlbs().  Although flushing remote TLBs requires all
3744          * ASIDs to be flushed, KVM uses a single ASID for L1 and L2, and
3745          * unconditionally does a TLB flush on both nested VM-Enter and nested
3746          * VM-Exit (via kvm_mmu_reset_context()).
3747          */
3748         if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
3749                 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
3750         else
3751                 svm->current_vmcb->asid_generation--;
3752 }
3753
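/*
 * INVLPGA invalidates the TLB entry for the given guest virtual address in
 * the specified ASID only, leaving other ASIDs untouched.
 */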
3754 static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
3755 {
3756         struct vcpu_svm *svm = to_svm(vcpu);
3757
3758         invlpga(gva, svm->vmcb->control.asid);
3759 }
3760
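/*
 * When CR8 writes are not intercepted the guest updates V_TPR directly, so
 * copy the value back into the emulated local APIC's TPR after a VM-exit.
 */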
3761 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
3762 {
3763         struct vcpu_svm *svm = to_svm(vcpu);
3764
3765         if (nested_svm_virtualize_tpr(vcpu))
3766                 return;
3767
3768         if (!svm_is_intercept(svm, INTERCEPT_CR8_WRITE)) {
3769                 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
3770                 kvm_set_cr8(vcpu, cr8);
3771         }
3772 }
3773
3774 static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
3775 {
3776         struct vcpu_svm *svm = to_svm(vcpu);
3777         u64 cr8;
3778
3779         if (nested_svm_virtualize_tpr(vcpu) ||
3780             kvm_vcpu_apicv_active(vcpu))
3781                 return;
3782
3783         cr8 = kvm_get_cr8(vcpu);
3784         svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
3785         svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
3786 }
3787
3788 static void svm_complete_soft_interrupt(struct kvm_vcpu *vcpu, u8 vector,
3789                                         int type)
3790 {
3791         bool is_exception = (type == SVM_EXITINTINFO_TYPE_EXEPT);
3792         bool is_soft = (type == SVM_EXITINTINFO_TYPE_SOFT);
3793         struct vcpu_svm *svm = to_svm(vcpu);
3794
3795         /*
3796          * If NRIPS is enabled, KVM must snapshot the pre-VMRUN next_rip that's
3797          * associated with the original soft exception/interrupt.  next_rip is
3798          * cleared on all exits that can occur while vectoring an event, so KVM
3799          * needs to manually set next_rip for re-injection.  Unlike the !nrips
3800          * case below, this needs to be done if and only if KVM is re-injecting
3801          * the same event, i.e. if the event is a soft exception/interrupt,
3802          * otherwise next_rip is unused on VMRUN.
3803          */
3804         if (nrips && (is_soft || (is_exception && kvm_exception_is_soft(vector))) &&
3805             kvm_is_linear_rip(vcpu, svm->soft_int_old_rip + svm->soft_int_csbase))
3806                 svm->vmcb->control.next_rip = svm->soft_int_next_rip;
3807         /*
3808          * If NRIPS isn't enabled, KVM must manually advance RIP prior to
3809          * injecting the soft exception/interrupt.  That advancement needs to
3810          * be unwound if vectoring didn't complete.  Note, the new event may
3811          * not be the injected event, e.g. if KVM injected an INTn, the INTn
3812          * hit a #NP in the guest, and the #NP encountered a #PF, the #NP will
3813          * be the reported vectored event, but RIP still needs to be unwound.
3814          */
3815         else if (!nrips && (is_soft || is_exception) &&
3816                  kvm_is_linear_rip(vcpu, svm->soft_int_next_rip + svm->soft_int_csbase))
3817                 kvm_rip_write(vcpu, svm->soft_int_old_rip);
3818 }
3819
3820 static void svm_complete_interrupts(struct kvm_vcpu *vcpu)
3821 {
3822         struct vcpu_svm *svm = to_svm(vcpu);
3823         u8 vector;
3824         int type;
3825         u32 exitintinfo = svm->vmcb->control.exit_int_info;
3826         bool nmi_l1_to_l2 = svm->nmi_l1_to_l2;
3827         bool soft_int_injected = svm->soft_int_injected;
3828
3829         svm->nmi_l1_to_l2 = false;
3830         svm->soft_int_injected = false;
3831
3832         /*
3833          * If we've made progress since setting HF_IRET_MASK, we've
3834          * executed an IRET and can allow NMI injection.
3835          */
3836         if ((vcpu->arch.hflags & HF_IRET_MASK) &&
3837             (sev_es_guest(vcpu->kvm) ||
3838              kvm_rip_read(vcpu) != svm->nmi_iret_rip)) {
3839                 vcpu->arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
3840                 kvm_make_request(KVM_REQ_EVENT, vcpu);
3841         }
3842
3843         vcpu->arch.nmi_injected = false;
3844         kvm_clear_exception_queue(vcpu);
3845         kvm_clear_interrupt_queue(vcpu);
3846
3847         if (!(exitintinfo & SVM_EXITINTINFO_VALID))
3848                 return;
3849
3850         kvm_make_request(KVM_REQ_EVENT, vcpu);
3851
3852         vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
3853         type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
3854
3855         if (soft_int_injected)
3856                 svm_complete_soft_interrupt(vcpu, vector, type);
3857
3858         switch (type) {
3859         case SVM_EXITINTINFO_TYPE_NMI:
3860                 vcpu->arch.nmi_injected = true;
3861                 svm->nmi_l1_to_l2 = nmi_l1_to_l2;
3862                 break;
3863         case SVM_EXITINTINFO_TYPE_EXEPT:
3864                 /*
3865                  * Never re-inject a #VC exception.
3866                  */
3867                 if (vector == X86_TRAP_VC)
3868                         break;
3869
3870                 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
3871                         u32 err = svm->vmcb->control.exit_int_info_err;
3872                         kvm_requeue_exception_e(vcpu, vector, err);
3873                 } else {
3874                         kvm_requeue_exception(vcpu, vector);
3875                 }
3876                 break;
3877         case SVM_EXITINTINFO_TYPE_INTR:
3878                 kvm_queue_interrupt(vcpu, vector, false);
3879                 break;
3880         case SVM_EXITINTINFO_TYPE_SOFT:
3881                 kvm_queue_interrupt(vcpu, vector, true);
3882                 break;
3883         default:
3884                 break;
3885         }
3886
3887 }
3888
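/*
 * Move an event that was pending in EVENTINJ (i.e. not yet delivered) back
 * into EXITINTINFO so that svm_complete_interrupts() re-queues it and it is
 * injected again on a later VM-entry.
 */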
3889 static void svm_cancel_injection(struct kvm_vcpu *vcpu)
3890 {
3891         struct vcpu_svm *svm = to_svm(vcpu);
3892         struct vmcb_control_area *control = &svm->vmcb->control;
3893
3894         control->exit_int_info = control->event_inj;
3895         control->exit_int_info_err = control->event_inj_err;
3896         control->event_inj = 0;
3897         svm_complete_interrupts(vcpu);
3898 }
3899
3900 static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu)
3901 {
3902         return 1;
3903 }
3904
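/*
 * Only WRMSR exits (EXITINFO1 != 0) are eligible for the fastpath; the
 * common handler then decides whether the MSR in question can be emulated
 * with interrupts still disabled.
 */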
3905 static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
3906 {
3907         if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
3908             to_svm(vcpu)->vmcb->control.exit_info_1)
3909                 return handle_fastpath_set_msr_irqoff(vcpu);
3910
3911         return EXIT_FASTPATH_NONE;
3912 }
3913
3914 static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
3915 {
3916         struct vcpu_svm *svm = to_svm(vcpu);
3917         unsigned long vmcb_pa = svm->current_vmcb->pa;
3918
3919         guest_state_enter_irqoff();
3920
3921         if (sev_es_guest(vcpu->kvm)) {
3922                 __svm_sev_es_vcpu_run(vmcb_pa);
3923         } else {
3924                 struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
3925
3926                 /*
3927                  * Use a single vmcb (vmcb01 because it's always valid) for
3928                  * context switching guest state via VMLOAD/VMSAVE, that way
3929                  * the state doesn't need to be copied between vmcb01 and
3930                  * vmcb02 when switching vmcbs for nested virtualization.
3931                  */
3932                 vmload(svm->vmcb01.pa);
3933                 __svm_vcpu_run(vmcb_pa, (unsigned long *)&vcpu->arch.regs);
3934                 vmsave(svm->vmcb01.pa);
3935
3936                 vmload(__sme_page_pa(sd->save_area));
3937         }
3938
3939         guest_state_exit_irqoff();
3940 }
3941
3942 static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
3943 {
3944         struct vcpu_svm *svm = to_svm(vcpu);
3945
3946         trace_kvm_entry(vcpu);
3947
3948         svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
3949         svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
3950         svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
3951
3952         /*
3953          * Disable single-stepping if we're injecting an interrupt/exception.
3954          * We don't want our modified rflags to be pushed onto the stack,
3955          * where we might not be able to easily reset them when NMI
3956          * single-stepping is disabled later.
3957          */
3958         if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
3959                 /*
3960                  * Event injection happens before external interrupts cause a
3961                  * vmexit and interrupts are disabled here, so smp_send_reschedule
3962                  * is enough to force an immediate vmexit.
3963                  */
3964                 disable_nmi_singlestep(svm);
3965                 smp_send_reschedule(vcpu->cpu);
3966         }
3967
3968         pre_svm_run(vcpu);
3969
3970         sync_lapic_to_cr8(vcpu);
3971
3972         if (unlikely(svm->asid != svm->vmcb->control.asid)) {
3973                 svm->vmcb->control.asid = svm->asid;
3974                 vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
3975         }
3976         svm->vmcb->save.cr2 = vcpu->arch.cr2;
3977
3978         svm_hv_update_vp_id(svm->vmcb, vcpu);
3979
3980         /*
3981          * Run with all-zero DR6 unless needed, so that we can get the exact cause
3982          * of a #DB.
3983          */
3984         if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
3985                 svm_set_dr6(svm, vcpu->arch.dr6);
3986         else
3987                 svm_set_dr6(svm, DR6_ACTIVE_LOW);
3988
3989         clgi();
3990         kvm_load_guest_xsave_state(vcpu);
3991
3992         kvm_wait_lapic_expire(vcpu);
3993
3994         /*
3995          * If this vCPU has touched SPEC_CTRL, restore the guest's value if
3996          * it's non-zero. Since vmentry is serialising on affected CPUs, there
3997          * is no need to worry about the conditional branch over the wrmsr
3998          * being speculatively taken.
3999          */
4000         if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
4001                 x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
4002
4003         svm_vcpu_enter_exit(vcpu);
4004
4005         /*
4006          * We do not use IBRS in the kernel. If this vCPU has used the
4007          * SPEC_CTRL MSR it may have left it on; save the value and
4008          * turn it off. This is much more efficient than blindly adding
4009          * it to the atomic save/restore list. Especially as the former
4010          * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
4011          *
4012          * For non-nested case:
4013          * If the L01 MSR bitmap does not intercept the MSR, then we need to
4014          * save it.
4015          *
4016          * For nested case:
4017          * If the L02 MSR bitmap does not intercept the MSR, then we need to
4018          * save it.
4019          */
4020         if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL) &&
4021             unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
4022                 svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
4023
4024         if (!sev_es_guest(vcpu->kvm))
4025                 reload_tss(vcpu);
4026
4027         if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
4028                 x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
4029
4030         if (!sev_es_guest(vcpu->kvm)) {
4031                 vcpu->arch.cr2 = svm->vmcb->save.cr2;
4032                 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
4033                 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
4034                 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
4035         }
4036         vcpu->arch.regs_dirty = 0;
4037
4038         if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
4039                 kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
4040
4041         kvm_load_host_xsave_state(vcpu);
4042         stgi();
4043
4044         /* Any pending NMI will happen here */
4045
4046         if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
4047                 kvm_after_interrupt(vcpu);
4048
4049         sync_cr8_to_lapic(vcpu);
4050
4051         svm->next_rip = 0;
4052         if (is_guest_mode(vcpu)) {
4053                 nested_sync_control_from_vmcb02(svm);
4054
4055                 /* Track VMRUNs that have made it past consistency checking */
4056                 if (svm->nested.nested_run_pending &&
4057                     svm->vmcb->control.exit_code != SVM_EXIT_ERR)
4058                         ++vcpu->stat.nested_run;
4059
4060                 svm->nested.nested_run_pending = 0;
4061         }
4062
4063         svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
4064         vmcb_mark_all_clean(svm->vmcb);
4065
4066         /* if exit due to PF check for async PF */
4067         if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
4068                 vcpu->arch.apf.host_apf_flags =
4069                         kvm_read_and_reset_apf_flags();
4070
4071         vcpu->arch.regs_avail &= ~SVM_REGS_LAZY_LOAD_SET;
4072
4073         /*
4074          * We need to handle MC intercepts here before the vcpu has a chance to
4075          * change the physical cpu
4076          */
4077         if (unlikely(svm->vmcb->control.exit_code ==
4078                      SVM_EXIT_EXCP_BASE + MC_VECTOR))
4079                 svm_handle_mce(vcpu);
4080
4081         svm_complete_interrupts(vcpu);
4082
4083         if (is_guest_mode(vcpu))
4084                 return EXIT_FASTPATH_NONE;
4085
4086         return svm_exit_handlers_fastpath(vcpu);
4087 }
4088
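/*
 * Load the new root of the guest page tables: with NPT it becomes the nested
 * CR3 and the guest's own CR3 is preserved, without NPT it is written as the
 * shadow CR3 on the guest's behalf.
 */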
4089 static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
4090                              int root_level)
4091 {
4092         struct vcpu_svm *svm = to_svm(vcpu);
4093         unsigned long cr3;
4094
4095         if (npt_enabled) {
4096                 svm->vmcb->control.nested_cr3 = __sme_set(root_hpa);
4097                 vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
4098
4099                 hv_track_root_tdp(vcpu, root_hpa);
4100
4101                 cr3 = vcpu->arch.cr3;
4102         } else if (root_level >= PT64_ROOT_4LEVEL) {
4103                 cr3 = __sme_set(root_hpa) | kvm_get_active_pcid(vcpu);
4104         } else {
4105                 /* PCID in the guest should be impossible with a 32-bit MMU. */
4106                 WARN_ON_ONCE(kvm_get_active_pcid(vcpu));
4107                 cr3 = root_hpa;
4108         }
4109
4110         svm->vmcb->save.cr3 = cr3;
4111         vmcb_mark_dirty(svm->vmcb, VMCB_CR);
4112 }
4113
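/*
 * SVM may have been disabled, typically by the firmware, via the SVMDIS bit
 * in the VM_CR MSR; refuse to use SVM in that case.
 */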
4114 static int is_disabled(void)
4115 {
4116         u64 vm_cr;
4117
4118         rdmsrl(MSR_VM_CR, vm_cr);
4119         if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
4120                 return 1;
4121
4122         return 0;
4123 }
4124
4125 static void
4126 svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
4127 {
4128         /*
4129          * Patch in the VMMCALL instruction:
4130          */
4131         hypercall[0] = 0x0f;
4132         hypercall[1] = 0x01;
4133         hypercall[2] = 0xd9;
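        /* 0f 01 d9 is the opcode encoding of VMMCALL. */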
4134 }
4135
4136 static int __init svm_check_processor_compat(void)
4137 {
4138         return 0;
4139 }
4140
4141 /*
4142  * The kvm parameter can be NULL (module initialization, or invocation before
4143  * VM creation). Be sure to check the kvm parameter before using it.
4144  */
4145 static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
4146 {
4147         switch (index) {
4148         case MSR_IA32_MCG_EXT_CTL:
4149         case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
4150                 return false;
4151         case MSR_IA32_SMBASE:
4152                 /* SEV-ES guests do not support SMM, so report false */
4153                 if (kvm && sev_es_guest(kvm))
4154                         return false;
4155                 break;
4156         default:
4157                 break;
4158         }
4159
4160         return true;
4161 }
4162
4163 static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
4164 {
4165         struct vcpu_svm *svm = to_svm(vcpu);
4166         struct kvm_cpuid_entry2 *best;
4167
4168         vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
4169                                     boot_cpu_has(X86_FEATURE_XSAVE) &&
4170                                     boot_cpu_has(X86_FEATURE_XSAVES);
4171
4172         /* Update nrips enabled cache */
4173         svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) &&
4174                              guest_cpuid_has(vcpu, X86_FEATURE_NRIPS);
4175
4176         svm->tsc_scaling_enabled = tsc_scaling && guest_cpuid_has(vcpu, X86_FEATURE_TSCRATEMSR);
4177         svm->lbrv_enabled = lbrv && guest_cpuid_has(vcpu, X86_FEATURE_LBRV);
4178
4179         svm->v_vmload_vmsave_enabled = vls && guest_cpuid_has(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
4180
4181         svm->pause_filter_enabled = kvm_cpu_cap_has(X86_FEATURE_PAUSEFILTER) &&
4182                         guest_cpuid_has(vcpu, X86_FEATURE_PAUSEFILTER);
4183
4184         svm->pause_threshold_enabled = kvm_cpu_cap_has(X86_FEATURE_PFTHRESHOLD) &&
4185                         guest_cpuid_has(vcpu, X86_FEATURE_PFTHRESHOLD);
4186
4187         svm->vgif_enabled = vgif && guest_cpuid_has(vcpu, X86_FEATURE_VGIF);
4188
4189         svm_recalc_instruction_intercepts(vcpu, svm);
4190
4191         /* For sev guests, the memory encryption bit is not reserved in CR3.  */
4192         if (sev_guest(vcpu->kvm)) {
4193                 best = kvm_find_cpuid_entry(vcpu, 0x8000001F);
4194                 if (best)
4195                         vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));
4196         }
4197
4198         init_vmcb_after_set_cpuid(vcpu);
4199 }
4200
4201 static bool svm_has_wbinvd_exit(void)
4202 {
4203         return true;
4204 }
4205
4206 #define PRE_EX(exit)  { .exit_code = (exit), \
4207                         .stage = X86_ICPT_PRE_EXCEPT, }
4208 #define POST_EX(exit) { .exit_code = (exit), \
4209                         .stage = X86_ICPT_POST_EXCEPT, }
4210 #define POST_MEM(exit) { .exit_code = (exit), \
4211                         .stage = X86_ICPT_POST_MEMACCESS, }
4212
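/*
 * Map the x86 emulator's intercept identifiers to the SVM exit code that
 * must be checked against the nested guest's intercepts, together with the
 * emulation stage at which the check is performed.
 */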
4213 static const struct __x86_intercept {
4214         u32 exit_code;
4215         enum x86_intercept_stage stage;
4216 } x86_intercept_map[] = {
4217         [x86_intercept_cr_read]         = POST_EX(SVM_EXIT_READ_CR0),
4218         [x86_intercept_cr_write]        = POST_EX(SVM_EXIT_WRITE_CR0),
4219         [x86_intercept_clts]            = POST_EX(SVM_EXIT_WRITE_CR0),
4220         [x86_intercept_lmsw]            = POST_EX(SVM_EXIT_WRITE_CR0),
4221         [x86_intercept_smsw]            = POST_EX(SVM_EXIT_READ_CR0),
4222         [x86_intercept_dr_read]         = POST_EX(SVM_EXIT_READ_DR0),
4223         [x86_intercept_dr_write]        = POST_EX(SVM_EXIT_WRITE_DR0),
4224         [x86_intercept_sldt]            = POST_EX(SVM_EXIT_LDTR_READ),
4225         [x86_intercept_str]             = POST_EX(SVM_EXIT_TR_READ),
4226         [x86_intercept_lldt]            = POST_EX(SVM_EXIT_LDTR_WRITE),
4227         [x86_intercept_ltr]             = POST_EX(SVM_EXIT_TR_WRITE),
4228         [x86_intercept_sgdt]            = POST_EX(SVM_EXIT_GDTR_READ),
4229         [x86_intercept_sidt]            = POST_EX(SVM_EXIT_IDTR_READ),
4230         [x86_intercept_lgdt]            = POST_EX(SVM_EXIT_GDTR_WRITE),
4231         [x86_intercept_lidt]            = POST_EX(SVM_EXIT_IDTR_WRITE),
4232         [x86_intercept_vmrun]           = POST_EX(SVM_EXIT_VMRUN),
4233         [x86_intercept_vmmcall]         = POST_EX(SVM_EXIT_VMMCALL),
4234         [x86_intercept_vmload]          = POST_EX(SVM_EXIT_VMLOAD),
4235         [x86_intercept_vmsave]          = POST_EX(SVM_EXIT_VMSAVE),
4236         [x86_intercept_stgi]            = POST_EX(SVM_EXIT_STGI),
4237         [x86_intercept_clgi]            = POST_EX(SVM_EXIT_CLGI),
4238         [x86_intercept_skinit]          = POST_EX(SVM_EXIT_SKINIT),
4239         [x86_intercept_invlpga]         = POST_EX(SVM_EXIT_INVLPGA),
4240         [x86_intercept_rdtscp]          = POST_EX(SVM_EXIT_RDTSCP),
4241         [x86_intercept_monitor]         = POST_MEM(SVM_EXIT_MONITOR),
4242         [x86_intercept_mwait]           = POST_EX(SVM_EXIT_MWAIT),
4243         [x86_intercept_invlpg]          = POST_EX(SVM_EXIT_INVLPG),
4244         [x86_intercept_invd]            = POST_EX(SVM_EXIT_INVD),
4245         [x86_intercept_wbinvd]          = POST_EX(SVM_EXIT_WBINVD),
4246         [x86_intercept_wrmsr]           = POST_EX(SVM_EXIT_MSR),
4247         [x86_intercept_rdtsc]           = POST_EX(SVM_EXIT_RDTSC),
4248         [x86_intercept_rdmsr]           = POST_EX(SVM_EXIT_MSR),
4249         [x86_intercept_rdpmc]           = POST_EX(SVM_EXIT_RDPMC),
4250         [x86_intercept_cpuid]           = PRE_EX(SVM_EXIT_CPUID),
4251         [x86_intercept_rsm]             = PRE_EX(SVM_EXIT_RSM),
4252         [x86_intercept_pause]           = PRE_EX(SVM_EXIT_PAUSE),
4253         [x86_intercept_pushf]           = PRE_EX(SVM_EXIT_PUSHF),
4254         [x86_intercept_popf]            = PRE_EX(SVM_EXIT_POPF),
4255         [x86_intercept_intn]            = PRE_EX(SVM_EXIT_SWINT),
4256         [x86_intercept_iret]            = PRE_EX(SVM_EXIT_IRET),
4257         [x86_intercept_icebp]           = PRE_EX(SVM_EXIT_ICEBP),
4258         [x86_intercept_hlt]             = POST_EX(SVM_EXIT_HLT),
4259         [x86_intercept_in]              = POST_EX(SVM_EXIT_IOIO),
4260         [x86_intercept_ins]             = POST_EX(SVM_EXIT_IOIO),
4261         [x86_intercept_out]             = POST_EX(SVM_EXIT_IOIO),
4262         [x86_intercept_outs]            = POST_EX(SVM_EXIT_IOIO),
4263         [x86_intercept_xsetbv]          = PRE_EX(SVM_EXIT_XSETBV),
4264 };
4265
4266 #undef PRE_EX
4267 #undef POST_EX
4268 #undef POST_MEM
4269
4270 static int svm_check_intercept(struct kvm_vcpu *vcpu,
4271                                struct x86_instruction_info *info,
4272                                enum x86_intercept_stage stage,
4273                                struct x86_exception *exception)
4274 {
4275         struct vcpu_svm *svm = to_svm(vcpu);
4276         int vmexit, ret = X86EMUL_CONTINUE;
4277         struct __x86_intercept icpt_info;
4278         struct vmcb *vmcb = svm->vmcb;
4279
4280         if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
4281                 goto out;
4282
4283         icpt_info = x86_intercept_map[info->intercept];
4284
4285         if (stage != icpt_info.stage)
4286                 goto out;
4287
4288         switch (icpt_info.exit_code) {
4289         case SVM_EXIT_READ_CR0:
4290                 if (info->intercept == x86_intercept_cr_read)
4291                         icpt_info.exit_code += info->modrm_reg;
4292                 break;
4293         case SVM_EXIT_WRITE_CR0: {
4294                 unsigned long cr0, val;
4295
4296                 if (info->intercept == x86_intercept_cr_write)
4297                         icpt_info.exit_code += info->modrm_reg;
4298
4299                 if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
4300                     info->intercept == x86_intercept_clts)
4301                         break;
4302
4303                 if (!(vmcb12_is_intercept(&svm->nested.ctl,
4304                                         INTERCEPT_SELECTIVE_CR0)))
4305                         break;
4306
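                /*
                 * A sketch of the comparison below, assuming
                 * SVM_CR0_SELECTIVE_MASK covers CR0.TS and CR0.MP: the
                 * selective CR0 write intercept only fires when bits other
                 * than TS/MP change, so strip those bits from both the old
                 * and the new value before comparing.
                 */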
4307                 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
4308                 val = info->src_val  & ~SVM_CR0_SELECTIVE_MASK;
4309
4310                 if (info->intercept == x86_intercept_lmsw) {
4311                         cr0 &= 0xfUL;
4312                         val &= 0xfUL;
4313                         /* lmsw can't clear PE - catch this here */
4314                         if (cr0 & X86_CR0_PE)
4315                                 val |= X86_CR0_PE;
4316                 }
4317
4318                 if (cr0 ^ val)
4319                         icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
4320
4321                 break;
4322         }
4323         case SVM_EXIT_READ_DR0:
4324         case SVM_EXIT_WRITE_DR0:
4325                 icpt_info.exit_code += info->modrm_reg;
4326                 break;
4327         case SVM_EXIT_MSR:
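                /*
                 * exit_info_1 distinguishes the direction, as the hardware
                 * MSR intercept does: 1 for WRMSR, 0 for RDMSR.
                 */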
4328                 if (info->intercept == x86_intercept_wrmsr)
4329                         vmcb->control.exit_info_1 = 1;
4330                 else
4331                         vmcb->control.exit_info_1 = 0;
4332                 break;
4333         case SVM_EXIT_PAUSE:
4334                 /*
4335                  * The emulator gives us this for plain NOP as well, but
4336                  * PAUSE is encoded as REP NOP, so require the REP prefix here.
4337                  */
4338                 if (info->rep_prefix != REPE_PREFIX)
4339                         goto out;
4340                 break;
4341         case SVM_EXIT_IOIO: {
4342                 u64 exit_info;
4343                 u32 bytes;
4344
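                /*
                 * Build exit_info_1 the way the hardware IOIO intercept would,
                 * with the layout assumed from the SVM_IOIO_* masks used below:
                 * port number in bits 31:16, the TYPE bit set for IN, STR/REP
                 * for string and repeated forms, and the operand/address sizes
                 * in the SZ/ASIZE fields.
                 */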
4345                 if (info->intercept == x86_intercept_in ||
4346                     info->intercept == x86_intercept_ins) {
4347                         exit_info = ((info->src_val & 0xffff) << 16) |
4348                                 SVM_IOIO_TYPE_MASK;
4349                         bytes = info->dst_bytes;
4350                 } else {
4351                         exit_info = (info->dst_val & 0xffff) << 16;
4352                         bytes = info->src_bytes;
4353                 }
4354
4355                 if (info->intercept == x86_intercept_outs ||
4356                     info->intercept == x86_intercept_ins)
4357                         exit_info |= SVM_IOIO_STR_MASK;
4358
4359                 if (info->rep_prefix)
4360                         exit_info |= SVM_IOIO_REP_MASK;
4361
4362                 bytes = min(bytes, 4u);
4363
4364                 exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
4365
4366                 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
4367
4368                 vmcb->control.exit_info_1 = exit_info;
4369                 vmcb->control.exit_info_2 = info->next_rip;
4370
4371                 break;
4372         }
4373         default:
4374                 break;
4375         }
4376
4377         /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
4378         if (static_cpu_has(X86_FEATURE_NRIPS))
4379                 vmcb->control.next_rip  = info->next_rip;
4380         vmcb->control.exit_code = icpt_info.exit_code;
4381         vmexit = nested_svm_exit_handled(svm);
4382
4383         ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
4384                                            : X86EMUL_CONTINUE;
4385
4386 out:
4387         return ret;
4388 }
4389
4390 static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
4391 {
4392         if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR)
4393                 vcpu->arch.at_instruction_boundary = true;
4394 }
4395
4396 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
4397 {
4398         if (!kvm_pause_in_guest(vcpu->kvm))
4399                 shrink_ple_window(vcpu);
4400 }
4401
4402 static void svm_setup_mce(struct kvm_vcpu *vcpu)
4403 {
4404         /* [63:9] are reserved. */
4405         vcpu->arch.mcg_cap &= 0x1ff;
4406 }
4407
4408 bool svm_smi_blocked(struct kvm_vcpu *vcpu)
4409 {
4410         struct vcpu_svm *svm = to_svm(vcpu);
4411
4412         /* Per APM Vol.2 15.22.2 "Response to SMI" */
4413         if (!gif_set(svm))
4414                 return true;
4415
4416         return is_smm(vcpu);
4417 }
4418
4419 static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
4420 {
4421         struct vcpu_svm *svm = to_svm(vcpu);
4422         if (svm->nested.nested_run_pending)
4423                 return -EBUSY;
4424
4425         if (svm_smi_blocked(vcpu))
4426                 return 0;
4427
4428         /* An SMI must not be injected into L2 if it's supposed to VM-Exit.  */
4429         if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm))
4430                 return -EBUSY;
4431
4432         return 1;
4433 }
4434
4435 static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
4436 {
4437         struct vcpu_svm *svm = to_svm(vcpu);
4438         struct kvm_host_map map_save;
4439         int ret;
4440
4441         if (!is_guest_mode(vcpu))
4442                 return 0;
4443
4444         /* FED8h - SVM Guest */
4445         put_smstate(u64, smstate, 0x7ed8, 1);
4446         /* FEE0h - SVM Guest VMCB Physical Address */
4447         put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
4448
4449         svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
4450         svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
4451         svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
4452
4453         ret = nested_svm_simple_vmexit(svm, SVM_EXIT_SW);
4454         if (ret)
4455                 return ret;
4456
4457         /*
4458          * KVM uses VMCB01 to store L1 host state while L2 runs, but
4459          * VMCB01 is going to be used during SMM and thus the state will
4460          * be lost. Temporarily save the non-VMLOAD/VMSAVE state to the host
4461          * save area pointed to by MSR_VM_HSAVE_PA. The APM guarantees that
4462          * the format of the area is identical to the guest save area offset
4463          * by 0x400 (matching the offset of 'struct vmcb_save_area'
4464          * within 'struct vmcb'). Note: the HSAVE area may also be used by
4465          * the L1 hypervisor to save additional host context (e.g. KVM does
4466          * that, see svm_prepare_switch_to_guest()) which must be
4467          * preserved.
4468          */
4469         if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
4470                          &map_save) == -EINVAL)
4471                 return 1;
4472
4473         BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
4474
4475         svm_copy_vmrun_state(map_save.hva + 0x400,
4476                              &svm->vmcb01.ptr->save);
4477
4478         kvm_vcpu_unmap(vcpu, &map_save, true);
4479         return 0;
4480 }
4481
4482 static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
4483 {
4484         struct vcpu_svm *svm = to_svm(vcpu);
4485         struct kvm_host_map map, map_save;
4486         u64 saved_efer, vmcb12_gpa;
4487         struct vmcb *vmcb12;
4488         int ret;
4489
4490         if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
4491                 return 0;
4492
4493         /* Non-zero if SMI arrived while vCPU was in guest mode. */
4494         if (!GET_SMSTATE(u64, smstate, 0x7ed8))
4495                 return 0;
4496
4497         if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
4498                 return 1;
4499
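        /* FED0h - EFER (assumed from the 64-bit SMM state-save layout) */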
4500         saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
4501         if (!(saved_efer & EFER_SVME))
4502                 return 1;
4503
4504         vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
4505         if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
4506                 return 1;
4507
4508         ret = 1;
4509         if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save) == -EINVAL)
4510                 goto unmap_map;
4511
4512         if (svm_allocate_nested(svm))
4513                 goto unmap_save;
4514
4515         /*
4516          * Restore L1 host state from L1 HSAVE area as VMCB01 was
4517          * used during SMM (see svm_enter_smm())
4518          */
4519
4520         svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);
4521
4522         /*
4523          * Enter the nested guest now
4524          */
4525
4526         vmcb_mark_all_dirty(svm->vmcb01.ptr);
4527
4528         vmcb12 = map.hva;
4529         nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
4530         nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
4531         ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
4532
4533         if (ret)
4534                 goto unmap_save;
4535
4536         svm->nested.nested_run_pending = 1;
4537
4538 unmap_save:
4539         kvm_vcpu_unmap(vcpu, &map_save, true);
4540 unmap_map:
4541         kvm_vcpu_unmap(vcpu, &map, true);
4542         return ret;
4543 }
4544
4545 static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
4546 {
4547         struct vcpu_svm *svm = to_svm(vcpu);
4548
4549         if (!gif_set(svm)) {
4550                 if (vgif)
4551                         svm_set_intercept(svm, INTERCEPT_STGI);
4552                 /* STGI will cause a vm exit */
4553         } else {
4554                 /* We must be in SMM; RSM will cause a vmexit anyway.  */
4555         }
4556 }
4557
4558 static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
4559                                         void *insn, int insn_len)
4560 {
4561         bool smep, smap, is_user;
4562         unsigned long cr4;
4563         u64 error_code;
4564
4565         /* Emulation is always possible when KVM has access to all guest state. */
4566         if (!sev_guest(vcpu->kvm))
4567                 return true;
4568
4569         /* #UD and #GP should never be intercepted for SEV guests. */
4570         WARN_ON_ONCE(emul_type & (EMULTYPE_TRAP_UD |
4571                                   EMULTYPE_TRAP_UD_FORCED |
4572                                   EMULTYPE_VMWARE_GP));
4573
4574         /*
4575          * Emulation is impossible for SEV-ES guests as KVM doesn't have access
4576          * to guest register state.
4577          */
4578         if (sev_es_guest(vcpu->kvm))
4579                 return false;
4580
4581         /*
4582          * Emulation is possible if the instruction is already decoded, e.g.
4583          * when completing I/O after returning from userspace.
4584          */
4585         if (emul_type & EMULTYPE_NO_DECODE)
4586                 return true;
4587
4588         /*
4589          * Emulation is possible for SEV guests if and only if a prefilled
4590          * buffer containing the bytes of the intercepted instruction is
4591          * available. SEV guest memory is encrypted with a guest-specific key
4592          * and cannot be decrypted by KVM, i.e. KVM would read ciphertext and
4593          * decode garbage.
4594          *
4595          * Inject #UD if KVM reached this point without an instruction buffer.
4596          * In practice, this path should never be hit by a well-behaved guest,
4597          * e.g. KVM doesn't intercept #UD or #GP for SEV guests, but this path
4598          * is still theoretically reachable, e.g. via unaccelerated fault-like
4599          * AVIC access, and needs to be handled by KVM to avoid putting the
4600          * guest into an infinite loop.  Injecting #UD is somewhat arbitrary,
4601          * but it's the least awful option given the lack of insight into the guest.
4602          */
4603         if (unlikely(!insn)) {
4604                 kvm_queue_exception(vcpu, UD_VECTOR);
4605                 return false;
4606         }
4607
4608         /*
4609          * Emulate for SEV guests if the insn buffer is not empty.  The buffer
4610          * will be empty if the DecodeAssist microcode cannot fetch bytes for
4611          * the faulting instruction because the code fetch itself faulted, e.g.
4612          * the guest attempted to fetch from emulated MMIO or a guest page
4613          * table used to translate CS:RIP resides in emulated MMIO.
4614          */
4615         if (likely(insn_len))
4616                 return true;
4617
4618         /*
4619          * Detect and workaround Errata 1096 Fam_17h_00_0Fh.
4620          *
4621          * Errata:
4622          * When CPU raises #NPF on guest data access and vCPU CR4.SMAP=1, it is
4623          * possible that CPU microcode implementing DecodeAssist will fail to
4624          * read guest memory at CS:RIP and vmcb.GuestIntrBytes will incorrectly
4625          * be '0'.  This happens because microcode reads CS:RIP using a _data_
4626          * load uop with CPL=0 privileges.  If the load hits an SMAP #PF, ucode
4627          * gives up and does not fill the instruction bytes buffer.
4628          *
4629          * As above, KVM reaches this point iff the VM is an SEV guest, the CPU
4630          * supports DecodeAssist, a #NPF was raised, KVM's page fault handler
4631          * triggered emulation (e.g. for MMIO), and the CPU returned 0 in the
4632          * GuestIntrBytes field of the VMCB.
4633          *
4634          * This does _not_ mean that the erratum has been encountered, as the
4635          * DecodeAssist will also fail if the load for CS:RIP hits a legitimate
4636          * #PF, e.g. if the guest attempted to execute from emulated MMIO and
4637          * encountered a reserved/not-present #PF.
4638          *
4639          * To hit the erratum, the following conditions must be true:
4640          *    1. CR4.SMAP=1 (obviously).
4641          *    2. CR4.SMEP=0 || CPL=3.  If SMEP=1 and CPL<3, the erratum cannot
4642          *       have been hit as the guest would have encountered a SMEP
4643          *       violation #PF, not a #NPF.
4644          *    3. The #NPF is not due to a code fetch, in which case failure to
4645          *       retrieve the instruction bytes is legitimate (see above).
4646          *
4647          * In addition, don't apply the erratum workaround if the #NPF occurred
4648          * while translating guest page tables (see below).
4649          */
4650         error_code = to_svm(vcpu)->vmcb->control.exit_info_1;
4651         if (error_code & (PFERR_GUEST_PAGE_MASK | PFERR_FETCH_MASK))
4652                 goto resume_guest;
4653
4654         cr4 = kvm_read_cr4(vcpu);
4655         smep = cr4 & X86_CR4_SMEP;
4656         smap = cr4 & X86_CR4_SMAP;
4657         is_user = svm_get_cpl(vcpu) == 3;
4658         if (smap && (!smep || is_user)) {
4659                 pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n");
4660
4661                 /*
4662                  * If the fault occurred in userspace, arbitrarily inject #GP
4663                  * to avoid killing the guest and to hopefully avoid confusing
4664                  * the guest kernel too much, e.g. injecting #PF would not be
4665                  * coherent with respect to the guest's page tables.  Request
4666                  * triple fault if the fault occurred in the kernel as there's
4667                  * no fault that KVM can inject without confusing the guest.
4668                  * In practice, the triple fault is moot as no sane SEV kernel
4669                  * will execute from user memory while also running with SMAP=1.
4670                  */
4671                 if (is_user)
4672                         kvm_inject_gp(vcpu, 0);
4673                 else
4674                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
4675         }
4676
4677 resume_guest:
4678         /*
4679          * If the erratum was not hit, simply resume the guest and let it fault
4680          * again.  While awful, e.g. the vCPU may get stuck in an infinite loop
4681          * if the fault is at CPL=0, it's the lesser of all evils.  Exiting to
4682          * userspace will kill the guest, and letting the emulator read garbage
4683          * will yield random behavior and potentially corrupt the guest.
4684          *
4685          * Simply resuming the guest is technically not a violation of the SEV
4686          * architecture.  AMD's APM states that all code fetches and page table
4687          * accesses for SEV guests are encrypted, regardless of the C-Bit.  The
4688          * APM also states that encrypted accesses to MMIO are "ignored", but
4689          * doesn't explicitly define "ignored", i.e. doing nothing and letting
4690          * the guest spin is technically "ignoring" the access.
4691          */
4692         return false;
4693 }
4694
4695 static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
4696 {
4697         struct vcpu_svm *svm = to_svm(vcpu);
4698
4699         return !gif_set(svm);
4700 }
4701
4702 static void svm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
4703 {
4704         if (!sev_es_guest(vcpu->kvm))
4705                 return kvm_vcpu_deliver_sipi_vector(vcpu, vector);
4706
4707         sev_vcpu_deliver_sipi_vector(vcpu, vector);
4708 }
4709
4710 static void svm_vm_destroy(struct kvm *kvm)
4711 {
4712         avic_vm_destroy(kvm);
4713         sev_vm_destroy(kvm);
4714 }
4715
4716 static int svm_vm_init(struct kvm *kvm)
4717 {
4718         if (!pause_filter_count || !pause_filter_thresh)
4719                 kvm->arch.pause_in_guest = true;
4720
4721         if (enable_apicv) {
4722                 int ret = avic_vm_init(kvm);
4723                 if (ret)
4724                         return ret;
4725         }
4726
4727         return 0;
4728 }
4729
4730 static struct kvm_x86_ops svm_x86_ops __initdata = {
4731         .name = "kvm_amd",
4732
4733         .hardware_unsetup = svm_hardware_unsetup,
4734         .hardware_enable = svm_hardware_enable,
4735         .hardware_disable = svm_hardware_disable,
4736         .has_emulated_msr = svm_has_emulated_msr,
4737
4738         .vcpu_create = svm_vcpu_create,
4739         .vcpu_free = svm_vcpu_free,
4740         .vcpu_reset = svm_vcpu_reset,
4741
4742         .vm_size = sizeof(struct kvm_svm),
4743         .vm_init = svm_vm_init,
4744         .vm_destroy = svm_vm_destroy,
4745
4746         .prepare_switch_to_guest = svm_prepare_switch_to_guest,
4747         .vcpu_load = svm_vcpu_load,
4748         .vcpu_put = svm_vcpu_put,
4749         .vcpu_blocking = avic_vcpu_blocking,
4750         .vcpu_unblocking = avic_vcpu_unblocking,
4751
4752         .update_exception_bitmap = svm_update_exception_bitmap,
4753         .get_msr_feature = svm_get_msr_feature,
4754         .get_msr = svm_get_msr,
4755         .set_msr = svm_set_msr,
4756         .get_segment_base = svm_get_segment_base,
4757         .get_segment = svm_get_segment,
4758         .set_segment = svm_set_segment,
4759         .get_cpl = svm_get_cpl,
4760         .get_cs_db_l_bits = svm_get_cs_db_l_bits,
4761         .set_cr0 = svm_set_cr0,
4762         .post_set_cr3 = sev_post_set_cr3,
4763         .is_valid_cr4 = svm_is_valid_cr4,
4764         .set_cr4 = svm_set_cr4,
4765         .set_efer = svm_set_efer,
4766         .get_idt = svm_get_idt,
4767         .set_idt = svm_set_idt,
4768         .get_gdt = svm_get_gdt,
4769         .set_gdt = svm_set_gdt,
4770         .set_dr7 = svm_set_dr7,
4771         .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
4772         .cache_reg = svm_cache_reg,
4773         .get_rflags = svm_get_rflags,
4774         .set_rflags = svm_set_rflags,
4775         .get_if_flag = svm_get_if_flag,
4776
4777         .flush_tlb_all = svm_flush_tlb_current,
4778         .flush_tlb_current = svm_flush_tlb_current,
4779         .flush_tlb_gva = svm_flush_tlb_gva,
4780         .flush_tlb_guest = svm_flush_tlb_current,
4781
4782         .vcpu_pre_run = svm_vcpu_pre_run,
4783         .vcpu_run = svm_vcpu_run,
4784         .handle_exit = svm_handle_exit,
4785         .skip_emulated_instruction = svm_skip_emulated_instruction,
4786         .update_emulated_instruction = NULL,
4787         .set_interrupt_shadow = svm_set_interrupt_shadow,
4788         .get_interrupt_shadow = svm_get_interrupt_shadow,
4789         .patch_hypercall = svm_patch_hypercall,
4790         .inject_irq = svm_inject_irq,
4791         .inject_nmi = svm_inject_nmi,
4792         .inject_exception = svm_inject_exception,
4793         .cancel_injection = svm_cancel_injection,
4794         .interrupt_allowed = svm_interrupt_allowed,
4795         .nmi_allowed = svm_nmi_allowed,
4796         .get_nmi_mask = svm_get_nmi_mask,
4797         .set_nmi_mask = svm_set_nmi_mask,
4798         .enable_nmi_window = svm_enable_nmi_window,
4799         .enable_irq_window = svm_enable_irq_window,
4800         .update_cr8_intercept = svm_update_cr8_intercept,
4801         .set_virtual_apic_mode = avic_set_virtual_apic_mode,
4802         .refresh_apicv_exec_ctrl = avic_refresh_apicv_exec_ctrl,
4803         .check_apicv_inhibit_reasons = avic_check_apicv_inhibit_reasons,
4804         .apicv_post_state_restore = avic_apicv_post_state_restore,
4805
4806         .get_exit_info = svm_get_exit_info,
4807
4808         .vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,
4809
4810         .has_wbinvd_exit = svm_has_wbinvd_exit,
4811
4812         .get_l2_tsc_offset = svm_get_l2_tsc_offset,
4813         .get_l2_tsc_multiplier = svm_get_l2_tsc_multiplier,
4814         .write_tsc_offset = svm_write_tsc_offset,
4815         .write_tsc_multiplier = svm_write_tsc_multiplier,
4816
4817         .load_mmu_pgd = svm_load_mmu_pgd,
4818
4819         .check_intercept = svm_check_intercept,
4820         .handle_exit_irqoff = svm_handle_exit_irqoff,
4821
4822         .request_immediate_exit = __kvm_request_immediate_exit,
4823
4824         .sched_in = svm_sched_in,
4825
4826         .nested_ops = &svm_nested_ops,
4827
4828         .deliver_interrupt = svm_deliver_interrupt,
4829         .pi_update_irte = avic_pi_update_irte,
4830         .setup_mce = svm_setup_mce,
4831
4832         .smi_allowed = svm_smi_allowed,
4833         .enter_smm = svm_enter_smm,
4834         .leave_smm = svm_leave_smm,
4835         .enable_smi_window = svm_enable_smi_window,
4836
4837         .mem_enc_ioctl = sev_mem_enc_ioctl,
4838         .mem_enc_register_region = sev_mem_enc_register_region,
4839         .mem_enc_unregister_region = sev_mem_enc_unregister_region,
4840         .guest_memory_reclaimed = sev_guest_memory_reclaimed,
4841
4842         .vm_copy_enc_context_from = sev_vm_copy_enc_context_from,
4843         .vm_move_enc_context_from = sev_vm_move_enc_context_from,
4844
4845         .can_emulate_instruction = svm_can_emulate_instruction,
4846
4847         .apic_init_signal_blocked = svm_apic_init_signal_blocked,
4848
4849         .msr_filter_changed = svm_msr_filter_changed,
4850         .complete_emulated_msr = svm_complete_emulated_msr,
4851
4852         .vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
4853         .vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons,
4854 };
4855
4856 /*
4857  * The default MMIO mask is a single bit (excluding the present bit),
4858  * which could conflict with the memory encryption bit. Check for
4859  * memory encryption support and override the default MMIO mask if
4860  * memory encryption is enabled.
4861  */
4862 static __init void svm_adjust_mmio_mask(void)
4863 {
4864         unsigned int enc_bit, mask_bit;
4865         u64 msr, mask;
4866
4867         /* If there is no memory encryption support, use existing mask */
4868         if (cpuid_eax(0x80000000) < 0x8000001f)
4869                 return;
4870
4871         /* If memory encryption is not enabled, use existing mask */
4872         rdmsrl(MSR_AMD64_SYSCFG, msr);
4873         if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
4874                 return;
4875
4876         enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
4877         mask_bit = boot_cpu_data.x86_phys_bits;
4878
4879         /* Increment the mask bit if it is the same as the encryption bit */
4880         if (enc_bit == mask_bit)
4881                 mask_bit++;
4882
4883         /*
4884          * If the mask bit location is below 52, then some bits above the
4885          * physical addressing limit will always be reserved, so use the
4886          * rsvd_bits() function to generate the mask. This mask, along with
4887          * the present bit, will be used to generate a page fault with
4888          * PFER.RSV = 1.
4889          * PFERR.RSVD = 1.
4890          * If the mask bit location is 52 (or above), then clear the mask.
4891          */
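        /*
         * Worked example on a hypothetical part with enc_bit == 47 and
         * x86_phys_bits == 48: mask_bit stays 48, so the mask below becomes
         * rsvd_bits(48, 51) | PT_PRESENT_MASK, i.e. bits 51:48 plus the
         * present bit.
         */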
4892         mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;
4893
4894         kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
4895 }
4896
4897 static __init void svm_set_cpu_caps(void)
4898 {
4899         kvm_set_cpu_caps();
4900
4901         kvm_caps.supported_xss = 0;
4902
4903         /* CPUID 0x80000001 and 0x8000000A (SVM features) */
4904         if (nested) {
4905                 kvm_cpu_cap_set(X86_FEATURE_SVM);
4906                 kvm_cpu_cap_set(X86_FEATURE_VMCBCLEAN);
4907
4908                 if (nrips)
4909                         kvm_cpu_cap_set(X86_FEATURE_NRIPS);
4910
4911                 if (npt_enabled)
4912                         kvm_cpu_cap_set(X86_FEATURE_NPT);
4913
4914                 if (tsc_scaling)
4915                         kvm_cpu_cap_set(X86_FEATURE_TSCRATEMSR);
4916
4917                 if (vls)
4918                         kvm_cpu_cap_set(X86_FEATURE_V_VMSAVE_VMLOAD);
4919                 if (lbrv)
4920                         kvm_cpu_cap_set(X86_FEATURE_LBRV);
4921
4922                 if (boot_cpu_has(X86_FEATURE_PAUSEFILTER))
4923                         kvm_cpu_cap_set(X86_FEATURE_PAUSEFILTER);
4924
4925                 if (boot_cpu_has(X86_FEATURE_PFTHRESHOLD))
4926                         kvm_cpu_cap_set(X86_FEATURE_PFTHRESHOLD);
4927
4928                 if (vgif)
4929                         kvm_cpu_cap_set(X86_FEATURE_VGIF);
4930
4931                 /* Nested VM can receive #VMEXIT instead of triggering #GP */
4932                 kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK);
4933         }
4934
4935         /* CPUID 0x80000008 */
4936         if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
4937             boot_cpu_has(X86_FEATURE_AMD_SSBD))
4938                 kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
4939
4940         /* AMD PMU PERFCTR_CORE CPUID */
4941         if (enable_pmu && boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
4942                 kvm_cpu_cap_set(X86_FEATURE_PERFCTR_CORE);
4943
4944         /* CPUID 0x8000001F (SME/SEV features) */
4945         sev_set_cpu_caps();
4946 }
4947
4948 static __init int svm_hardware_setup(void)
4949 {
4950         int cpu;
4951         struct page *iopm_pages;
4952         void *iopm_va;
4953         int r;
4954         unsigned int order = get_order(IOPM_SIZE);
4955
4956         /*
4957          * NX is required for shadow paging and for NPT if the NX huge pages
4958          * mitigation is enabled.
4959          */
4960         if (!boot_cpu_has(X86_FEATURE_NX)) {
4961                 pr_err_ratelimited("NX (Execute Disable) not supported\n");
4962                 return -EOPNOTSUPP;
4963         }
4964         kvm_enable_efer_bits(EFER_NX);
4965
4966         iopm_pages = alloc_pages(GFP_KERNEL, order);
4967
4968         if (!iopm_pages)
4969                 return -ENOMEM;
4970
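        /*
         * A set bit in the I/O permission map intercepts the corresponding
         * port, so filling the map with 0xff intercepts all I/O accesses by
         * default.
         */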
4971         iopm_va = page_address(iopm_pages);
4972         memset(iopm_va, 0xff, PAGE_SIZE * (1 << order));
4973         iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
4974
4975         init_msrpm_offsets();
4976
4977         kvm_caps.supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS |
4978                                      XFEATURE_MASK_BNDCSR);
4979
4980         if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
4981                 kvm_enable_efer_bits(EFER_FFXSR);
4982
4983         if (tsc_scaling) {
4984                 if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
4985                         tsc_scaling = false;
4986                 } else {
4987                         pr_info("TSC scaling supported\n");
4988                         kvm_caps.has_tsc_control = true;
4989                 }
4990         }
4991         kvm_caps.max_tsc_scaling_ratio = SVM_TSC_RATIO_MAX;
4992         kvm_caps.tsc_scaling_ratio_frac_bits = 32;
4993
4994         tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);
4995
4996         /* Check for pause filtering support */
4997         if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
4998                 pause_filter_count = 0;
4999                 pause_filter_thresh = 0;
5000         } else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
5001                 pause_filter_thresh = 0;
5002         }
5003
5004         if (nested) {
5005                 printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
5006                 kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
5007         }
5008
5009         /*
5010          * KVM's MMU doesn't support using 2-level paging for itself, and thus
5011          * NPT isn't supported if the host is using 2-level paging since host
5012          * CR4 is unchanged on VMRUN.
5013          */
5014         if (!IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_X86_PAE))
5015                 npt_enabled = false;
5016
5017         if (!boot_cpu_has(X86_FEATURE_NPT))
5018                 npt_enabled = false;
5019
5020         /* Force VM NPT level equal to the host's paging level */
5021         kvm_configure_mmu(npt_enabled, get_npt_level(),
5022                           get_npt_level(), PG_LEVEL_1G);
5023         pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");
5024
5025         /* Setup shadow_me_value and shadow_me_mask */
5026         kvm_mmu_set_me_spte_mask(sme_me_mask, sme_me_mask);
5027
5028         svm_adjust_mmio_mask();
5029
5030         /*
5031          * Note, SEV setup consumes npt_enabled and enable_mmio_caching (which
5032          * may be modified by svm_adjust_mmio_mask()).
5033          */
5034         sev_hardware_setup();
5035
5036         svm_hv_hardware_setup();
5037
5038         for_each_possible_cpu(cpu) {
5039                 r = svm_cpu_init(cpu);
5040                 if (r)
5041                         goto err;
5042         }
5043
5044         if (nrips) {
5045                 if (!boot_cpu_has(X86_FEATURE_NRIPS))
5046                         nrips = false;
5047         }
5048
5049         enable_apicv = avic = avic && avic_hardware_setup(&svm_x86_ops);
5050
5051         if (!enable_apicv) {
5052                 svm_x86_ops.vcpu_blocking = NULL;
5053                 svm_x86_ops.vcpu_unblocking = NULL;
5054                 svm_x86_ops.vcpu_get_apicv_inhibit_reasons = NULL;
5055         }
5056
5057         if (vls) {
5058                 if (!npt_enabled ||
5059                     !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
5060                     !IS_ENABLED(CONFIG_X86_64)) {
5061                         vls = false;
5062                 } else {
5063                         pr_info("Virtual VMLOAD VMSAVE supported\n");
5064                 }
5065         }
5066
5067         if (boot_cpu_has(X86_FEATURE_SVME_ADDR_CHK))
5068                 svm_gp_erratum_intercept = false;
5069
5070         if (vgif) {
5071                 if (!boot_cpu_has(X86_FEATURE_VGIF))
5072                         vgif = false;
5073                 else
5074                         pr_info("Virtual GIF supported\n");
5075         }
5076
5077         if (lbrv) {
5078                 if (!boot_cpu_has(X86_FEATURE_LBRV))
5079                         lbrv = false;
5080                 else
5081                         pr_info("LBR virtualization supported\n");
5082         }
5083
5084         if (!enable_pmu)
5085                 pr_info("PMU virtualization is disabled\n");
5086
5087         svm_set_cpu_caps();
5088
5089         /*
5090          * It seems that on AMD processors the PTE's accessed bit is
5091          * being set by the CPU hardware before the NPF vmexit.
5092          * This is not the expected behaviour and our tests fail because
5093          * of it.
5094          * A workaround here is to disable support for
5095          * GUEST_MAXPHYADDR < HOST_MAXPHYADDR if NPT is enabled.
5096          * In this case userspace can know if there is support using the
5097          * KVM_CAP_SMALLER_MAXPHYADDR extension and decide how to handle
5098          * it.
5099          * If future AMD CPU models change the behaviour described above,
5100          * this variable can be changed accordingly.
5101          */
5102         allow_smaller_maxphyaddr = !npt_enabled;
5103
5104         return 0;
5105
5106 err:
5107         svm_hardware_unsetup();
5108         return r;
5109 }
5110
5111
5112 static struct kvm_x86_init_ops svm_init_ops __initdata = {
5113         .cpu_has_kvm_support = has_svm,
5114         .disabled_by_bios = is_disabled,
5115         .hardware_setup = svm_hardware_setup,
5116         .check_processor_compatibility = svm_check_processor_compat,
5117
5118         .runtime_ops = &svm_x86_ops,
5119         .pmu_ops = &amd_pmu_ops,
5120 };
5121
5122 static int __init svm_init(void)
5123 {
5124         __unused_size_checks();
5125
5126         return kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
5127                         __alignof__(struct vcpu_svm), THIS_MODULE);
5128 }
5129
5130 static void __exit svm_exit(void)
5131 {
5132         kvm_exit();
5133 }
5134
5135 module_init(svm_init)
5136 module_exit(svm_exit)