KVM: x86: hyper-v: Introduce TLB flush fifo
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 098f04bec8ef3ad89bcbf0d33826477dc95d1cf9..91352d69284524c36ab0012a1c6ad2827ca45ffe 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -6,6 +6,7 @@
 #include "mmu.h"
 #include "kvm_cache_regs.h"
 #include "x86.h"
+#include "smm.h"
 #include "cpuid.h"
 #include "pmu.h"
 
@@ -346,12 +347,6 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
        return 0;
 }
 
-static int is_external_interrupt(u32 info)
-{
-       info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
-       return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
-}
-
 static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -2714,8 +2709,6 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr)
                if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
                        msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
                break;
-       case MSR_IA32_PERF_CAPABILITIES:
-               return 0;
        default:
                return KVM_MSR_RET_INVALID;
        }
@@ -3426,15 +3419,6 @@ static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
                return 0;
        }
 
-       if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
-           exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
-           exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
-           exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
-               printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
-                      "exit_code 0x%x\n",
-                      __func__, svm->vmcb->control.exit_int_info,
-                      exit_code);
-
        if (exit_fastpath != EXIT_FASTPATH_NONE)
                return 1;
 
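The two removals belong together: the printk in svm_handle_exit() was the only caller of the is_external_interrupt() helper deleted earlier, so both go. For reference, a runnable userspace model of the dropped check, with the EVTINJ/EXITINTINFO field encodings as defined in arch/x86/include/asm/svm.h:

#include <stdint.h>
#include <stdio.h>

/* Field encodings as in arch/x86/include/asm/svm.h. */
#define SVM_EVTINJ_TYPE_SHIFT	8
#define SVM_EVTINJ_TYPE_MASK	(7u << SVM_EVTINJ_TYPE_SHIFT)
#define SVM_EVTINJ_TYPE_INTR	(0u << SVM_EVTINJ_TYPE_SHIFT)
#define SVM_EVTINJ_TYPE_EXEPT	(3u << SVM_EVTINJ_TYPE_SHIFT)
#define SVM_EVTINJ_VALID	(1u << 31)

/* The removed helper: "valid event of type external interrupt". */
static int is_external_interrupt(uint32_t info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

int main(void)
{
	/* Valid INTR, vector 0x30 -> prints 1. */
	printf("%d\n", is_external_interrupt(SVM_EVTINJ_VALID | 0x30));
	/* Valid exception (#PF, vector 14) -> prints 0. */
	printf("%d\n", is_external_interrupt(SVM_EVTINJ_VALID |
					     SVM_EVTINJ_TYPE_EXEPT | 14));
	return 0;
}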
@@ -3738,6 +3722,13 @@ static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
+       /*
+        * Unlike VMX, SVM doesn't provide a way to flush only NPT TLB entries.
+        * A TLB flush for the current ASID flushes both "host" and "guest" TLB
+        * entries, and thus is a superset of Hyper-V's fine grained flushing.
+        */
+       kvm_hv_vcpu_purge_flush_tlb(vcpu);
+
        /*
         * Flush only the current ASID even if the TLB flush was invoked via
         * kvm_flush_remote_tlbs().  Although flushing remote TLBs requires all
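kvm_hv_vcpu_purge_flush_tlb() is the hook from the series this page is titled after: since the ASID flush below is a superset of any queued fine-grained Hyper-V flush, the pending fifo entries can simply be dropped. A sketch of the hyperv.h side, reconstructed from the same series, so treat the exact placement and member names as approximate (kvm_check_request(), to_hv_vcpu() and kfifo_reset_out() are real kernel helpers):

/* Approximate shape of the helper used above: if a Hyper-V TLB flush
 * was requested for this vCPU, the full ASID flush that follows makes
 * the queued fine-grained entries redundant, so just drain the fifo. */
static inline void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (!hv_vcpu || !kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
		return;

	kfifo_reset_out(&hv_vcpu->tlb_flush_fifo.entries);
}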
@@ -4117,6 +4108,8 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
        case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
                return false;
        case MSR_IA32_SMBASE:
+               if (!IS_ENABLED(CONFIG_KVM_SMM))
+                       return false;
                /* SEV-ES guests do not support SMM, so report false */
                if (kvm && sev_es_guest(kvm))
                        return false;
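IS_ENABLED(CONFIG_KVM_SMM) collapses to a compile-time constant 0 or 1, so with SMM support compiled out the SMBASE case becomes an unconditional "return false" at no runtime cost. A runnable, simplified model of the trick from include/linux/kconfig.h (the real IS_ENABLED() additionally ORs in the =m case via IS_MODULE()):

#include <stdio.h>

/* CONFIG_FOO is either defined to 1 or not defined at all; the macro
 * chain must yield a plain 0/1 constant in both cases. */
#define __ARG_PLACEHOLDER_1			0,
#define __take_second_arg(__ignored, val, ...)	val
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x)			___is_defined(x)
#define IS_ENABLED(option)		__is_defined(option)

#define CONFIG_KVM_SMM 1	/* as if CONFIG_KVM_SMM=y */

int main(void)
{
	printf("%d\n", IS_ENABLED(CONFIG_KVM_SMM));	/* 1 */
	printf("%d\n", IS_ENABLED(CONFIG_NOT_SET));	/* 0 */
	return 0;
}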
@@ -4373,6 +4366,7 @@ static void svm_setup_mce(struct kvm_vcpu *vcpu)
        vcpu->arch.mcg_cap &= 0x1ff;
 }
 
+#ifdef CONFIG_KVM_SMM
 bool svm_smi_blocked(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -4400,7 +4394,7 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
        return 1;
 }
 
-static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        struct kvm_host_map map_save;
@@ -4409,10 +4403,16 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
        if (!is_guest_mode(vcpu))
                return 0;
 
-       /* FED8h - SVM Guest */
-       put_smstate(u64, smstate, 0x7ed8, 1);
-       /* FEE0h - SVM Guest VMCB Physical Address */
-       put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
+       /*
+        * 32-bit SMRAM format doesn't preserve EFER and SVM state.  Userspace is
+        * responsible for ensuring nested SVM and SMIs are mutually exclusive.
+        */
+
+       if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
+               return 1;
+
+       smram->smram64.svm_guest_flag = 1;
+       smram->smram64.svm_guest_vmcb_gpa = svm->nested.vmcb12_gpa;
 
        svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
        svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
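The magic offsets the old put_smstate() calls encoded (0x7ed8, 0x7ee0, plus 0x7ed0 for EFER read back in svm_leave_smm() below) were addresses into the 64-bit SMRAM state-save area; the union kvm_smram / struct kvm_smram_state_64 types introduced by this series give them names. A minimal runnable check of that correspondence; smram64_sketch is an illustrative stand-in covering only these three fields, using the 0x7e00-relative addressing the old accessors subtracted (the real struct in arch/x86/kvm/smm.h names every field):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct smram64_sketch {
	uint8_t  pad[0xd0];
	uint64_t efer;			/* 0x7ed0, "FED0h - EFER" */
	uint64_t svm_guest_flag;	/* 0x7ed8, "FED8h - SVM Guest" */
	uint64_t svm_guest_vmcb_gpa;	/* 0x7ee0, VMCB physical address */
};

int main(void)
{
	/* The typed fields land exactly where the magic numbers pointed. */
	assert(offsetof(struct smram64_sketch, efer) == 0x7ed0 - 0x7e00);
	assert(offsetof(struct smram64_sketch, svm_guest_flag) == 0x7ed8 - 0x7e00);
	assert(offsetof(struct smram64_sketch, svm_guest_vmcb_gpa) == 0x7ee0 - 0x7e00);
	return 0;
}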
@@ -4434,8 +4434,7 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
         * that, see svm_prepare_switch_to_guest()) which must be
         * preserved.
         */
-       if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
-                        &map_save) == -EINVAL)
+       if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save))
                return 1;
 
        BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
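The "== -EINVAL" tests dropped here and in svm_leave_smm() below were subtly wrong: kvm_vcpu_map() returns 0 on success and a negative errno on failure, and -EINVAL is not the only error it can return, so other failures were silently treated as success. A trivial userspace illustration (map_helper is invented for the demo):

#include <errno.h>
#include <stdio.h>

/* A fallible helper in the style of kvm_vcpu_map(): 0 on success,
 * a negative errno (not necessarily -EINVAL) on failure. */
static int map_helper(int failure_mode)
{
	switch (failure_mode) {
	case 0:  return 0;		/* success */
	case 1:  return -EINVAL;
	default: return -EFAULT;	/* missed by "== -EINVAL" */
	}
}

int main(void)
{
	int rc = map_helper(2);

	printf("old check: %s\n", rc == -EINVAL ? "fail" : "treated as ok");
	printf("new check: %s\n", rc ? "fail" : "ok");
	return 0;
}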
@@ -4447,34 +4446,33 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
        return 0;
 }
 
-static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
+static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        struct kvm_host_map map, map_save;
-       u64 saved_efer, vmcb12_gpa;
        struct vmcb *vmcb12;
        int ret;
 
+       const struct kvm_smram_state_64 *smram64 = &smram->smram64;
+
        if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
                return 0;
 
        /* Non-zero if SMI arrived while vCPU was in guest mode. */
-       if (!GET_SMSTATE(u64, smstate, 0x7ed8))
+       if (!smram64->svm_guest_flag)
                return 0;
 
        if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
                return 1;
 
-       saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
-       if (!(saved_efer & EFER_SVME))
+       if (!(smram64->efer & EFER_SVME))
                return 1;
 
-       vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
-       if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
+       if (kvm_vcpu_map(vcpu, gpa_to_gfn(smram64->svm_guest_vmcb_gpa), &map))
                return 1;
 
        ret = 1;
-       if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save) == -EINVAL)
+       if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save))
                goto unmap_map;
 
        if (svm_allocate_nested(svm))
@@ -4496,7 +4494,7 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
        vmcb12 = map.hva;
        nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
        nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
-       ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
+       ret = enter_svm_guest_mode(vcpu, smram64->svm_guest_vmcb_gpa, vmcb12, false);
 
        if (ret)
                goto unmap_save;
@@ -4522,6 +4520,7 @@ static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
                /* We must be in SMM; RSM will cause a vmexit anyway.  */
        }
 }
+#endif
 
 static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
                                        void *insn, int insn_len)
@@ -4797,10 +4796,12 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
        .pi_update_irte = avic_pi_update_irte,
        .setup_mce = svm_setup_mce,
 
+#ifdef CONFIG_KVM_SMM
        .smi_allowed = svm_smi_allowed,
        .enter_smm = svm_enter_smm,
        .leave_smm = svm_leave_smm,
        .enable_smi_window = svm_enable_smi_window,
+#endif
 
        .mem_enc_ioctl = sev_mem_enc_ioctl,
        .mem_enc_register_region = sev_mem_enc_register_region,
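With CONFIG_KVM_SMM disabled, the four SMM callbacks vanish from the ops table entirely; the matching kvm_x86_ops member declarations are gated by the same #ifdef elsewhere in the series, so a stray caller fails to build rather than dereferencing a NULL pointer at runtime. A minimal runnable model of the pattern (the struct and functions are illustrative, not kernel API):

#include <stdio.h>

#define CONFIG_KVM_SMM 1	/* toggle to see both configurations build */

struct ops {
	void (*setup)(void);
#ifdef CONFIG_KVM_SMM
	int (*smi_allowed)(void);
#endif
};

static void setup(void) { puts("setup"); }
#ifdef CONFIG_KVM_SMM
static int smi_allowed(void) { return 1; }
#endif

static const struct ops ops = {
	.setup = setup,
#ifdef CONFIG_KVM_SMM
	.smi_allowed = smi_allowed,
#endif
};

int main(void)
{
	ops.setup();
#ifdef CONFIG_KVM_SMM
	printf("smi_allowed: %d\n", ops.smi_allowed());
#endif
	return 0;
}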
@@ -4866,6 +4867,7 @@ static __init void svm_set_cpu_caps(void)
 {
        kvm_set_cpu_caps();
 
+       kvm_caps.supported_perf_cap = 0;
        kvm_caps.supported_xss = 0;
 
        /* CPUID 0x80000001 and 0x8000000A (SVM features) */
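Zeroing kvm_caps.supported_perf_cap pairs with the MSR_IA32_PERF_CAPABILITIES case deleted from svm_get_msr_feature() earlier: reporting that MSR moves to common x86 code, which reads whatever cap the vendor module left set, presumably along these lines (a sketch of the kvm_get_msr_feature() counterpart in arch/x86/kvm/x86.c, not verbatim):

	/* Sketch: SVM zeroes the cap above, so the MSR reads as 0. */
	case MSR_IA32_PERF_CAPABILITIES:
		msr->data = kvm_caps.supported_perf_cap;
		break;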