Merge tag 'kvm-4.15-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
[linux-2.6-block.git] / arch / x86 / kvm / vmx.c
index a6f4f095f8f4eb4aa5b4bae2a21dd66cccd824e7..7c3522a989d0b37713a802be82ee1f265fe64c9a 100644 (file)
@@ -486,6 +486,14 @@ struct nested_vmx {
        u64 nested_vmx_cr4_fixed1;
        u64 nested_vmx_vmcs_enum;
        u64 nested_vmx_vmfunc_controls;
+
+       /* SMM related state */
+       struct {
+               /* in VMX operation on SMM entry? */
+               bool vmxon;
+               /* in guest mode on SMM entry? */
+               bool guest_mode;
+       } smm;
 };
 
 #define POSTED_INTR_ON  0
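
The new nested.smm pair caches exactly the VMX state that an SMM entry tears down; its consumers, vmx_pre_enter_smm() and vmx_pre_leave_smm(), appear near the end of this patch. A minimal sketch of the intended pairing (illustrative, mirroring those functions):

	/* On SMI (vmx_pre_enter_smm): stash state, leave VMX operation. */
	vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
	vmx->nested.smm.vmxon = vmx->nested.vmxon;
	vmx->nested.vmxon = false;

	/* On RSM (vmx_pre_leave_smm): restore in the reverse order. */
	vmx->nested.vmxon = vmx->nested.smm.vmxon;
	vmx->nested.smm.vmxon = false;
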
@@ -900,16 +908,13 @@ static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu);
 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
 static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
 static bool vmx_xsaves_supported(void);
-static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
 static void vmx_set_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
 static void vmx_get_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
 static bool guest_state_valid(struct kvm_vcpu *vcpu);
 static u32 vmx_segment_access_rights(struct kvm_segment *var);
-static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
-static int alloc_identity_pagetable(struct kvm *kvm);
 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
@@ -1598,18 +1603,15 @@ static inline void vpid_sync_context(int vpid)
 
 static inline void ept_sync_global(void)
 {
-       if (cpu_has_vmx_invept_global())
-               __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
+       __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
 }
 
 static inline void ept_sync_context(u64 eptp)
 {
-       if (enable_ept) {
-               if (cpu_has_vmx_invept_context())
-                       __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
-               else
-                       ept_sync_global();
-       }
+       if (cpu_has_vmx_invept_context())
+               __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
+       else
+               ept_sync_global();
 }
 
 static __always_inline void vmcs_check16(unsigned long field)
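
The capability checks dropped from ept_sync_global() and ept_sync_context() do not vanish: the hardware_setup() hunk below now clears enable_ept when cpu_has_vmx_invept_global() fails, and hardware_enable() gates its ept_sync_global() call on enable_ept, so global INVEPT support can be assumed here. For reference, __invept() elsewhere in this file (unchanged by this patch) issues the INVEPT instruction with a two-u64 descriptor, approximately:

	static inline void __invept(unsigned long ext, u64 eptp, u64 gpa)
	{
		struct {
			u64 eptp, gpa;
		} operand = {eptp, gpa};

		asm volatile (__ex(ASM_VMX_INVEPT)
				/* CF==1 or ZF==1 --> rc = -1 */
				"; ja 1f ; ud2 ; 1:\n"
				: : "a" (&operand), "c" (ext) : "cc", "memory");
	}
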
@@ -2831,8 +2833,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
                                SECONDARY_EXEC_ENABLE_PML;
                        vmx->nested.nested_vmx_ept_caps |= VMX_EPT_AD_BIT;
                }
-       } else
-               vmx->nested.nested_vmx_ept_caps = 0;
+       }
 
        if (cpu_has_vmx_vmfunc()) {
                vmx->nested.nested_vmx_secondary_ctls_high |=
@@ -2841,8 +2842,9 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
                 * Advertise EPTP switching whenever EPT is enabled,
                 * since we emulate EPTP switching anyway
                 */
-               vmx->nested.nested_vmx_vmfunc_controls =
-                       VMX_VMFUNC_EPTP_SWITCHING;
+               if (enable_ept)
+                       vmx->nested.nested_vmx_vmfunc_controls =
+                               VMX_VMFUNC_EPTP_SWITCHING;
        }
 
        /*
@@ -2856,8 +2858,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
                        SECONDARY_EXEC_ENABLE_VPID;
                vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
                        VMX_VPID_EXTENT_SUPPORTED_MASK;
-       } else
-               vmx->nested.nested_vmx_vpid_caps = 0;
+       }
 
        if (enable_unrestricted_guest)
                vmx->nested.nested_vmx_secondary_ctls_high |=
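
The deleted else-arms were dead code: nested_vmx_setup_ctls_msrs() runs once per vCPU on a vcpu_vmx that was zero-allocated (kmem_cache_zalloc() in vmx_create_vcpu()), so the capability words already read as 0 when a feature is absent. These fields feed the VMX capability MSRs exposed to L1, which follow the SDM's packed layout; a hypothetical helper to illustrate:

	/*
	 * Control-capability MSRs report allowed 0-settings in bits 31:0
	 * (a control may be 0 only if its low bit is 0) and allowed
	 * 1-settings in bits 63:32 (a control may be 1 only if its high
	 * bit is 1).  Hypothetical helper, for illustration only.
	 */
	static u64 vmx_ctl_msr(u32 allowed0, u32 allowed1)
	{
		return ((u64)allowed1 << 32) | allowed0;
	}
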
@@ -3544,7 +3545,8 @@ static int hardware_enable(void)
                wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
        }
        kvm_cpu_vmxon(phys_addr);
-       ept_sync_global();
+       if (enable_ept)
+               ept_sync_global();
 
        return 0;
 }
@@ -3657,8 +3659,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
                        SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
                        SECONDARY_EXEC_SHADOW_VMCS |
                        SECONDARY_EXEC_XSAVES |
-                       SECONDARY_EXEC_RDSEED |
-                       SECONDARY_EXEC_RDRAND |
+                       SECONDARY_EXEC_RDSEED_EXITING |
+                       SECONDARY_EXEC_RDRAND_EXITING |
                        SECONDARY_EXEC_ENABLE_PML |
                        SECONDARY_EXEC_TSC_SCALING |
                        SECONDARY_EXEC_ENABLE_VMFUNC;
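
The renames track the Intel SDM, which calls these secondary processor-based controls "RDRAND exiting" (bit 11) and "RDSEED exiting" (bit 16); there is no functional change. The definitions in arch/x86/include/asm/vmx.h are renamed accordingly in this series, keeping the same values:

	#define SECONDARY_EXEC_RDRAND_EXITING		0x00000800
	#define SECONDARY_EXEC_RDSEED_EXITING		0x00010000
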
@@ -3679,14 +3681,25 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
                                SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
                                SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
 
+       rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP,
+               &vmx_capability.ept, &vmx_capability.vpid);
+
        if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
                /* CR3 accesses and invlpg don't need to cause VM exits
                   when EPT is enabled */
                _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
                                             CPU_BASED_CR3_STORE_EXITING |
                                             CPU_BASED_INVLPG_EXITING);
-               rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
-                     vmx_capability.ept, vmx_capability.vpid);
+       } else if (vmx_capability.ept) {
+               vmx_capability.ept = 0;
+               pr_warn_once("EPT capabilities reported, but the 1-setting of "
+                               "the enable-EPT VM-execution control is unsupported\n");
+       }
+       if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) &&
+               vmx_capability.vpid) {
+               vmx_capability.vpid = 0;
+               pr_warn_once("VPID capabilities reported, but the 1-setting of "
+                               "the enable-VPID VM-execution control is unsupported\n");
        }
 
        min = VM_EXIT_SAVE_DEBUG_CONTROLS | VM_EXIT_ACK_INTR_ON_EXIT;
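
MSR_IA32_VMX_EPT_VPID_CAP is now read before the EPT branch, and via rdmsr_safe(), so VPID capabilities are captured even when the enable-EPT control is unavailable, and a #GP on hardware lacking the MSR is tolerated (vmx_capability is static, so its fields stay zero if the read faults). The two new warnings then enforce one invariant; condensed into a hypothetical helper for clarity:

	/*
	 * A feature's capability bits may be non-zero only if the
	 * matching secondary VM-execution control supports the
	 * 1-setting.  Hypothetical condensation of the checks above.
	 */
	static u32 sanitize_vmx_cap(u32 cap, u32 ctl2, u32 enable_bit,
				    const char *what)
	{
		if (cap && !(ctl2 & enable_bit)) {
			pr_warn_once("%s capabilities reported without the enabling control\n",
				     what);
			return 0;
		}
		return cap;
	}
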
@@ -4781,18 +4794,18 @@ static int init_rmode_identity_map(struct kvm *kvm)
        kvm_pfn_t identity_map_pfn;
        u32 tmp;
 
-       if (!enable_ept)
-               return 0;
-
        /* Protect kvm->arch.ept_identity_pagetable_done. */
        mutex_lock(&kvm->slots_lock);
 
        if (likely(kvm->arch.ept_identity_pagetable_done))
                goto out2;
 
+       if (!kvm->arch.ept_identity_map_addr)
+               kvm->arch.ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR;
        identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT;
 
-       r = alloc_identity_pagetable(kvm);
+       r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
+                                   kvm->arch.ept_identity_map_addr, PAGE_SIZE);
        if (r < 0)
                goto out2;
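
alloc_identity_pagetable() is folded into its only caller (see its removal in the next hunk), and the VMX_EPT_IDENTITY_PAGETABLE_ADDR fallback moves here from vmx_create_vcpu(). For context, the unchanged code just past this hunk fills the page with 4 MB identity-mapped PSE entries, roughly:

	/* Set up an identity-mapping pagetable for EPT in real mode. */
	for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
		tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
			_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
		r = kvm_write_guest_page(kvm, identity_map_pfn,
				&tmp, i * sizeof(tmp), sizeof(tmp));
		if (r < 0)
			goto out;
	}
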
 
@@ -4864,20 +4877,6 @@ out:
        return r;
 }
 
-static int alloc_identity_pagetable(struct kvm *kvm)
-{
-       /* Called with kvm->slots_lock held. */
-
-       int r = 0;
-
-       BUG_ON(kvm->arch.ept_identity_pagetable_done);
-
-       r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
-                                   kvm->arch.ept_identity_map_addr, PAGE_SIZE);
-
-       return r;
-}
-
 static int allocate_vpid(void)
 {
        int vpid;
@@ -5282,13 +5281,13 @@ static u32 vmx_exec_control(struct vcpu_vmx *vmx)
 static bool vmx_rdrand_supported(void)
 {
        return vmcs_config.cpu_based_2nd_exec_ctrl &
-               SECONDARY_EXEC_RDRAND;
+               SECONDARY_EXEC_RDRAND_EXITING;
 }
 
 static bool vmx_rdseed_supported(void)
 {
        return vmcs_config.cpu_based_2nd_exec_ctrl &
-               SECONDARY_EXEC_RDSEED;
+               SECONDARY_EXEC_RDSEED_EXITING;
 }
 
 static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
@@ -5382,30 +5381,30 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
        if (vmx_rdrand_supported()) {
                bool rdrand_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDRAND);
                if (rdrand_enabled)
-                       exec_control &= ~SECONDARY_EXEC_RDRAND;
+                       exec_control &= ~SECONDARY_EXEC_RDRAND_EXITING;
 
                if (nested) {
                        if (rdrand_enabled)
                                vmx->nested.nested_vmx_secondary_ctls_high |=
-                                       SECONDARY_EXEC_RDRAND;
+                                       SECONDARY_EXEC_RDRAND_EXITING;
                        else
                                vmx->nested.nested_vmx_secondary_ctls_high &=
-                                       ~SECONDARY_EXEC_RDRAND;
+                                       ~SECONDARY_EXEC_RDRAND_EXITING;
                }
        }
 
        if (vmx_rdseed_supported()) {
                bool rdseed_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDSEED);
                if (rdseed_enabled)
-                       exec_control &= ~SECONDARY_EXEC_RDSEED;
+                       exec_control &= ~SECONDARY_EXEC_RDSEED_EXITING;
 
                if (nested) {
                        if (rdseed_enabled)
                                vmx->nested.nested_vmx_secondary_ctls_high |=
-                                       SECONDARY_EXEC_RDSEED;
+                                       SECONDARY_EXEC_RDSEED_EXITING;
                        else
                                vmx->nested.nested_vmx_secondary_ctls_high &=
-                                       ~SECONDARY_EXEC_RDSEED;
+                                       ~SECONDARY_EXEC_RDSEED_EXITING;
                }
        }
 
@@ -5426,7 +5425,7 @@ static void ept_set_mmio_spte_mask(void)
 /*
  * Sets up the vmcs for emulated real mode.
  */
-static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
+static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
 {
 #ifdef CONFIG_X86_64
        unsigned long a;
@@ -5539,8 +5538,6 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
                vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
                vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
        }
-
-       return 0;
 }
 
 static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
@@ -5604,6 +5601,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
        vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
        vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
        vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0);
+       if (kvm_mpx_supported())
+               vmcs_write64(GUEST_BNDCFGS, 0);
 
        setup_msrs(vmx);
 
@@ -5912,8 +5911,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
                cr2 = vmcs_readl(EXIT_QUALIFICATION);
                /* EPT won't cause page fault directly */
                WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept);
-               return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0,
-                               true);
+               return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0);
        }
 
        ex_no = intr_info & INTR_INFO_VECTOR_MASK;
@@ -6747,16 +6745,14 @@ static __init int hardware_setup(void)
 
        if (!cpu_has_vmx_ept() ||
            !cpu_has_vmx_ept_4levels() ||
-           !cpu_has_vmx_ept_mt_wb()) {
+           !cpu_has_vmx_ept_mt_wb() ||
+           !cpu_has_vmx_invept_global())
                enable_ept = 0;
-               enable_unrestricted_guest = 0;
-               enable_ept_ad_bits = 0;
-       }
 
        if (!cpu_has_vmx_ept_ad_bits() || !enable_ept)
                enable_ept_ad_bits = 0;
 
-       if (!cpu_has_vmx_unrestricted_guest())
+       if (!cpu_has_vmx_unrestricted_guest() || !enable_ept)
                enable_unrestricted_guest = 0;
 
        if (!cpu_has_vmx_flexpriority())
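
Together with the simplified ept_sync_global() earlier, the new cpu_has_vmx_invept_global() requirement makes enable_ept self-contained, and the features that depend on EPT are now turned off by their own dedicated checks instead of inside the EPT block. Ignoring explicit module-parameter overrides, the chain reduces to roughly:

	enable_ept = cpu_has_vmx_ept() && cpu_has_vmx_ept_4levels() &&
		     cpu_has_vmx_ept_mt_wb() && cpu_has_vmx_invept_global();
	enable_ept_ad_bits = enable_ept && cpu_has_vmx_ept_ad_bits();
	enable_unrestricted_guest = enable_ept &&
				    cpu_has_vmx_unrestricted_guest();
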
@@ -6776,8 +6772,13 @@ static __init int hardware_setup(void)
        if (enable_ept && !cpu_has_vmx_ept_2m_page())
                kvm_disable_largepages();
 
-       if (!cpu_has_vmx_ple())
+       if (!cpu_has_vmx_ple()) {
                ple_gap = 0;
+               ple_window = 0;
+               ple_window_grow = 0;
+               ple_window_max = 0;
+               ple_window_shrink = 0;
+       }
 
        if (!cpu_has_vmx_apicv()) {
                enable_apicv = 0;
@@ -8415,9 +8416,9 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
        case EXIT_REASON_RDPMC:
                return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
        case EXIT_REASON_RDRAND:
-               return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND);
+               return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
        case EXIT_REASON_RDSEED:
-               return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED);
+               return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
        case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
                return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
        case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
@@ -9475,7 +9476,6 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
        vmx->loaded_vmcs = vmcs;
        vmx_vcpu_put(vcpu);
        vmx_vcpu_load(vcpu, cpu);
-       vcpu->cpu = cpu;
        put_cpu();
 }
 
@@ -9556,11 +9556,9 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
        cpu = get_cpu();
        vmx_vcpu_load(&vmx->vcpu, cpu);
        vmx->vcpu.cpu = cpu;
-       err = vmx_vcpu_setup(vmx);
+       vmx_vcpu_setup(vmx);
        vmx_vcpu_put(&vmx->vcpu);
        put_cpu();
-       if (err)
-               goto free_vmcs;
        if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
                err = alloc_apic_access_page(kvm);
                if (err)
@@ -9568,9 +9566,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
        }
 
        if (enable_ept) {
-               if (!kvm->arch.ept_identity_map_addr)
-                       kvm->arch.ept_identity_map_addr =
-                               VMX_EPT_IDENTITY_PAGETABLE_ADDR;
                err = init_rmode_identity_map(kvm);
                if (err)
                        goto free_vmcs;
@@ -11325,6 +11320,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
        vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
        vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
        vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
+       vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
+       vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
 
        /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1.  */
        if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
@@ -11421,8 +11418,11 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
        leave_guest_mode(vcpu);
 
        if (likely(!vmx->fail)) {
-               prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
-                              exit_qualification);
+               if (exit_reason == -1)
+                       sync_vmcs12(vcpu, vmcs12);
+               else
+                       prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
+                                      exit_qualification);
 
                if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr,
                                         vmcs12->vm_exit_msr_store_count))
@@ -11486,7 +11486,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
         */
        kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
 
-       if (enable_shadow_vmcs)
+       if (enable_shadow_vmcs && exit_reason != -1)
                vmx->nested.sync_shadow_vmcs = true;
 
        /* in case we halted in L2 */
@@ -11510,12 +11510,13 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
                                INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
                }
 
-               trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
-                                              vmcs12->exit_qualification,
-                                              vmcs12->idt_vectoring_info_field,
-                                              vmcs12->vm_exit_intr_info,
-                                              vmcs12->vm_exit_intr_error_code,
-                                              KVM_ISA_VMX);
+               if (exit_reason != -1)
+                       trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
+                                                      vmcs12->exit_qualification,
+                                                      vmcs12->idt_vectoring_info_field,
+                                                      vmcs12->vm_exit_intr_info,
+                                                      vmcs12->vm_exit_intr_error_code,
+                                                      KVM_ISA_VMX);
 
                load_vmcs12_host_state(vcpu, vmcs12);
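
An exit_reason of -1 is an in-kernel convention introduced for vmx_pre_enter_smm() below: leave guest mode and sync guest state back into vmcs12, but record no exit information, schedule no shadow-VMCS sync, and emit no tracepoint, since no architectural VM exit occurred. This works because, after the split done in this series (hunks not shown here), prepare_vmcs12() is essentially sync_vmcs12() plus the exit-information fields; abridged sketch:

	static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
				   u32 exit_reason, u32 exit_intr_info,
				   unsigned long exit_qualification)
	{
		/* update guest state fields: */
		sync_vmcs12(vcpu, vmcs12);

		/* update exit information fields (abridged): */
		vmcs12->vm_exit_reason = exit_reason;
		vmcs12->exit_qualification = exit_qualification;
		vmcs12->vm_exit_intr_info = exit_intr_info;
	}
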
 
@@ -11938,6 +11939,54 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
                        ~FEATURE_CONTROL_LMCE;
 }
 
+static int vmx_smi_allowed(struct kvm_vcpu *vcpu)
+{
+       /* we need a nested vmexit to enter SMM, postpone if run is pending */
+       if (to_vmx(vcpu)->nested.nested_run_pending)
+               return 0;
+       return 1;
+}
+
+static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+       vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
+       if (vmx->nested.smm.guest_mode)
+               nested_vmx_vmexit(vcpu, -1, 0, 0);
+
+       vmx->nested.smm.vmxon = vmx->nested.vmxon;
+       vmx->nested.vmxon = false;
+       return 0;
+}
+
+static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       int ret;
+
+       if (vmx->nested.smm.vmxon) {
+               vmx->nested.vmxon = true;
+               vmx->nested.smm.vmxon = false;
+       }
+
+       if (vmx->nested.smm.guest_mode) {
+               vcpu->arch.hflags &= ~HF_SMM_MASK;
+               ret = enter_vmx_non_root_mode(vcpu, false);
+               vcpu->arch.hflags |= HF_SMM_MASK;
+               if (ret)
+                       return ret;
+
+               vmx->nested.smm.guest_mode = false;
+       }
+       return 0;
+}
+
+static int enable_smi_window(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
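
Taken together, these hooks make an SMI delivered while a vCPU runs L2 behave correctly: a forced nested VM exit (the exit_reason == -1 path above) lands the vCPU in L1 before SMRAM state is saved, and RSM re-enters L2 afterwards. HF_SMM_MASK is dropped around enter_vmx_non_root_mode() so that guest memory, including vmcs12, is accessed through the normal, non-SMM address space. The round trip, as annotation rather than code from this patch:

	/*
	 * SMI while in L2 (sketch):
	 *   vmx_smi_allowed()    - defer the SMI if a nested entry is pending
	 *   vmx_pre_enter_smm()  - nested_vmx_vmexit(vcpu, -1, 0, 0), then
	 *                          remember guest_mode/vmxon and clear vmxon
	 *   ...the SMM handler runs in L1's context...
	 * RSM:
	 *   vmx_pre_leave_smm()  - restore vmxon, then re-enter L2 via
	 *                          enter_vmx_non_root_mode()
	 */
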
 static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
        .cpu_has_kvm_support = cpu_has_kvm_support,
        .disabled_by_bios = vmx_disabled_by_bios,
@@ -12063,6 +12112,11 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 #endif
 
        .setup_mce = vmx_setup_mce,
+
+       .smi_allowed = vmx_smi_allowed,
+       .pre_enter_smm = vmx_pre_enter_smm,
+       .pre_leave_smm = vmx_pre_leave_smm,
+       .enable_smi_window = enable_smi_window,
 };
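
The four new members are consumed by the common SMM emulation in x86.c and emulate.c, added in the same series; the call sites are, to the best of my reading (not part of this diff), roughly:

	/* x86.c, enter_smm(): before the SMRAM state area is written. */
	kvm_x86_ops->pre_enter_smm(vcpu, buf);

	/* emulate.c, RSM emulation: before saved state is reloaded. */
	if (ctxt->ops->pre_leave_smm(ctxt, smbase))
		return X86EMUL_UNHANDLEABLE;
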
 
 static int __init vmx_init(void)