Merge tag 'x86-bugs-2022-06-01' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 6e8fb36bc49a6a07828637fbfb33ff4334dd6778..6df17ef81905f5a14f60ab0eda9ea8787b81b53c 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2505,7 +2505,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
                                &_cpu_based_exec_control) < 0)
                return -EIO;
 #ifdef CONFIG_X86_64
-       if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
+       if (_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)
                _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
                                           ~CPU_BASED_CR8_STORE_EXITING;
 #endif
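
Note on the hunk above: dropping the redundant parentheses doesn't change
behavior, and the statement clears both CR8 exit controls in one step
because the two complemented constants fold into a single mask (De Morgan:
~A & ~B == ~(A | B)). A minimal standalone sketch, using illustrative bit
values rather than the architectural VMCS control encodings:

	#include <assert.h>
	#include <stdint.h>

	#define CR8_LOAD_EXITING	(1u << 19)	/* illustrative value */
	#define CR8_STORE_EXITING	(1u << 20)	/* illustrative value */

	int main(void)
	{
		uint32_t ctl = ~0u;

		/* One &= clears both bits, since ~A & ~B == ~(A | B). */
		ctl &= ~CR8_LOAD_EXITING & ~CR8_STORE_EXITING;
		assert(!(ctl & (CR8_LOAD_EXITING | CR8_STORE_EXITING)));
		return 0;
	}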
@@ -3009,7 +3009,7 @@ static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
 
        if (enable_ept)
                ept_sync_context(construct_eptp(vcpu, root_hpa,
-                                               mmu->shadow_root_level));
+                                               mmu->root_role.level));
        else
                vpid_sync_context(vmx_get_current_vpid(vcpu));
 }
@@ -4446,7 +4446,7 @@ static void init_vmcs(struct vcpu_vmx *vmx)
        if (cpu_has_secondary_exec_ctrls())
                secondary_exec_controls_set(vmx, vmx_secondary_exec_control(vmx));
 
-       if (kvm_vcpu_apicv_active(&vmx->vcpu)) {
+       if (enable_apicv && lapic_in_kernel(&vmx->vcpu)) {
                vmcs_write64(EOI_EXIT_BITMAP0, 0);
                vmcs_write64(EOI_EXIT_BITMAP1, 0);
                vmcs_write64(EOI_EXIT_BITMAP2, 0);
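
Note on the hunk above: kvm_vcpu_apicv_active() is a dynamic, per-vCPU
check that goes false whenever APICv is temporarily inhibited, while
enable_apicv && lapic_in_kernel() is static for the lifetime of the vCPU.
Initializing the EOI-exit bitmaps and posted-interrupt fields under the
static check means a vCPU created while APICv happens to be inhibited
still has the VMCS state needed to re-activate APICv later. A toy model
of the distinction (standalone C; all names below are illustrative):

	#include <assert.h>
	#include <stdbool.h>

	static bool enable_apicv = true;

	struct vcpu { bool lapic_in_kernel; bool apicv_inhibited; };

	/* Old condition: dynamic, false while APICv is inhibited. */
	static bool apicv_active(struct vcpu *v)
	{
		return enable_apicv && v->lapic_in_kernel && !v->apicv_inhibited;
	}

	/* New condition: static, true whenever APICv could become active. */
	static bool apicv_possible(struct vcpu *v)
	{
		return enable_apicv && v->lapic_in_kernel;
	}

	int main(void)
	{
		struct vcpu v = { .lapic_in_kernel = true, .apicv_inhibited = true };

		assert(!apicv_active(&v) && apicv_possible(&v));
		return 0;
	}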
@@ -5473,9 +5473,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
        error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR)
                      ? PFERR_FETCH_MASK : 0;
        /* ept page table entry is present? */
-       error_code |= (exit_qualification &
-                      (EPT_VIOLATION_READABLE | EPT_VIOLATION_WRITABLE |
-                       EPT_VIOLATION_EXECUTABLE))
+       error_code |= (exit_qualification & EPT_VIOLATION_RWX_MASK)
                      ? PFERR_PRESENT_MASK : 0;
 
        error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) != 0 ?
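
Note on the hunk above: the three permission bits of the EPT-violation
exit qualification (per the SDM, bits 3..5 report whether the faulting
translation was readable/writable/executable) are collapsed into a single
EPT_VIOLATION_RWX_MASK; any of them being set means the EPT entry was
present. A standalone sketch of the consolidation (bit positions follow
the SDM, but the macro bodies are a reconstruction, not copied from the
kernel headers):

	#include <assert.h>
	#include <stdint.h>

	#define EPT_VIOLATION_READABLE		(1ull << 3)
	#define EPT_VIOLATION_WRITABLE		(1ull << 4)
	#define EPT_VIOLATION_EXECUTABLE	(1ull << 5)
	#define EPT_VIOLATION_RWX_MASK \
		(EPT_VIOLATION_READABLE | EPT_VIOLATION_WRITABLE | \
		 EPT_VIOLATION_EXECUTABLE)

	int main(void)
	{
		/* A readable-only translation still counts as "present". */
		uint64_t exit_qualification = EPT_VIOLATION_READABLE;

		assert(exit_qualification & EPT_VIOLATION_RWX_MASK);
		return 0;
	}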
@@ -6284,7 +6282,7 @@ static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu)
        int size = PAGE_SIZE << L1D_CACHE_ORDER;
 
        /*
-        * This code is only executed when the the flush mode is 'cond' or
+        * This code is only executed when the flush mode is 'cond' or
         * 'always'
         */
        if (static_branch_likely(&vmx_l1d_flush_cond)) {
@@ -6612,6 +6610,7 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
                return;
 
        handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc));
+       vcpu->arch.at_instruction_boundary = true;
 }
 
 static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
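
Note on the hunk above: setting at_instruction_boundary after the host
interrupt handler returns records that this exit landed on an instruction
boundary; the preemption-reporting path can then key off the flag so the
guest never observes itself "preempted" mid-instruction. A toy model of
the flag's producer and consumer (standalone C; the consumer side is an
assumption about how the flag is used elsewhere, not code from this file):

	#include <assert.h>
	#include <stdbool.h>

	struct vcpu { bool at_instruction_boundary; bool report_preempted; };

	static void handle_external_interrupt(struct vcpu *v)
	{
		/* ... dispatch the host interrupt ... */
		v->at_instruction_boundary = true;
	}

	static void vcpu_put(struct vcpu *v)
	{
		/* Only advertise preemption from a clean boundary. */
		if (v->at_instruction_boundary)
			v->report_preempted = true;
	}

	int main(void)
	{
		struct vcpu v = { 0 };

		handle_external_interrupt(&v);
		vcpu_put(&v);
		assert(v.report_preempted);
		return 0;
	}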
@@ -7893,7 +7892,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
        .cpu_dirty_log_size = PML_ENTITY_NUM,
        .update_cpu_dirty_logging = vmx_update_cpu_dirty_logging,
 
-       .pmu_ops = &intel_pmu_ops,
        .nested_ops = &vmx_nested_ops,
 
        .pi_update_irte = vmx_pi_update_irte,
@@ -7926,7 +7924,7 @@ static unsigned int vmx_handle_intel_pt_intr(void)
        struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
 
        /* '0' on failure so that the !PT case can use a RET0 static call. */
-       if (!kvm_arch_pmi_in_guest(vcpu))
+       if (!vcpu || !kvm_handling_nmi_from_guest(vcpu))
                return 0;
 
        kvm_make_request(KVM_REQ_PMI, vcpu);
@@ -7961,6 +7959,31 @@ static __init void vmx_setup_user_return_msrs(void)
                kvm_add_user_return_msr(vmx_uret_msrs_list[i]);
 }
 
+static void __init vmx_setup_me_spte_mask(void)
+{
+       u64 me_mask = 0;
+
+       /*
+        * kvm_get_shadow_phys_bits() returns shadow_phys_bits.  Use
+        * the former to avoid exposing shadow_phys_bits.
+        *
+	 * On pre-MKTME systems, boot_cpu_data.x86_phys_bits equals
+	 * shadow_phys_bits.  On MKTME and/or TDX capable systems,
+	 * boot_cpu_data.x86_phys_bits holds the physical address width
+	 * without the KeyID bits, while shadow_phys_bits equals the
+	 * MAXPHYADDR reported by CPUID; the bits in between are KeyID bits.
+        */
+       if (boot_cpu_data.x86_phys_bits != kvm_get_shadow_phys_bits())
+               me_mask = rsvd_bits(boot_cpu_data.x86_phys_bits,
+                       kvm_get_shadow_phys_bits() - 1);
+       /*
+	 * Unlike SME, the host kernel doesn't support setting up any
+	 * MKTME KeyID on Intel platforms, so no memory encryption
+	 * bits should be included in the SPTE.
+        */
+       kvm_mmu_set_me_spte_mask(0, me_mask);
+}
+
 static struct kvm_x86_init_ops vmx_init_ops __initdata;
 
 static __init int hardware_setup(void)
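
Note on vmx_setup_me_spte_mask() above: on an MKTME/TDX-capable host, the
KeyID bits occupy the physical-address bits from boot_cpu_data.x86_phys_bits
up to (but not including) shadow_phys_bits, and rsvd_bits() builds a mask
over that inclusive range. A standalone model of the arithmetic (rsvd_bits
is re-implemented here to mirror the kernel helper's inclusive-range
semantics; the widths are an example, not probed values):

	#include <assert.h>
	#include <stdint.h>

	/* Set bits s..e, inclusive. */
	static uint64_t rsvd_bits(int s, int e)
	{
		return ((1ull << (e - s + 1)) - 1) << s;
	}

	int main(void)
	{
		/* Example: MAXPHYADDR 46 with 6 KeyID bits stolen from the
		 * top, so the kernel reports x86_phys_bits == 40. */
		int x86_phys_bits = 40, shadow_phys_bits = 46;
		uint64_t me_mask = rsvd_bits(x86_phys_bits, shadow_phys_bits - 1);

		assert(me_mask == 0x3f0000000000ull);	/* bits 40..45 */
		return 0;
	}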
@@ -8063,6 +8086,12 @@ static __init int hardware_setup(void)
                kvm_mmu_set_ept_masks(enable_ept_ad_bits,
                                      cpu_has_vmx_ept_execute_only());
 
+       /*
+	 * Set up shadow_me_value/shadow_me_mask so that the MKTME KeyID
+	 * bits are included in shadow_zero_check.
+        */
+       vmx_setup_me_spte_mask();
+
        kvm_configure_mmu(enable_ept, 0, vmx_get_max_tdp_level(),
                          ept_caps_to_lpage_level(vmx_capability.ept));
 
@@ -8147,6 +8176,7 @@ static struct kvm_x86_init_ops vmx_init_ops __initdata = {
        .handle_intel_pt_intr = NULL,
 
        .runtime_ops = &vmx_x86_ops,
+       .pmu_ops = &intel_pmu_ops,
 };
 
 static void vmx_cleanup_l1d_flush(void)