KVM: nVMX: Expose load IA32_PERF_GLOBAL_CTRL VM-{Entry,Exit} control
authorOliver Upton <oupton@google.com>
Thu, 14 Nov 2019 00:17:20 +0000 (16:17 -0800)
committerPaolo Bonzini <pbonzini@redhat.com>
Fri, 15 Nov 2019 10:44:16 +0000 (11:44 +0100)
The "load IA32_PERF_GLOBAL_CTRL" bit for VM-entry and VM-exit should
only be exposed to the guest if IA32_PERF_GLOBAL_CTRL is a valid MSR.
Create a new helper to allow pmu_refresh() to update the VM-Entry and
VM-Exit controls to ensure PMU values are initialized when performing
the is_valid_msr() check.

Suggested-by: Jim Mattson <jmattson@google.com>
Co-developed-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Signed-off-by: Oliver Upton <oupton@google.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Reviewed-by: Peter Shier <pshier@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/nested.h
arch/x86/kvm/vmx/pmu_intel.c

index abef0dbe94bb89ab2a83cffade589ffc43d028fc..c6f5e5821d4c34ba27f226f9f1b85672ce84a8c7 100644 (file)
@@ -4359,6 +4359,27 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
        return 0;
 }
 
+void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vmx *vmx;
+
+       if (!nested_vmx_allowed(vcpu))
+               return;
+
+       vmx = to_vmx(vcpu);
+       if (kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL)) {
+               vmx->nested.msrs.entry_ctls_high |=
+                               VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+               vmx->nested.msrs.exit_ctls_high |=
+                               VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
+       } else {
+               vmx->nested.msrs.entry_ctls_high &=
+                               ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+               vmx->nested.msrs.exit_ctls_high &=
+                               ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
+       }
+}
+
 static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
 {
        gva_t gva;
index 4cf1d40da15fef9283e6055c17a30d4d32e3e785..19e6015722a9d91e587f607544c5295f7126b3f6 100644 (file)
@@ -22,6 +22,7 @@ int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
 int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
 int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
                        u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
+void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu);
 
 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
 {
index 0990a12a76a8753f18efd8e6dea7f8cd89582856..7023138b1cb0059151d1e742a60fb198963f6f3f 100644 (file)
@@ -15,6 +15,7 @@
 #include "x86.h"
 #include "cpuid.h"
 #include "lapic.h"
+#include "nested.h"
 #include "pmu.h"
 
 static struct kvm_event_hw_type_mapping intel_arch_events[] = {
@@ -335,6 +336,8 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
                0, pmu->nr_arch_gp_counters);
        bitmap_set(pmu->all_valid_pmc_idx,
                INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
+
+       nested_vmx_pmu_entry_exit_ctls_update(vcpu);
 }
 
 static void intel_pmu_init(struct kvm_vcpu *vcpu)