KVM: x86/pmu: Use separate array for defining "PMU MSRs to save"
Author:     Sean Christopherson <seanjc@google.com>
AuthorDate: Tue, 24 Jan 2023 23:49:02 +0000
Commit:     Sean Christopherson <seanjc@google.com>
CommitDate: Fri, 27 Jan 2023 02:03:42 +0000

Move all potential to-be-saved PMU MSRs into a separate array so that a
future patch can easily omit all PMU MSRs from the list when the PMU is
disabled.

No functional change intended.

Link: https://lore.kernel.org/r/20230124234905.3774678-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
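For context only, not part of this change: with the PMU MSRs split into their
own array, a follow-up can skip probing them entirely when the PMU is
disabled. A minimal sketch of what such a gate could look like in
kvm_init_msr_list(), assuming KVM's existing module-level "enable_pmu" knob:

	for (i = 0; i < ARRAY_SIZE(msrs_to_save_base); i++)
		kvm_probe_msr_to_save(msrs_to_save_base[i]);

	/* Hypothetical follow-up: omit all PMU MSRs if the PMU is disabled. */
	if (enable_pmu) {
		for (i = 0; i < ARRAY_SIZE(msrs_to_save_pmu); i++)
			kvm_probe_msr_to_save(msrs_to_save_pmu[i]);
	}
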
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d4a610ffe2b8c8b1c3225f074a9aa1b7e0820bb5..9b6e1af63531fb0305f35ca2e7a2e5671e96596e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1419,7 +1419,7 @@ EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc);
  * may depend on host virtualization features rather than host cpu features.
  */
 
-static const u32 msrs_to_save_all[] = {
+static const u32 msrs_to_save_base[] = {
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
        MSR_STAR,
 #ifdef CONFIG_X86_64
@@ -1436,6 +1436,10 @@ static const u32 msrs_to_save_all[] = {
        MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
        MSR_IA32_UMWAIT_CONTROL,
 
+       MSR_IA32_XFD, MSR_IA32_XFD_ERR,
+};
+
+static const u32 msrs_to_save_pmu[] = {
        MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
        MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
        MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
@@ -1460,11 +1464,10 @@ static const u32 msrs_to_save_all[] = {
        MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
        MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
        MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,
-
-       MSR_IA32_XFD, MSR_IA32_XFD_ERR,
 };
 
-static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)];
+static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_base) +
+                       ARRAY_SIZE(msrs_to_save_pmu)];
 static unsigned num_msrs_to_save;
 
 static const u32 emulated_msrs_all[] = {
@@ -6994,84 +6997,92 @@ out:
        return r;
 }
 
-static void kvm_init_msr_list(void)
+static void kvm_probe_msr_to_save(u32 msr_index)
 {
        u32 dummy[2];
+
+       if (rdmsr_safe(msr_index, &dummy[0], &dummy[1]))
+               return;
+
+       /*
+        * Even MSRs that are valid in the host may not be exposed to guests in
+        * some cases.
+        */
+       switch (msr_index) {
+       case MSR_IA32_BNDCFGS:
+               if (!kvm_mpx_supported())
+                       return;
+               break;
+       case MSR_TSC_AUX:
+               if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP) &&
+                   !kvm_cpu_cap_has(X86_FEATURE_RDPID))
+                       return;
+               break;
+       case MSR_IA32_UMWAIT_CONTROL:
+               if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG))
+                       return;
+               break;
+       case MSR_IA32_RTIT_CTL:
+       case MSR_IA32_RTIT_STATUS:
+               if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT))
+                       return;
+               break;
+       case MSR_IA32_RTIT_CR3_MATCH:
+               if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
+                   !intel_pt_validate_hw_cap(PT_CAP_cr3_filtering))
+                       return;
+               break;
+       case MSR_IA32_RTIT_OUTPUT_BASE:
+       case MSR_IA32_RTIT_OUTPUT_MASK:
+               if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
+                   (!intel_pt_validate_hw_cap(PT_CAP_topa_output) &&
+                    !intel_pt_validate_hw_cap(PT_CAP_single_range_output)))
+                       return;
+               break;
+       case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
+               if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
+                   (msr_index - MSR_IA32_RTIT_ADDR0_A >=
+                    intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2))
+                       return;
+               break;
+       case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR_MAX:
+               if (msr_index - MSR_ARCH_PERFMON_PERFCTR0 >=
+                   kvm_pmu_cap.num_counters_gp)
+                       return;
+               break;
+       case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL_MAX:
+               if (msr_index - MSR_ARCH_PERFMON_EVENTSEL0 >=
+                   kvm_pmu_cap.num_counters_gp)
+                       return;
+               break;
+       case MSR_IA32_XFD:
+       case MSR_IA32_XFD_ERR:
+               if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
+                       return;
+               break;
+       default:
+               break;
+       }
+
+       msrs_to_save[num_msrs_to_save++] = msr_index;
+}
+
+static void kvm_init_msr_list(void)
+{
        unsigned i;
 
        BUILD_BUG_ON_MSG(KVM_PMC_MAX_FIXED != 3,
-                        "Please update the fixed PMCs in msrs_to_saved_all[]");
+                        "Please update the fixed PMCs in msrs_to_save_pmu[]");
 
        num_msrs_to_save = 0;
        num_emulated_msrs = 0;
        num_msr_based_features = 0;
 
-       for (i = 0; i < ARRAY_SIZE(msrs_to_save_all); i++) {
-               if (rdmsr_safe(msrs_to_save_all[i], &dummy[0], &dummy[1]) < 0)
-                       continue;
-
-               /*
-                * Even MSRs that are valid in the host may not be exposed
-                * to the guests in some cases.
-                */
-               switch (msrs_to_save_all[i]) {
-               case MSR_IA32_BNDCFGS:
-                       if (!kvm_mpx_supported())
-                               continue;
-                       break;
-               case MSR_TSC_AUX:
-                       if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP) &&
-                           !kvm_cpu_cap_has(X86_FEATURE_RDPID))
-                               continue;
-                       break;
-               case MSR_IA32_UMWAIT_CONTROL:
-                       if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG))
-                               continue;
-                       break;
-               case MSR_IA32_RTIT_CTL:
-               case MSR_IA32_RTIT_STATUS:
-                       if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT))
-                               continue;
-                       break;
-               case MSR_IA32_RTIT_CR3_MATCH:
-                       if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
-                           !intel_pt_validate_hw_cap(PT_CAP_cr3_filtering))
-                               continue;
-                       break;
-               case MSR_IA32_RTIT_OUTPUT_BASE:
-               case MSR_IA32_RTIT_OUTPUT_MASK:
-                       if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
-                               (!intel_pt_validate_hw_cap(PT_CAP_topa_output) &&
-                                !intel_pt_validate_hw_cap(PT_CAP_single_range_output)))
-                               continue;
-                       break;
-               case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
-                       if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
-                               msrs_to_save_all[i] - MSR_IA32_RTIT_ADDR0_A >=
-                               intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2)
-                               continue;
-                       break;
-               case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR_MAX:
-                       if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
-                           kvm_pmu_cap.num_counters_gp)
-                               continue;
-                       break;
-               case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL_MAX:
-                       if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
-                           kvm_pmu_cap.num_counters_gp)
-                               continue;
-                       break;
-               case MSR_IA32_XFD:
-               case MSR_IA32_XFD_ERR:
-                       if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
-                               continue;
-                       break;
-               default:
-                       break;
-               }
+       for (i = 0; i < ARRAY_SIZE(msrs_to_save_base); i++)
+               kvm_probe_msr_to_save(msrs_to_save_base[i]);
 
-               msrs_to_save[num_msrs_to_save++] = msrs_to_save_all[i];
-       }
+       for (i = 0; i < ARRAY_SIZE(msrs_to_save_pmu); i++)
+               kvm_probe_msr_to_save(msrs_to_save_pmu[i]);
 
        for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) {
                if (!static_call(kvm_x86_has_emulated_msr)(NULL, emulated_msrs_all[i]))