KVM: x86: Update vPMCs when retiring instructions
author Eric Hankland <ehankland@google.com>
Tue, 30 Nov 2021 07:42:20 +0000 (15:42 +0800)
committer Paolo Bonzini <pbonzini@redhat.com>
Fri, 7 Jan 2022 15:44:42 +0000 (10:44 -0500)
When KVM retires a guest instruction through emulation, increment any
vPMCs that are configured to monitor "instructions retired," and
update the sample period of those counters so that they will overflow
at the right time.
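
For context, "instructions retired" here is the Intel architectural event
with event select 0xC0 and unit mask 0x00, which pmc_perf_hw_id() maps to
the generic PERF_COUNT_HW_INSTRUCTIONS id.  A guest that wants such a
counter programs a general-purpose PMC roughly as in the sketch below
(illustrative only, not part of this patch; MSR numbers and bit layout
follow the SDM, and wrmsrl() from <asm/msr.h> is used for brevity):

	#define GUEST_PERFEVTSEL0	0x186		/* IA32_PERFEVTSEL0 */
	#define GUEST_PMC0		0x0c1		/* IA32_PMC0 */
	#define EVTSEL_INST_RETIRED	0x00c0ULL	/* event 0xC0, umask 0x00 */
	#define EVTSEL_USR		(1ULL << 16)	/* count at CPL > 0 */
	#define EVTSEL_OS		(1ULL << 17)	/* count at CPL 0 */
	#define EVTSEL_EN		(1ULL << 22)	/* enable the counter */

	static void guest_count_instructions(void)
	{
		/* Zero the counter, then count retired instructions at any CPL. */
		wrmsrl(GUEST_PMC0, 0);
		wrmsrl(GUEST_PERFEVTSEL0,
		       EVTSEL_INST_RETIRED | EVTSEL_USR | EVTSEL_OS | EVTSEL_EN);
	}

Before this change, instructions that KVM emulated or skipped on the
guest's behalf were never reflected in such a counter.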

Signed-off-by: Eric Hankland <ehankland@google.com>
[jmattson:
  - Split the code to increment "branch instructions retired" into a
    separate commit.
  - Added 'static' to kvm_pmu_incr_counter() definition.
  - Modified kvm_pmu_incr_counter() to check pmc->perf_event->state ==
    PERF_EVENT_STATE_ACTIVE.
]
Fixes: f5132b01386b ("KVM: Expose a version 2 architectural PMU to a guests")
Signed-off-by: Jim Mattson <jmattson@google.com>
[likexu:
  - Drop checks for pmc->perf_event or event state or event type;
  - Increase a counter once its umask bits and the first 8 select bits are
    matched (see the worked example below);
  - Rewrite kvm_pmu_incr_counter() with a less invasive approach to the host perf;
  - Rename kvm_pmu_record_event to kvm_pmu_trigger_event;
  - Add counter enable and CPL checks to kvm_pmu_trigger_event().
]
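
Concretely, the select/umask match works like this (values illustrative,
not code from this patch):

	u64 eventsel = 0x005300c0ULL;	/* EN | INT | USR | OS, event 0xC0, umask 0x00 */
	u64 key = eventsel & (ARCH_PERFMON_EVENTSEL_EVENT | ARCH_PERFMON_EVENTSEL_UMASK);
	/*
	 * key == 0xc0, which pmc_perf_hw_id() resolves to
	 * PERF_COUNT_HW_INSTRUCTIONS, so the counter is incremented; the edge
	 * detect, invert and CMASK bits never enter the comparison.
	 */
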
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Like Xu <likexu@tencent.com>
Message-Id: <20211130074221.93635-6-likexu@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/pmu.c
arch/x86/kvm/pmu.h
arch/x86/kvm/x86.c

arch/x86/kvm/pmu.c
index a20207ee4014c2d592ec88ac033bfc947e44c810..8abdadb7e22ad0b565c17a8cc142da4bdb047adf 100644
@@ -482,6 +482,66 @@ void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
        kvm_pmu_reset(vcpu);
 }
 
+static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
+{
+       struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+       u64 prev_count;
+
+       prev_count = pmc->counter;
+       pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc);
+
+       reprogram_counter(pmu, pmc->idx);
+       if (pmc->counter < prev_count)
+               __kvm_perf_overflow(pmc, false);
+}
+
+static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc,
+       unsigned int perf_hw_id)
+{
+       u64 old_eventsel = pmc->eventsel;
+       unsigned int config;
+
+       pmc->eventsel &= (ARCH_PERFMON_EVENTSEL_EVENT | ARCH_PERFMON_EVENTSEL_UMASK);
+       config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
+       pmc->eventsel = old_eventsel;
+       return config == perf_hw_id;
+}
+
+static inline bool cpl_is_matched(struct kvm_pmc *pmc)
+{
+       bool select_os, select_user;
+       u64 config = pmc->current_config;
+
+       if (pmc_is_gp(pmc)) {
+               select_os = config & ARCH_PERFMON_EVENTSEL_OS;
+               select_user = config & ARCH_PERFMON_EVENTSEL_USR;
+       } else {
+               select_os = config & 0x1;
+               select_user = config & 0x2;
+       }
+
+       return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? select_os : select_user;
+}
+
+void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
+{
+       struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+       struct kvm_pmc *pmc;
+       int i;
+
+       for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
+               pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i);
+
+               if (!pmc || !pmc_is_enabled(pmc) || !pmc_speculative_in_use(pmc))
+                       continue;
+
+               /* Ignore checks for edge detect, pin control, invert and CMASK bits */
+               if (eventsel_match_perf_hw_id(pmc, perf_hw_id) && cpl_is_matched(pmc))
+                       kvm_pmu_incr_counter(pmc);
+       }
+}
+EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);
+
 int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
 {
        struct kvm_pmu_event_filter tmp, *filter;
arch/x86/kvm/pmu.h
index c91d9725aafdf1cac9d89b70e940ac7ed83a1738..7a7b8d5b775e9173c71eb23d1504542a7e119b4d 100644
@@ -157,6 +157,7 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu);
 void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
 void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
+void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id);
 
 bool is_vmware_backdoor_pmc(u32 pmc_idx);
 
arch/x86/kvm/x86.c
index 9ddad9493cb8880b78fab0a151f0b6eb23cac970..4fbde3a9b9d0d27a304fbaa0184d2035f2f8f715 100644
@@ -7990,6 +7990,8 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
        if (unlikely(!r))
                return 0;
 
+       kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS);
+
        /*
         * rflags is the old, "raw" value of the flags.  The new value has
         * not been saved yet.
@@ -8252,6 +8254,7 @@ writeback:
                vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
                if (!ctxt->have_exception ||
                    exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
+                       kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS);
                        kvm_rip_write(vcpu, ctxt->eip);
                        if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
                                r = kvm_vcpu_do_singlestep(vcpu);