KVM: Expose a version 2 architectural PMU to guests
author     Gleb Natapov <gleb@redhat.com>
           Thu, 10 Nov 2011 12:57:22 +0000 (14:57 +0200)
committer  Avi Kivity <avi@redhat.com>
           Tue, 27 Dec 2011 09:24:29 +0000 (11:24 +0200)
Use perf_events to emulate an architectural PMU, version 2.

Based on PMU version 1 emulation by Avi Kivity.

[avi: adjust for cpuid.c]
[jan: fix anonymous field initialization for older gcc]

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/Kconfig
arch/x86/kvm/Makefile
arch/x86/kvm/cpuid.c
arch/x86/kvm/pmu.c [new file with mode: 0644]
arch/x86/kvm/x86.c
include/linux/kvm_host.h

index 020413afb2856bc27a75797b9c600a00cce2cc3c..fb60ffdb4e4332b0c5a0abe194e6368f1f64e904 100644 (file)
 #include <linux/mmu_notifier.h>
 #include <linux/tracepoint.h>
 #include <linux/cpumask.h>
+#include <linux/irq_work.h>
 
 #include <linux/kvm.h>
 #include <linux/kvm_para.h>
 #include <linux/kvm_types.h>
+#include <linux/perf_event.h>
 
 #include <asm/pvclock-abi.h>
 #include <asm/desc.h>
@@ -291,6 +293,37 @@ struct kvm_mmu {
        u64 pdptrs[4]; /* pae */
 };
 
+enum pmc_type {
+       KVM_PMC_GP = 0,
+       KVM_PMC_FIXED,
+};
+
+struct kvm_pmc {
+       enum pmc_type type;
+       u8 idx;
+       u64 counter;
+       u64 eventsel;
+       struct perf_event *perf_event;
+       struct kvm_vcpu *vcpu;
+};
+
+struct kvm_pmu {
+       unsigned nr_arch_gp_counters;
+       unsigned nr_arch_fixed_counters;
+       unsigned available_event_types;
+       u64 fixed_ctr_ctrl;
+       u64 global_ctrl;
+       u64 global_status;
+       u64 global_ovf_ctrl;
+       u64 counter_bitmask[2];
+       u64 global_ctrl_mask;
+       u8 version;
+       struct kvm_pmc gp_counters[X86_PMC_MAX_GENERIC];
+       struct kvm_pmc fixed_counters[X86_PMC_MAX_FIXED];
+       struct irq_work irq_work;
+       u64 reprogram_pmi;
+};
+
 struct kvm_vcpu_arch {
        /*
         * rip and regs accesses must go through
@@ -424,6 +457,8 @@ struct kvm_vcpu_arch {
        unsigned access;
        gfn_t mmio_gfn;
 
+       struct kvm_pmu pmu;
+
        /* used for guest single stepping over the given code position */
        unsigned long singlestep_rip;
 
@@ -891,4 +926,17 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
 
+int kvm_is_in_guest(void);
+
+void kvm_pmu_init(struct kvm_vcpu *vcpu);
+void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
+void kvm_pmu_reset(struct kvm_vcpu *vcpu);
+void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
+bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
+int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
+void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
+void kvm_deliver_pmi(struct kvm_vcpu *vcpu);
+
 #endif /* _ASM_X86_KVM_HOST_H */
index ca4d49ed9a6c162d639ff82980cc2d64e3a88a25..1a7fe868f375cb6bccb281599359b8c2dbad8073 100644 (file)
@@ -35,6 +35,7 @@ config KVM
        select KVM_MMIO
        select TASKSTATS
        select TASK_DELAY_ACCT
+       select PERF_EVENTS
        ---help---
          Support hosting fully virtualized guest machines using hardware
          virtualization extensions.  You will need a fairly recent
index 161b76ae87c43a1b3eab457b7c07152c0f1b2fc3..4f579e8dcacf6747a7e3a34db765bf112233680f 100644 (file)
@@ -12,7 +12,7 @@ kvm-$(CONFIG_IOMMU_API)       += $(addprefix ../../../virt/kvm/, iommu.o)
 kvm-$(CONFIG_KVM_ASYNC_PF)     += $(addprefix ../../../virt/kvm/, async_pf.o)
 
 kvm-y                  += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
-                          i8254.o timer.o cpuid.o
+                          i8254.o timer.o cpuid.o pmu.o
 kvm-intel-y            += vmx.o
 kvm-amd-y              += svm.o
 
index ca63032bf03d2e9eabeff37b8f31936b9b2a6d5a..e70be46f50fca50196f94fef1e0266ff1475b81c 100644 (file)
@@ -45,6 +45,8 @@ void kvm_update_cpuid(struct kvm_vcpu *vcpu)
                else
                        apic->lapic_timer.timer_mode_mask = 1 << 17;
        }
+
+       kvm_pmu_cpuid_update(vcpu);
 }
 
 static int is_efer_nx(void)
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
new file mode 100644 (file)
index 0000000..7aad544
--- /dev/null
@@ -0,0 +1,533 @@
+/*
+ * Kernel-based Virtual Machine -- Performance Monitoring Unit support
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates.
+ *
+ * Authors:
+ *   Avi Kivity   <avi@redhat.com>
+ *   Gleb Natapov <gleb@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kvm_host.h>
+#include <linux/perf_event.h>
+#include "x86.h"
+#include "cpuid.h"
+#include "lapic.h"
+
+static struct kvm_arch_event_perf_mapping {
+       u8 eventsel;
+       u8 unit_mask;
+       unsigned event_type;
+       bool inexact;
+} arch_events[] = {
+       /* Index must match CPUID 0x0A.EBX bit vector */
+       [0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
+       [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
+       [2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES  },
+       [3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
+       [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
+       [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
+       [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
+};
+
+/* mapping between fixed pmc index and arch_events array */
+int fixed_pmc_events[] = {1, 0, 2};
+
+static bool pmc_is_gp(struct kvm_pmc *pmc)
+{
+       return pmc->type == KVM_PMC_GP;
+}
+
+static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
+{
+       struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+
+       return pmu->counter_bitmask[pmc->type];
+}
+
+static inline bool pmc_enabled(struct kvm_pmc *pmc)
+{
+       struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+       return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
+}
+
+static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
+                                        u32 base)
+{
+       if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
+               return &pmu->gp_counters[msr - base];
+       return NULL;
+}
+
+static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
+{
+       int base = MSR_CORE_PERF_FIXED_CTR0;
+       if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
+               return &pmu->fixed_counters[msr - base];
+       return NULL;
+}
+
+static inline struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx)
+{
+       return get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + idx);
+}
+
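+/*
+ * Global counter indices: general-purpose counters occupy the low indices,
+ * fixed counters start at X86_PMC_IDX_FIXED.
+ */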
+static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
+{
+       if (idx < X86_PMC_IDX_FIXED)
+               return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0);
+       else
+               return get_fixed_pmc_idx(pmu, idx - X86_PMC_IDX_FIXED);
+}
+
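+/* Deliver a PMI to the guest through the local APIC's LVTPC entry. */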
+void kvm_deliver_pmi(struct kvm_vcpu *vcpu)
+{
+       if (vcpu->arch.apic)
+               kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
+}
+
+static void trigger_pmi(struct irq_work *irq_work)
+{
+       struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu,
+                       irq_work);
+       struct kvm_vcpu *vcpu = container_of(pmu, struct kvm_vcpu,
+                       arch.pmu);
+
+       kvm_deliver_pmi(vcpu);
+}
+
+static void kvm_perf_overflow(struct perf_event *perf_event,
+                             struct perf_sample_data *data,
+                             struct pt_regs *regs)
+{
+       struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+       struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+       __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
+}
+
+static void kvm_perf_overflow_intr(struct perf_event *perf_event,
+               struct perf_sample_data *data, struct pt_regs *regs)
+{
+       struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+       struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+       if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
+               kvm_perf_overflow(perf_event, data, regs);
+               kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
+               /*
+                * Inject PMI. If the vcpu was in guest mode when the NMI
+                * arrived, the PMI can be injected on the next guest-mode
+                * re-entry. Otherwise we can't be sure the vcpu wasn't
+                * executing a hlt instruction at the time of the vmexit and
+                * won't re-enter guest mode until it is woken up. So we
+                * should wake it, but that is impossible from NMI context.
+                * Do it from irq work instead.
+                */
+               if (!kvm_is_in_guest())
+                       irq_work_queue(&pmc->vcpu->arch.pmu.irq_work);
+               else
+                       kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
+       }
+}
+
+static u64 read_pmc(struct kvm_pmc *pmc)
+{
+       u64 counter, enabled, running;
+
+       counter = pmc->counter;
+
+       if (pmc->perf_event)
+               counter += perf_event_read_value(pmc->perf_event,
+                                                &enabled, &running);
+
+       /* FIXME: Scaling needed? */
+
+       return counter & pmc_bitmask(pmc);
+}
+
+static void stop_counter(struct kvm_pmc *pmc)
+{
+       if (pmc->perf_event) {
+               pmc->counter = read_pmc(pmc);
+               perf_event_release_kernel(pmc->perf_event);
+               pmc->perf_event = NULL;
+       }
+}
+
+static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
+               unsigned config, bool exclude_user, bool exclude_kernel,
+               bool intr)
+{
+       struct perf_event *event;
+       struct perf_event_attr attr = {
+               .type = type,
+               .size = sizeof(attr),
+               .pinned = true,
+               .exclude_idle = true,
+               .exclude_host = 1,
+               .exclude_user = exclude_user,
+               .exclude_kernel = exclude_kernel,
+               .config = config,
+       };
+
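+       /* Arm perf so the host event overflows when the guest counter wraps. */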
+       attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
+
+       event = perf_event_create_kernel_counter(&attr, -1, current,
+                                                intr ? kvm_perf_overflow_intr :
+                                                kvm_perf_overflow, pmc);
+       if (IS_ERR(event)) {
+               printk_once("kvm: pmu event creation failed %ld\n",
+                               PTR_ERR(event));
+               return;
+       }
+
+       pmc->perf_event = event;
+       clear_bit(pmc->idx, (unsigned long*)&pmc->vcpu->arch.pmu.reprogram_pmi);
+}
+
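+/*
+ * Map a guest event_select/unit_mask pair to a generic perf hardware event,
+ * provided the guest's CPUID marks that architectural event as available.
+ */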
+static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
+               u8 unit_mask)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(arch_events); i++)
+               if (arch_events[i].eventsel == event_select
+                               && arch_events[i].unit_mask == unit_mask
+                               && (pmu->available_event_types & (1 << i)))
+                       break;
+
+       if (i == ARRAY_SIZE(arch_events))
+               return PERF_COUNT_HW_MAX;
+
+       return arch_events[i].event_type;
+}
+
+static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+{
+       unsigned config, type = PERF_TYPE_RAW;
+       u8 event_select, unit_mask;
+
+       pmc->eventsel = eventsel;
+
+       stop_counter(pmc);
+
+       if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_enabled(pmc))
+               return;
+
+       event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
+       unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
+
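+       /* Edge/invert/cmask filters cannot map to a generic event; use a raw one. */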
+       if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
+                               ARCH_PERFMON_EVENTSEL_INV |
+                               ARCH_PERFMON_EVENTSEL_CMASK))) {
+               config = find_arch_event(&pmc->vcpu->arch.pmu, event_select,
+                               unit_mask);
+               if (config != PERF_COUNT_HW_MAX)
+                       type = PERF_TYPE_HARDWARE;
+       }
+
+       if (type == PERF_TYPE_RAW)
+               config = eventsel & X86_RAW_EVENT_MASK;
+
+       reprogram_counter(pmc, type, config,
+                       !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
+                       !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
+                       eventsel & ARCH_PERFMON_EVENTSEL_INT);
+}
+
+static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
+{
+       unsigned en = en_pmi & 0x3;
+       bool pmi = en_pmi & 0x8;
+
+       stop_counter(pmc);
+
+       if (!en || !pmc_enabled(pmc))
+               return;
+
+       reprogram_counter(pmc, PERF_TYPE_HARDWARE,
+                       arch_events[fixed_pmc_events[idx]].event_type,
+                       !(en & 0x2), /* exclude user */
+                       !(en & 0x1), /* exclude kernel */
+                       pmi);
+}
+
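+/*
+ * Each fixed counter has a 4-bit field in FIXED_CTR_CTRL: bits 0-1 enable
+ * counting in ring 0 and rings >0, bit 3 enables the overflow PMI.
+ */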
+static inline u8 fixed_en_pmi(u64 ctrl, int idx)
+{
+       return (ctrl >> (idx * 4)) & 0xf;
+}
+
+static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
+{
+       int i;
+
+       for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
+               u8 en_pmi = fixed_en_pmi(data, i);
+               struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i);
+
+               if (fixed_en_pmi(pmu->fixed_ctr_ctrl, i) == en_pmi)
+                       continue;
+
+               reprogram_fixed_counter(pmc, en_pmi, i);
+       }
+
+       pmu->fixed_ctr_ctrl = data;
+}
+
+static void reprogram_idx(struct kvm_pmu *pmu, int idx)
+{
+       struct kvm_pmc *pmc = global_idx_to_pmc(pmu, idx);
+
+       if (!pmc)
+               return;
+
+       if (pmc_is_gp(pmc))
+               reprogram_gp_counter(pmc, pmc->eventsel);
+       else {
+               int fidx = idx - X86_PMC_IDX_FIXED;
+               reprogram_fixed_counter(pmc,
+                               fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx);
+       }
+}
+
+static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
+{
+       int bit;
+       u64 diff = pmu->global_ctrl ^ data;
+
+       pmu->global_ctrl = data;
+
+       for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
+               reprogram_idx(pmu, bit);
+}
+
+bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
+{
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+       int ret;
+
+       switch (msr) {
+       case MSR_CORE_PERF_FIXED_CTR_CTRL:
+       case MSR_CORE_PERF_GLOBAL_STATUS:
+       case MSR_CORE_PERF_GLOBAL_CTRL:
+       case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+               ret = pmu->version > 1;
+               break;
+       default:
+               ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)
+                       || get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0)
+                       || get_fixed_pmc(pmu, msr);
+               break;
+       }
+       return ret;
+}
+
+int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
+{
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+       struct kvm_pmc *pmc;
+
+       switch (index) {
+       case MSR_CORE_PERF_FIXED_CTR_CTRL:
+               *data = pmu->fixed_ctr_ctrl;
+               return 0;
+       case MSR_CORE_PERF_GLOBAL_STATUS:
+               *data = pmu->global_status;
+               return 0;
+       case MSR_CORE_PERF_GLOBAL_CTRL:
+               *data = pmu->global_ctrl;
+               return 0;
+       case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+               *data = pmu->global_ovf_ctrl;
+               return 0;
+       default:
+               if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
+                               (pmc = get_fixed_pmc(pmu, index))) {
+                       *data = read_pmc(pmc);
+                       return 0;
+               } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
+                       *data = pmc->eventsel;
+                       return 0;
+               }
+       }
+       return 1;
+}
+
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
+{
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+       struct kvm_pmc *pmc;
+
+       switch (index) {
+       case MSR_CORE_PERF_FIXED_CTR_CTRL:
+               if (pmu->fixed_ctr_ctrl == data)
+                       return 0;
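+               /* Reject writes that set bits reserved in a version 2 PMU. */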
+               if (!(data & 0xfffffffffffff444)) {
+                       reprogram_fixed_counters(pmu, data);
+                       return 0;
+               }
+               break;
+       case MSR_CORE_PERF_GLOBAL_STATUS:
+               break; /* RO MSR */
+       case MSR_CORE_PERF_GLOBAL_CTRL:
+               if (pmu->global_ctrl == data)
+                       return 0;
+               if (!(data & pmu->global_ctrl_mask)) {
+                       global_ctrl_changed(pmu, data);
+                       return 0;
+               }
+               break;
+       case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
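+               /* Allow clearing overflow status of valid counters plus bits 62 and 63. */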
+               if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
+                       pmu->global_status &= ~data;
+                       pmu->global_ovf_ctrl = data;
+                       return 0;
+               }
+               break;
+       default:
+               if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
+                               (pmc = get_fixed_pmc(pmu, index))) {
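+                       /* Counter writes carry 32 bits of payload; sign-extend before applying. */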
+                       data = (s64)(s32)data;
+                       pmc->counter += data - read_pmc(pmc);
+                       return 0;
+               } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
+                       if (data == pmc->eventsel)
+                               return 0;
+                       if (!(data & 0xffffffff00200000ull)) {
+                               reprogram_gp_counter(pmc, data);
+                               return 0;
+                       }
+               }
+       }
+       return 1;
+}
+
+int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
+{
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
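+       /* RDPMC index: bit 30 selects the fixed counters, bit 31 asks for a 32-bit result. */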
+       bool fast_mode = pmc & (1u << 31);
+       bool fixed = pmc & (1u << 30);
+       struct kvm_pmc *counters;
+       u64 ctr;
+
+       pmc &= (3u << 30) - 1;
+       if (!fixed && pmc >= pmu->nr_arch_gp_counters)
+               return 1;
+       if (fixed && pmc >= pmu->nr_arch_fixed_counters)
+               return 1;
+       counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
+       ctr = read_pmc(&counters[pmc]);
+       if (fast_mode)
+               ctr = (u32)ctr;
+       *data = ctr;
+
+       return 0;
+}
+
+void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
+{
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+       struct kvm_cpuid_entry2 *entry;
+       unsigned bitmap_len;
+
+       pmu->nr_arch_gp_counters = 0;
+       pmu->nr_arch_fixed_counters = 0;
+       pmu->counter_bitmask[KVM_PMC_GP] = 0;
+       pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
+       pmu->version = 0;
+
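+       /*
+        * CPUID.0AH: EAX carries the PMU version and the general-purpose
+        * counter count and width, EBX the bitmap of unavailable events,
+        * EDX the fixed counter count and width.
+        */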
+       entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
+       if (!entry)
+               return;
+
+       pmu->version = entry->eax & 0xff;
+       if (!pmu->version)
+               return;
+
+       pmu->nr_arch_gp_counters = min((int)(entry->eax >> 8) & 0xff,
+                       X86_PMC_MAX_GENERIC);
+       pmu->counter_bitmask[KVM_PMC_GP] =
+               ((u64)1 << ((entry->eax >> 16) & 0xff)) - 1;
+       bitmap_len = (entry->eax >> 24) & 0xff;
+       pmu->available_event_types = ~entry->ebx & ((1ull << bitmap_len) - 1);
+
+       if (pmu->version == 1) {
+               pmu->global_ctrl = (1 << pmu->nr_arch_gp_counters) - 1;
+               return;
+       }
+
+       pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f),
+                       X86_PMC_MAX_FIXED);
+       pmu->counter_bitmask[KVM_PMC_FIXED] =
+               ((u64)1 << ((entry->edx >> 5) & 0xff)) - 1;
+       pmu->global_ctrl_mask = ~(((1 << pmu->nr_arch_gp_counters) - 1)
+                       | (((1ull << pmu->nr_arch_fixed_counters) - 1)
+                               << X86_PMC_IDX_FIXED));
+}
+
+void kvm_pmu_init(struct kvm_vcpu *vcpu)
+{
+       int i;
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+
+       memset(pmu, 0, sizeof(*pmu));
+       for (i = 0; i < X86_PMC_MAX_GENERIC; i++) {
+               pmu->gp_counters[i].type = KVM_PMC_GP;
+               pmu->gp_counters[i].vcpu = vcpu;
+               pmu->gp_counters[i].idx = i;
+       }
+       for (i = 0; i < X86_PMC_MAX_FIXED; i++) {
+               pmu->fixed_counters[i].type = KVM_PMC_FIXED;
+               pmu->fixed_counters[i].vcpu = vcpu;
+               pmu->fixed_counters[i].idx = i + X86_PMC_IDX_FIXED;
+       }
+       init_irq_work(&pmu->irq_work, trigger_pmi);
+       kvm_pmu_cpuid_update(vcpu);
+}
+
+void kvm_pmu_reset(struct kvm_vcpu *vcpu)
+{
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+       int i;
+
+       irq_work_sync(&pmu->irq_work);
+       for (i = 0; i < X86_PMC_MAX_GENERIC; i++) {
+               struct kvm_pmc *pmc = &pmu->gp_counters[i];
+               stop_counter(pmc);
+               pmc->counter = pmc->eventsel = 0;
+       }
+
+       for (i = 0; i < X86_PMC_MAX_FIXED; i++)
+               stop_counter(&pmu->fixed_counters[i]);
+
+       pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
+               pmu->global_ovf_ctrl = 0;
+}
+
+void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
+{
+       kvm_pmu_reset(vcpu);
+}
+
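+/*
+ * Called on KVM_REQ_PMU: reprogram every counter whose overflow was noted
+ * from NMI context in reprogram_pmi.
+ */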
+void kvm_handle_pmu_event(struct kvm_vcpu *vcpu)
+{
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+       u64 bitmask;
+       int bit;
+
+       bitmask = pmu->reprogram_pmi;
+
+       for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
+               struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);
+
+               if (unlikely(!pmc || !pmc->perf_event)) {
+                       clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
+                       continue;
+               }
+
+               reprogram_idx(pmu, bit);
+       }
+}
index 0a646e2b57c56d2ad85f18961da09b9f9cf65c07..08ae951ecc5ca9ce2f027fe73841097fa52a0a5a 100644 (file)
@@ -1602,8 +1602,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
         * which we perfectly emulate ;-). Any other value should be at least
         * reported, some guests depend on them.
         */
-       case MSR_P6_EVNTSEL0:
-       case MSR_P6_EVNTSEL1:
        case MSR_K7_EVNTSEL0:
        case MSR_K7_EVNTSEL1:
        case MSR_K7_EVNTSEL2:
@@ -1615,8 +1613,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
        /* at least RHEL 4 unconditionally writes to the perfctr registers,
         * so we ignore writes to make it happy.
         */
-       case MSR_P6_PERFCTR0:
-       case MSR_P6_PERFCTR1:
        case MSR_K7_PERFCTR0:
        case MSR_K7_PERFCTR1:
        case MSR_K7_PERFCTR2:
@@ -1653,6 +1649,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
        default:
                if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
                        return xen_hvm_config(vcpu, data);
+               if (kvm_pmu_msr(vcpu, msr))
+                       return kvm_pmu_set_msr(vcpu, msr, data);
                if (!ignore_msrs) {
                        pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
                                msr, data);
@@ -1815,10 +1813,6 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        case MSR_K8_SYSCFG:
        case MSR_K7_HWCR:
        case MSR_VM_HSAVE_PA:
-       case MSR_P6_PERFCTR0:
-       case MSR_P6_PERFCTR1:
-       case MSR_P6_EVNTSEL0:
-       case MSR_P6_EVNTSEL1:
        case MSR_K7_EVNTSEL0:
        case MSR_K7_PERFCTR0:
        case MSR_K8_INT_PENDING_MSG:
@@ -1929,6 +1923,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
                data = 0xbe702111;
                break;
        default:
+               if (kvm_pmu_msr(vcpu, msr))
+                       return kvm_pmu_get_msr(vcpu, msr, pdata);
                if (!ignore_msrs) {
                        pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
                        return 1;
@@ -4650,7 +4646,7 @@ static void kvm_timer_init(void)
 
 static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
 
-static int kvm_is_in_guest(void)
+int kvm_is_in_guest(void)
 {
        return __this_cpu_read(current_vcpu) != NULL;
 }
@@ -5114,6 +5110,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        process_nmi(vcpu);
                req_immediate_exit =
                        kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
+               if (kvm_check_request(KVM_REQ_PMU, vcpu))
+                       kvm_handle_pmu_event(vcpu);
+               if (kvm_check_request(KVM_REQ_PMI, vcpu))
+                       kvm_deliver_pmi(vcpu);
        }
 
        r = kvm_mmu_reload(vcpu);
@@ -5850,6 +5850,8 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
        kvm_async_pf_hash_reset(vcpu);
        vcpu->arch.apf.halted = false;
 
+       kvm_pmu_reset(vcpu);
+
        return kvm_x86_ops->vcpu_reset(vcpu);
 }
 
@@ -5934,6 +5936,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
                goto fail_free_mce_banks;
 
        kvm_async_pf_hash_reset(vcpu);
+       kvm_pmu_init(vcpu);
 
        return 0;
 fail_free_mce_banks:
@@ -5952,6 +5955,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
        int idx;
 
+       kvm_pmu_destroy(vcpu);
        kfree(vcpu->arch.mce_banks);
        kvm_free_lapic(vcpu);
        idx = srcu_read_lock(&vcpu->kvm->srcu);
index 7a080383935f3ae13925d2d49a0c4e08b7bcf2f1..900c76337e8f387b1dcf183260e43c1625536500 100644 (file)
@@ -52,6 +52,8 @@
 #define KVM_REQ_STEAL_UPDATE      13
 #define KVM_REQ_NMI               14
 #define KVM_REQ_IMMEDIATE_EXIT    15
+#define KVM_REQ_PMU               16
+#define KVM_REQ_PMI               17
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID    0