RISC-V: KVM: Add skeleton support for perf
Author:     Atish Patra <atishp@rivosinc.com>
AuthorDate: Tue, 7 Feb 2023 09:55:22 +0000 (01:55 -0800)
Commit:     Anup Patel <anup@brainfault.org>
CommitDate: Tue, 7 Feb 2023 15:05:51 +0000 (20:35 +0530)
This patch only adds the bare-bones structure of the perf
implementation. Most of the functions return zero at this point and
will be implemented fully in the future.

Reviewed-by: Anup Patel <anup@brainfault.org>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Signed-off-by: Atish Patra <atishp@rivosinc.com>
Signed-off-by: Anup Patel <anup@brainfault.org>
arch/riscv/include/asm/kvm_host.h
arch/riscv/include/asm/kvm_vcpu_pmu.h [new file with mode: 0644]
arch/riscv/kvm/Makefile
arch/riscv/kvm/vcpu.c
arch/riscv/kvm/vcpu_pmu.c [new file with mode: 0644]

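The handlers below implement the host side of the SBI PMU extension. For
orientation, here is a minimal guest-side sketch (illustrative only, not part
of this patch) of the call that eventually reaches
kvm_riscv_vcpu_pmu_num_ctrs() once these handlers are wired into KVM's SBI
dispatch; sbi_ecall(), SBI_EXT_PMU and the function IDs come from the guest's
<asm/sbi.h>.

    static unsigned long guest_pmu_num_counters(void)
    {
            struct sbiret ret;

            /* Served by kvm_riscv_vcpu_pmu_num_ctrs() on the host side. */
            ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_NUM_COUNTERS,
                            0, 0, 0, 0, 0, 0);

            return ret.error ? 0 : ret.value;
    }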
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 93f43a3e7886538d882c12accf04c9738c3b1985..b90be9a2cc24555ba21d38f62e0c85b590f9ddbe 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -18,6 +18,7 @@
 #include <asm/kvm_vcpu_insn.h>
 #include <asm/kvm_vcpu_sbi.h>
 #include <asm/kvm_vcpu_timer.h>
+#include <asm/kvm_vcpu_pmu.h>
 
 #define KVM_MAX_VCPUS                  1024
 
@@ -228,6 +229,9 @@ struct kvm_vcpu_arch {
 
        /* Don't run the VCPU (blocked) */
        bool pause;
+
+       /* Performance monitoring context */
+       struct kvm_pmu pmu_context;
 };
 
 static inline void kvm_arch_hardware_unsetup(void) {}
diff --git a/arch/riscv/include/asm/kvm_vcpu_pmu.h b/arch/riscv/include/asm/kvm_vcpu_pmu.h
new file mode 100644
index 0000000..c4cd5cd
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_vcpu_pmu.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Rivos Inc
+ *
+ * Authors:
+ *     Atish Patra <atishp@rivosinc.com>
+ */
+
+#ifndef __KVM_VCPU_RISCV_PMU_H
+#define __KVM_VCPU_RISCV_PMU_H
+
+#include <linux/perf/riscv_pmu.h>
+#include <asm/sbi.h>
+
+#ifdef CONFIG_RISCV_PMU_SBI
+#define RISCV_KVM_MAX_FW_CTRS  32
+#define RISCV_KVM_MAX_HW_CTRS  32
+#define RISCV_KVM_MAX_COUNTERS (RISCV_KVM_MAX_HW_CTRS + RISCV_KVM_MAX_FW_CTRS)
+static_assert(RISCV_KVM_MAX_COUNTERS <= 64);
+
+/* Per virtual pmu counter data */
+struct kvm_pmc {
+       u8 idx;
+       struct perf_event *perf_event;
+       u64 counter_val;
+       union sbi_pmu_ctr_info cinfo;
+       /* Event monitoring status */
+       bool started;
+};
+
+/* PMU data structure per vcpu */
+struct kvm_pmu {
+       struct kvm_pmc pmc[RISCV_KVM_MAX_COUNTERS];
+       /* Number of virtual firmware counters available */
+       int num_fw_ctrs;
+       /* Number of virtual hardware counters available */
+       int num_hw_ctrs;
+       /* A flag to indicate that pmu initialization is done */
+       bool init_done;
+       /* Bitmap of all the virtual counters in use */
+       DECLARE_BITMAP(pmc_in_use, RISCV_KVM_MAX_COUNTERS);
+};
+
+#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu_context)
+#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu_context))
+
+int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu, struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx,
+                               struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
+                                unsigned long ctr_mask, unsigned long flags, u64 ival,
+                                struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
+                               unsigned long ctr_mask, unsigned long flags,
+                               struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base,
+                                    unsigned long ctr_mask, unsigned long flags,
+                                    unsigned long eidx, u64 evtdata,
+                                    struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
+                               struct kvm_vcpu_sbi_return *retdata);
+void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu);
+
+#else
+struct kvm_pmu {
+};
+
+static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {}
+static inline void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu) {}
+static inline void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu) {}
+#endif /* CONFIG_RISCV_PMU_SBI */
+#endif /* !__KVM_VCPU_RISCV_PMU_H */
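
A hypothetical helper (not in this patch) showing how the pmc_in_use bitmap
and the vcpu_to_pmu() macro above are intended to be used once a later patch
implements counter allocation; the name pmu_claim_ctr() and its error
convention are assumptions for illustration.

    static int pmu_claim_ctr(struct kvm_vcpu *vcpu, unsigned long ctr_base,
                             unsigned long ctr_mask)
    {
            struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
            int bit;

            /* Walk the guest-supplied mask and grab the first free slot. */
            for_each_set_bit(bit, &ctr_mask, BITS_PER_LONG) {
                    int idx = ctr_base + bit;

                    if (idx >= kvpmu->num_hw_ctrs + kvpmu->num_fw_ctrs)
                            break;
                    /* test_and_set_bit() returns the old bit: 0 means it is ours now. */
                    if (!test_and_set_bit(idx, kvpmu->pmc_in_use))
                            return idx;
            }

            return -ENOENT;
    }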
diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
index 019df9208bdd8dcf7bf61ad6e6d97762ac95a1c9..5de1053a5ce45f2d9e8078038f45f0c1ab46bd3f 100644
--- a/arch/riscv/kvm/Makefile
+++ b/arch/riscv/kvm/Makefile
@@ -25,3 +25,4 @@ kvm-y += vcpu_sbi_base.o
 kvm-y += vcpu_sbi_replace.o
 kvm-y += vcpu_sbi_hsm.o
 kvm-y += vcpu_timer.o
+kvm-$(CONFIG_RISCV_PMU_SBI) += vcpu_pmu.o
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 7c08567097f0a875ebe11afa00b8f29f372e130e..7d010b0be54e1364a714c2b712b64c067c947c11 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -138,6 +138,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
        WRITE_ONCE(vcpu->arch.irqs_pending, 0);
        WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);
 
+       kvm_riscv_vcpu_pmu_reset(vcpu);
+
        vcpu->arch.hfence_head = 0;
        vcpu->arch.hfence_tail = 0;
        memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));
@@ -194,6 +196,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
        /* Setup VCPU timer */
        kvm_riscv_vcpu_timer_init(vcpu);
 
+       /* Setup performance monitoring */
+       kvm_riscv_vcpu_pmu_init(vcpu);
+
        /* Reset VCPU */
        kvm_riscv_reset_vcpu(vcpu);
 
@@ -216,6 +221,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
        /* Cleanup VCPU timer */
        kvm_riscv_vcpu_timer_deinit(vcpu);
 
+       kvm_riscv_vcpu_pmu_deinit(vcpu);
+
        /* Free unused pages pre-allocated for G-stage page table mappings */
        kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
 }
diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c
new file mode 100644
index 0000000..dc3c118
--- /dev/null
+++ b/arch/riscv/kvm/vcpu_pmu.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 Rivos Inc
+ *
+ * Authors:
+ *     Atish Patra <atishp@rivosinc.com>
+ */
+
+#define pr_fmt(fmt)    "riscv-kvm-pmu: " fmt
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/perf/riscv_pmu.h>
+#include <asm/csr.h>
+#include <asm/kvm_vcpu_sbi.h>
+#include <asm/kvm_vcpu_pmu.h>
+
+#define kvm_pmu_num_counters(pmu) ((pmu)->num_hw_ctrs + (pmu)->num_fw_ctrs)
+
+int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu,
+                               struct kvm_vcpu_sbi_return *retdata)
+{
+       struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
+
+       retdata->out_val = kvm_pmu_num_counters(kvpmu);
+
+       return 0;
+}
+
+int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx,
+                               struct kvm_vcpu_sbi_return *retdata)
+{
+       struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
+
+       if (cidx >= RISCV_KVM_MAX_COUNTERS || cidx == 1) {
+               retdata->err_val = SBI_ERR_INVALID_PARAM;
+               return 0;
+       }
+
+       retdata->out_val = kvpmu->pmc[cidx].cinfo.value;
+
+       return 0;
+}
+
+int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
+                                unsigned long ctr_mask, unsigned long flags, u64 ival,
+                                struct kvm_vcpu_sbi_return *retdata)
+{
+       /* TODO */
+       return 0;
+}
+
+int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
+                               unsigned long ctr_mask, unsigned long flags,
+                               struct kvm_vcpu_sbi_return *retdata)
+{
+       /* TODO */
+       return 0;
+}
+
+int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base,
+                                    unsigned long ctr_mask, unsigned long flags,
+                                    unsigned long eidx, u64 evtdata,
+                                    struct kvm_vcpu_sbi_return *retdata)
+{
+       /* TODO */
+       return 0;
+}
+
+int kvm_riscv_vcpu_pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
+                               struct kvm_vcpu_sbi_return *retdata)
+{
+       /* TODO */
+       return 0;
+}
+
+void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu)
+{
+       int i = 0, ret, num_hw_ctrs = 0, hpm_width = 0;
+       struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
+       struct kvm_pmc *pmc;
+
+       ret = riscv_pmu_get_hpm_info(&hpm_width, &num_hw_ctrs);
+       if (ret < 0 || !hpm_width || !num_hw_ctrs)
+               return;
+
+       /*
+        * Add one hardware counter to account for the TIME CSR at virtual index 1.
+        */
+       kvpmu->num_hw_ctrs = num_hw_ctrs + 1;
+       kvpmu->num_fw_ctrs = SBI_PMU_FW_MAX;
+
+       if (kvpmu->num_hw_ctrs > RISCV_KVM_MAX_HW_CTRS) {
+               pr_warn_once("Limiting the hardware counters to 32 as specified by the ISA\n");
+               kvpmu->num_hw_ctrs = RISCV_KVM_MAX_HW_CTRS;
+       }
+
+       /*
+        * There is no correlation between the logical hardware counters and the
+        * virtual counters. However, we need to encode a hpmcounter CSR in the
+        * counter info field so that KVM can trap and emulate the read. This works
+        * well in the migration use case as KVM doesn't care whether the actual
+        * hpmcounter is available in the hardware or not.
+        */
+       for (i = 0; i < kvm_pmu_num_counters(kvpmu); i++) {
+               /* TIME CSR shouldn't be read from perf interface */
+               if (i == 1)
+                       continue;
+               pmc = &kvpmu->pmc[i];
+               pmc->idx = i;
+               if (i < kvpmu->num_hw_ctrs) {
+                       pmc->cinfo.type = SBI_PMU_CTR_TYPE_HW;
+                       if (i < 3)
+                               /* CY, IR counters */
+                               pmc->cinfo.width = 63;
+                       else
+                               pmc->cinfo.width = hpm_width;
+                       /*
+                        * The CSR number doesn't have any relation with the logical
+                        * hardware counters. The CSR numbers are encoded sequentially
+                        * to avoid maintaining a map between the virtual counter
+                        * and CSR number.
+                        */
+                       pmc->cinfo.csr = CSR_CYCLE + i;
+               } else {
+                       pmc->cinfo.type = SBI_PMU_CTR_TYPE_FW;
+                       pmc->cinfo.width = BITS_PER_LONG - 1;
+               }
+       }
+
+       kvpmu->init_done = true;
+}
+
+void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu)
+{
+       /* TODO */
+}
+
+void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu)
+{
+       kvm_riscv_vcpu_pmu_deinit(vcpu);
+}
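
The init code above only encodes each counter's type, width and CSR number
into pmc->cinfo; kvm_riscv_vcpu_pmu_ctr_info() then hands cinfo.value back to
the guest. Below is an illustrative decoder (not part of this patch) relying
on union sbi_pmu_ctr_info from <asm/sbi.h>; note that the width field stores
one less than the number of counter bits, which is why the CY/IR counters are
initialized to 63.

    static void decode_ctr_info(unsigned long info)
    {
            union sbi_pmu_ctr_info cinfo = { .value = info };

            if (cinfo.type == SBI_PMU_CTR_TYPE_HW)
                    pr_info("hw ctr: csr=0x%lx, width=%lu bits\n",
                            (unsigned long)cinfo.csr,
                            (unsigned long)cinfo.width + 1);
            else
                    pr_info("fw ctr: width=%lu bits\n",
                            (unsigned long)cinfo.width + 1);
    }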