x86/xen: Add Hygon Dhyana support to Xen
author Pu Wen <puwen@hygon.cn>
Sun, 23 Sep 2018 09:36:46 +0000 (17:36 +0800)
committer Borislav Petkov <bp@suse.de>
Thu, 27 Sep 2018 16:28:59 +0000 (18:28 +0200)
To make Xen work on the Hygon platform, reuse AMD's Xen support code
path for Hygon Dhyana CPU.

There are six core performance event counters per thread, so there are
six MSRs for these counters. There are also four legacy PMC MSRs, which
are aliases of the counters.

This version uses the legacy and safe variant of MSR access. It was
tested successfully with perf, with VPMU enabled in Xen on the Hygon
platform.

Signed-off-by: Pu Wen <puwen@hygon.cn>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: jgross@suse.com
Cc: tglx@linutronix.de
Cc: mingo@redhat.com
Cc: hpa@zytor.com
Cc: x86@kernel.org
Cc: thomas.lendacky@amd.com
Cc: xen-devel@lists.xenproject.org
Link: https://lkml.kernel.org/r/311bf41f08f24550aa6c5da3f1e03a68d3b89dac.1537533369.git.puwen@hygon.cn
arch/x86/xen/pmu.c

index 7d00d4ad44d44d62f9a6a089a2c8d4a649e6aa08..9403854cde317b45a890f34e5763bd86e3b832f4 100644 (file)
@@ -90,6 +90,12 @@ static void xen_pmu_arch_init(void)
                        k7_counters_mirrored = 0;
                        break;
                }
+       } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+               amd_num_counters = F10H_NUM_COUNTERS;
+               amd_counters_base = MSR_K7_PERFCTR0;
+               amd_ctrls_base = MSR_K7_EVNTSEL0;
+               amd_msr_step = 1;
+               k7_counters_mirrored = 0;
        } else {
                uint32_t eax, ebx, ecx, edx;
 
@@ -285,7 +291,7 @@ static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read)
 
 bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err)
 {
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
                if (is_amd_pmu_msr(msr)) {
                        if (!xen_amd_pmu_emulate(msr, val, 1))
                                *val = native_read_msr_safe(msr, err);
@@ -308,7 +314,7 @@ bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err)
 {
        uint64_t val = ((uint64_t)high << 32) | low;
 
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
                if (is_amd_pmu_msr(msr)) {
                        if (!xen_amd_pmu_emulate(msr, &val, 0))
                                *err = native_write_msr_safe(msr, low, high);
@@ -379,7 +385,7 @@ static unsigned long long xen_intel_read_pmc(int counter)
 
 unsigned long long xen_read_pmc(int counter)
 {
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
                return xen_amd_read_pmc(counter);
        else
                return xen_intel_read_pmc(counter);