linux-2.6-block.git: arch/x86/kvm/pmu_amd.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * Implementation is based on pmu_intel.c file
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

enum pmu_type {
        PMU_TYPE_COUNTER = 0,
        PMU_TYPE_EVNTSEL,
};

enum index {
        INDEX_ZERO = 0,
        INDEX_ONE,
        INDEX_TWO,
        INDEX_THREE,
        INDEX_FOUR,
        INDEX_FIVE,
        INDEX_ERROR,
};

/* duplicated from amd_perfmon_event_map, K7 and above should work. */
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
        [0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
        [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
        [2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
        [3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
        [4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
        [5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
        [6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
        [7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};
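
/*
 * Format note (added commentary): each entry is
 * { event_select, unit_mask, perf generic event id }, mirroring the
 * host's amd_perfmon_event_map so that guest event selects can be
 * translated into perf events the host already knows how to schedule.
 */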

static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
        struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

        if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
                if (type == PMU_TYPE_COUNTER)
                        return MSR_F15H_PERF_CTR;
                else
                        return MSR_F15H_PERF_CTL;
        } else {
                if (type == PMU_TYPE_COUNTER)
                        return MSR_K7_PERFCTR0;
                else
                        return MSR_K7_EVNTSEL0;
        }
}
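
/*
 * Layout sketch (added commentary): the legacy K7 scheme has two
 * separate banks of four MSRs each, MSR_K7_EVNTSEL0..3 and
 * MSR_K7_PERFCTR0..3, while the PERFCTR_CORE scheme has six
 * control/counter pairs starting at MSR_F15H_PERF_CTL/CTR, with CTLn
 * and CTRn interleaved (CTLn = CTL0 + 2n, CTRn = CTR0 + 2n).  The base
 * returned here is therefore enough to reach any counter, see
 * amd_pmc_idx_to_pmc() below.
 */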

static enum index msr_to_index(u32 msr)
{
        switch (msr) {
        case MSR_F15H_PERF_CTL0:
        case MSR_F15H_PERF_CTR0:
        case MSR_K7_EVNTSEL0:
        case MSR_K7_PERFCTR0:
                return INDEX_ZERO;
        case MSR_F15H_PERF_CTL1:
        case MSR_F15H_PERF_CTR1:
        case MSR_K7_EVNTSEL1:
        case MSR_K7_PERFCTR1:
                return INDEX_ONE;
        case MSR_F15H_PERF_CTL2:
        case MSR_F15H_PERF_CTR2:
        case MSR_K7_EVNTSEL2:
        case MSR_K7_PERFCTR2:
                return INDEX_TWO;
        case MSR_F15H_PERF_CTL3:
        case MSR_F15H_PERF_CTR3:
        case MSR_K7_EVNTSEL3:
        case MSR_K7_PERFCTR3:
                return INDEX_THREE;
        case MSR_F15H_PERF_CTL4:
        case MSR_F15H_PERF_CTR4:
                return INDEX_FOUR;
        case MSR_F15H_PERF_CTL5:
        case MSR_F15H_PERF_CTR5:
                return INDEX_FIVE;
        default:
                return INDEX_ERROR;
        }
}
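
/*
 * Equivalent arithmetic (illustration only, not used by the code):
 *
 *      core MSRs:   index = (msr - MSR_F15H_PERF_CTL0) / 2;
 *      legacy MSRs: index = msr - MSR_K7_EVNTSEL0;    (event selects)
 *                   index = msr - MSR_K7_PERFCTR0;    (counters)
 *
 * The explicit switch trades brevity for an obvious mapping and a
 * well-defined INDEX_ERROR for anything unexpected.
 */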

static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
                                             enum pmu_type type)
{
        switch (msr) {
        case MSR_F15H_PERF_CTL0:
        case MSR_F15H_PERF_CTL1:
        case MSR_F15H_PERF_CTL2:
        case MSR_F15H_PERF_CTL3:
        case MSR_F15H_PERF_CTL4:
        case MSR_F15H_PERF_CTL5:
        case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
                if (type != PMU_TYPE_EVNTSEL)
                        return NULL;
                break;
        case MSR_F15H_PERF_CTR0:
        case MSR_F15H_PERF_CTR1:
        case MSR_F15H_PERF_CTR2:
        case MSR_F15H_PERF_CTR3:
        case MSR_F15H_PERF_CTR4:
        case MSR_F15H_PERF_CTR5:
        case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
                if (type != PMU_TYPE_COUNTER)
                        return NULL;
                break;
        default:
                return NULL;
        }

        return &pmu->gp_counters[msr_to_index(msr)];
}
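
/*
 * Usage sketch (added commentary): get_gp_pmc_amd(pmu,
 * MSR_K7_PERFCTR1, PMU_TYPE_COUNTER) resolves to
 * &pmu->gp_counters[INDEX_ONE], while the same MSR queried with
 * PMU_TYPE_EVNTSEL yields NULL; callers probe both types to classify
 * an MSR, see amd_msr_idx_to_pmc() below.
 */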

static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
                                    u8 event_select,
                                    u8 unit_mask)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
                if (amd_event_mapping[i].eventsel == event_select
                    && amd_event_mapping[i].unit_mask == unit_mask)
                        break;

        if (i == ARRAY_SIZE(amd_event_mapping))
                return PERF_COUNT_HW_MAX;

        return amd_event_mapping[i].event_type;
}
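
/*
 * Worked example (illustrative): (0x76, 0x00) matches entry [0] and
 * yields PERF_COUNT_HW_CPU_CYCLES.  A pair with no match, say
 * (0xff, 0xff), falls off the end of the table and yields
 * PERF_COUNT_HW_MAX, which the common reprogramming code in pmu.c
 * treats as "no generic event, program a raw event instead".
 */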

/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
static unsigned amd_find_fixed_event(int idx)
{
        return PERF_COUNT_HW_MAX;
}

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 * Because AMD CPUs don't have a global_ctrl MSR, all PMCs are enabled
 * (return TRUE).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
        return true;
}

static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
        unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
        struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

        if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
                /*
                 * The idx is contiguous. The MSRs are not. The counter MSRs
                 * are interleaved with the event select MSRs.
                 */
                pmc_idx *= 2;
        }

        return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
}
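
/*
 * Worked example (illustrative): with PERFCTR_CORE, base is
 * MSR_F15H_PERF_CTR (i.e. CTR0), so pmc_idx 2 becomes base + 4 ==
 * MSR_F15H_PERF_CTR2, skipping over the interleaved event select MSRs.
 * Without PERFCTR_CORE the K7 counter MSRs are contiguous and the
 * index is used as-is.
 */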

/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        idx &= ~(3u << 30);

        return (idx >= pmu->nr_arch_gp_counters);
}
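
/*
 * Illustration of the mask (added commentary): ~(3u << 30) clears ECX
 * bits 30 and 31, which RDPMC reserves for flags (on Intel, bit 30
 * selects the fixed-counter range; AMD has no fixed counters):
 *
 *      idx = 0x40000001;       // guest set bit 30
 *      idx &= ~(3u << 30);     // idx == 1
 *
 * The index is then valid iff it is below pmu->nr_arch_gp_counters.
 */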

/* idx is the ECX register of the RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
        unsigned int idx, u64 *mask)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *counters;

        idx &= ~(3u << 30);
        if (idx >= pmu->nr_arch_gp_counters)
                return NULL;
        counters = pmu->gp_counters;

        return &counters[idx];
}

static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
        /* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough. */
        return false;
}

static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;

        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

        return pmc;
}

static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;

        /* MSR_PERFCTRn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        if (pmc) {
                *data = pmc_read_counter(pmc);
                return 0;
        }
        /* MSR_EVNTSELn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
        if (pmc) {
                *data = pmc->eventsel;
                return 0;
        }

        return 1;
}
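
/*
 * Flow sketch (added commentary): a guest RDMSR of MSR_K7_PERFCTR0
 * lands here, the first lookup succeeds, and *data becomes the current
 * counter value, masked to the counter width by pmc_read_counter() via
 * counter_bitmask.  An MSR matching neither lookup returns 1, which
 * the common MSR code treats as an unhandled access.
 */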

static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u32 msr = msr_info->index;
        u64 data = msr_info->data;

        /* MSR_PERFCTRn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        if (pmc) {
                pmc->counter += data - pmc_read_counter(pmc);
                return 0;
        }
        /* MSR_EVNTSELn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
        if (pmc) {
                if (data == pmc->eventsel)
                        return 0;
                if (!(data & pmu->reserved_bits)) {
                        reprogram_gp_counter(pmc, data);
                        return 0;
                }
        }

        return 1;
}
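
/*
 * Two details worth noting (added commentary): a counter write adjusts
 * pmc->counter by the delta against the currently observed count, so a
 * running perf event need not be restarted; an event select write only
 * reaches reprogram_gp_counter() when it changes the value and sets no
 * reserved bits, e.g.
 *
 *      data = 0x4300c0;        // enable + user + os, event 0xc0
 *      // (data & pmu->reserved_bits) == 0 -> reprogrammed
 */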

static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
                pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
        else
                pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

        pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
        pmu->reserved_bits = 0xffffffff00200000ull;
        pmu->version = 1;
        /* not applicable to AMD, but clear them to prevent any fallout */
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
        pmu->nr_arch_fixed_counters = 0;
        pmu->global_status = 0;
        bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}
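
/*
 * For reference (added commentary): ((u64)1 << 48) - 1 ==
 * 0xffffffffffff, i.e. the guest sees 48-bit wide counters.  The
 * reserved_bits mask rejects writes that set bit 21 or anything above
 * bit 31 of an event select, which keeps the guest away from the
 * extended event-select and host/guest-only bits.
 */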

static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int i;

        BUILD_BUG_ON(AMD64_NUM_COUNTERS_CORE > INTEL_PMC_MAX_GENERIC);

        for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
                pmu->gp_counters[i].type = KVM_PMC_GP;
                pmu->gp_counters[i].vcpu = vcpu;
                pmu->gp_counters[i].idx = i;
                pmu->gp_counters[i].current_config = 0;
        }
}

static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int i;

        for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
                struct kvm_pmc *pmc = &pmu->gp_counters[i];

                pmc_stop_counter(pmc);
                pmc->counter = pmc->eventsel = 0;
        }
}

struct kvm_pmu_ops amd_pmu_ops = {
        .find_arch_event = amd_find_arch_event,
        .find_fixed_event = amd_find_fixed_event,
        .pmc_is_enabled = amd_pmc_is_enabled,
        .pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
        .rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
        .msr_idx_to_pmc = amd_msr_idx_to_pmc,
        .is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
        .is_valid_msr = amd_is_valid_msr,
        .get_msr = amd_pmu_get_msr,
        .set_msr = amd_pmu_set_msr,
        .refresh = amd_pmu_refresh,
        .init = amd_pmu_init,
        .reset = amd_pmu_reset,
};
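
/*
 * Hook-up sketch (illustrative, assuming the SVM code of this era):
 * svm.c publishes this table through its kvm_x86_ops, e.g.
 *
 *      static struct kvm_x86_ops svm_x86_ops = {
 *              ...
 *              .pmu_ops = &amd_pmu_ops,
 *              ...
 *      };
 *
 * so the generic dispatcher in pmu.c reaches the functions above
 * whenever KVM runs on an AMD host.
 */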