// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "nested.h"
#include "pmu.h"

#define MSR_PMC_FULL_WIDTH_BIT      (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)

enum intel_pmu_architectural_events {
        /*
         * The order of the architectural events matters as support for each
         * event is enumerated via CPUID using the index of the event.
         */
        INTEL_ARCH_CPU_CYCLES,
        INTEL_ARCH_INSTRUCTIONS_RETIRED,
        INTEL_ARCH_REFERENCE_CYCLES,
        INTEL_ARCH_LLC_REFERENCES,
        INTEL_ARCH_LLC_MISSES,
        INTEL_ARCH_BRANCHES_RETIRED,
        INTEL_ARCH_BRANCHES_MISPREDICTED,

        NR_REAL_INTEL_ARCH_EVENTS,

        /*
         * Pseudo-architectural event used to implement IA32_FIXED_CTR2, a.k.a.
         * TSC reference cycles.  The architectural reference cycles event may
         * or may not actually use the TSC as the reference, e.g. might use the
         * core crystal clock or the bus clock (yeah, "architectural").
         */
        PSEUDO_ARCH_REFERENCE_CYCLES = NR_REAL_INTEL_ARCH_EVENTS,
        NR_INTEL_ARCH_EVENTS,
};

static struct {
        u8 eventsel;
        u8 unit_mask;
} const intel_arch_events[] = {
        [INTEL_ARCH_CPU_CYCLES]                 = { 0x3c, 0x00 },
        [INTEL_ARCH_INSTRUCTIONS_RETIRED]       = { 0xc0, 0x00 },
        [INTEL_ARCH_REFERENCE_CYCLES]           = { 0x3c, 0x01 },
        [INTEL_ARCH_LLC_REFERENCES]             = { 0x2e, 0x4f },
        [INTEL_ARCH_LLC_MISSES]                 = { 0x2e, 0x41 },
        [INTEL_ARCH_BRANCHES_RETIRED]           = { 0xc4, 0x00 },
        [INTEL_ARCH_BRANCHES_MISPREDICTED]      = { 0xc5, 0x00 },
        [PSEUDO_ARCH_REFERENCE_CYCLES]          = { 0x00, 0x03 },
};

/* mapping between fixed pmc index and intel_arch_events array */
static int fixed_pmc_events[] = {
        [0] = INTEL_ARCH_INSTRUCTIONS_RETIRED,
        [1] = INTEL_ARCH_CPU_CYCLES,
        [2] = PSEUDO_ARCH_REFERENCE_CYCLES,
};

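/*
 * Reprogram the fixed counters in response to a guest write of
 * MSR_CORE_PERF_FIXED_CTR_CTRL.  Only counters whose per-counter control
 * field actually changed are marked for reprogramming.
 */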
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
        struct kvm_pmc *pmc;
        u8 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;
        int i;

        pmu->fixed_ctr_ctrl = data;
        for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
                u8 new_ctrl = fixed_ctrl_field(data, i);
                u8 old_ctrl = fixed_ctrl_field(old_fixed_ctr_ctrl, i);

                if (old_ctrl == new_ctrl)
                        continue;

                pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

                __set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
                kvm_pmu_request_counter_reprogram(pmc);
        }
}

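/*
 * Translate a global PMC index to a kvm_pmc: indices below
 * INTEL_PMC_IDX_FIXED are GP counters, the remainder are fixed counters.
 */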
static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
        if (pmc_idx < INTEL_PMC_IDX_FIXED) {
                return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
                                  MSR_P6_EVNTSEL0);
        } else {
                u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

                return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
        }
}

static bool intel_hw_event_available(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);
        u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
        u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
        int i;

        BUILD_BUG_ON(ARRAY_SIZE(intel_arch_events) != NR_INTEL_ARCH_EVENTS);

        /*
         * Disallow events reported as unavailable in guest CPUID.  Note, this
         * doesn't apply to pseudo-architectural events.
         */
        for (i = 0; i < NR_REAL_INTEL_ARCH_EVENTS; i++) {
                if (intel_arch_events[i].eventsel != event_select ||
                    intel_arch_events[i].unit_mask != unit_mask)
                        continue;

                return pmu->available_event_types & BIT(i);
        }

        return true;
}

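/*
 * For RDPMC, bit 30 of the guest's ECX selects fixed (1) vs. GP (0)
 * counters; the low bits index into the selected counter array.
 */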
static bool intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        bool fixed = idx & (1u << 30);

        idx &= ~(3u << 30);

        return fixed ? idx < pmu->nr_arch_fixed_counters
                     : idx < pmu->nr_arch_gp_counters;
}

static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
                                            unsigned int idx, u64 *mask)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        bool fixed = idx & (1u << 30);
        struct kvm_pmc *counters;
        unsigned int num_counters;

        idx &= ~(3u << 30);
        if (fixed) {
                counters = pmu->fixed_counters;
                num_counters = pmu->nr_arch_fixed_counters;
        } else {
                counters = pmu->gp_counters;
                num_counters = pmu->nr_arch_gp_counters;
        }
        if (idx >= num_counters)
                return NULL;
        *mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
        return &counters[array_index_nospec(idx, num_counters)];
}

static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu)
{
        if (!guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
                return 0;

        return vcpu->arch.perf_capabilities;
}

static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
{
        return (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_FW_WRITES) != 0;
}

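/*
 * The full-width counter MSR aliases (MSR_IA32_PMC0..n) are only valid
 * when full-width writes are enumerated to the guest via
 * IA32_PERF_CAPABILITIES.
 */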
static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
{
        if (!fw_writes_is_enabled(pmu_to_vcpu(pmu)))
                return NULL;

        return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
}

static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
{
        struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
        bool ret = false;

        if (!intel_pmu_lbr_is_enabled(vcpu))
                return ret;

        ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) ||
                (index >= records->from && index < records->from + records->nr) ||
                (index >= records->to && index < records->to + records->nr);

        if (!ret && records->info)
                ret = (index >= records->info && index < records->info + records->nr);

        return ret;
}

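/*
 * Check whether 'msr' is a PMU MSR that is supported for the current vCPU
 * configuration, i.e. whether accesses should be handled by the vPMU.
 */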
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        u64 perf_capabilities;
        int ret;

        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
                return kvm_pmu_has_perf_global_ctrl(pmu);
        case MSR_IA32_PEBS_ENABLE:
                ret = vcpu_get_perf_capabilities(vcpu) & PERF_CAP_PEBS_FORMAT;
                break;
        case MSR_IA32_DS_AREA:
                ret = guest_cpuid_has(vcpu, X86_FEATURE_DS);
                break;
        case MSR_PEBS_DATA_CFG:
                perf_capabilities = vcpu_get_perf_capabilities(vcpu);
                ret = (perf_capabilities & PERF_CAP_PEBS_BASELINE) &&
                        ((perf_capabilities & PERF_CAP_PEBS_FORMAT) > 3);
                break;
        default:
                ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
                        get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
                        get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) ||
                        intel_pmu_is_valid_lbr_msr(vcpu, msr);
                break;
        }

        return ret;
}

static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;

        pmc = get_fixed_pmc(pmu, msr);
        pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
        pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);

        return pmc;
}

static inline void intel_pmu_release_guest_lbr_event(struct kvm_vcpu *vcpu)
{
        struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

        if (lbr_desc->event) {
                perf_event_release_kernel(lbr_desc->event);
                lbr_desc->event = NULL;
                vcpu_to_pmu(vcpu)->event_count--;
        }
}

int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu)
{
        struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct perf_event *event;

        /*
         * The perf_event_attr is constructed with the minimum set of fields
         * required for the LBR event to do its job:
         * - set 'pinned = true' to make it task pinned so that if another
         *   CPU-pinned event reclaims the LBR, event->oncpu is set to -1;
         * - set '.exclude_host = true' to record only guest branch behavior;
         *
         * - set '.config = INTEL_FIXED_VLBR_EVENT' to indicate that host perf
         *   should schedule the event with a fake (virtual) counter rather
         *   than a real HW counter; see is_guest_lbr_event() and
         *   __intel_get_event_constraints();
         *
         * - set 'sample_type = PERF_SAMPLE_BRANCH_STACK' and
         *   'branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
         *   PERF_SAMPLE_BRANCH_USER' to configure it as an LBR callstack
         *   event, which helps KVM save/restore guest LBR records across
         *   host context switches and significantly reduces overhead;
         *   see branch_user_callstack() and intel_pmu_lbr_sched_task();
         */
        struct perf_event_attr attr = {
                .type = PERF_TYPE_RAW,
                .size = sizeof(attr),
                .config = INTEL_FIXED_VLBR_EVENT,
                .sample_type = PERF_SAMPLE_BRANCH_STACK,
                .pinned = true,
                .exclude_host = true,
                .branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
                                        PERF_SAMPLE_BRANCH_USER,
        };

        if (unlikely(lbr_desc->event)) {
                __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
                return 0;
        }

        event = perf_event_create_kernel_counter(&attr, -1,
                                                current, NULL, NULL);
        if (IS_ERR(event)) {
                pr_debug_ratelimited("%s: failed %ld\n",
                                        __func__, PTR_ERR(event));
                return PTR_ERR(event);
        }
        lbr_desc->event = event;
        pmu->event_count++;
        __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
        return 0;
}

/*
 * It's safe to access LBR MSRs from the guest when they have not been
 * passed through, since the host restores or resets the LBR MSR records
 * when the guest LBR event is scheduled in.
 */
static bool intel_pmu_handle_lbr_msrs_access(struct kvm_vcpu *vcpu,
                                     struct msr_data *msr_info, bool read)
{
        struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
        u32 index = msr_info->index;

        if (!intel_pmu_is_valid_lbr_msr(vcpu, index))
                return false;

        if (!lbr_desc->event && intel_pmu_create_guest_lbr_event(vcpu) < 0)
                goto dummy;

        /*
         * Disable IRQs to ensure the LBR feature doesn't get reclaimed by the
         * host while the MSR is accessed, which also prevents host LBR values
         * from leaking to the guest.  If the LBR has been reclaimed, return 0
         * on guest reads.
         */
        local_irq_disable();
        if (lbr_desc->event->state == PERF_EVENT_STATE_ACTIVE) {
                if (read)
                        rdmsrl(index, msr_info->data);
                else
                        wrmsrl(index, msr_info->data);
                __set_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
                local_irq_enable();
                return true;
        }
        clear_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
        local_irq_enable();

dummy:
        if (read)
                msr_info->data = 0;
        return true;
}

static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u32 msr = msr_info->index;

        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
                msr_info->data = pmu->fixed_ctr_ctrl;
                break;
        case MSR_IA32_PEBS_ENABLE:
                msr_info->data = pmu->pebs_enable;
                break;
        case MSR_IA32_DS_AREA:
                msr_info->data = pmu->ds_area;
                break;
        case MSR_PEBS_DATA_CFG:
                msr_info->data = pmu->pebs_data_cfg;
                break;
        default:
                if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
                    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
                        u64 val = pmc_read_counter(pmc);
                        msr_info->data =
                                val & pmu->counter_bitmask[KVM_PMC_GP];
                        break;
                } else if ((pmc = get_fixed_pmc(pmu, msr))) {
                        u64 val = pmc_read_counter(pmc);
                        msr_info->data =
                                val & pmu->counter_bitmask[KVM_PMC_FIXED];
                        break;
                } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
                        msr_info->data = pmc->eventsel;
                        break;
                } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true)) {
                        break;
                }
                return 1;
        }

        return 0;
}

static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u32 msr = msr_info->index;
        u64 data = msr_info->data;
        u64 reserved_bits, diff;

        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
                if (data & pmu->fixed_ctr_ctrl_mask)
                        return 1;

                if (pmu->fixed_ctr_ctrl != data)
                        reprogram_fixed_counters(pmu, data);
                break;
        case MSR_IA32_PEBS_ENABLE:
                if (data & pmu->pebs_enable_mask)
                        return 1;

                if (pmu->pebs_enable != data) {
                        diff = pmu->pebs_enable ^ data;
                        pmu->pebs_enable = data;
                        reprogram_counters(pmu, diff);
                }
                break;
        case MSR_IA32_DS_AREA:
                if (is_noncanonical_address(data, vcpu))
                        return 1;

                pmu->ds_area = data;
                break;
        case MSR_PEBS_DATA_CFG:
                if (data & pmu->pebs_data_cfg_mask)
                        return 1;

                pmu->pebs_data_cfg = data;
                break;
        default:
                if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
                    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
                        if ((msr & MSR_PMC_FULL_WIDTH_BIT) &&
                            (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
                                return 1;

                        if (!msr_info->host_initiated &&
                            !(msr & MSR_PMC_FULL_WIDTH_BIT))
                                data = (s64)(s32)data;
                        pmc_write_counter(pmc, data);
                        pmc_update_sample_period(pmc);
                        break;
                } else if ((pmc = get_fixed_pmc(pmu, msr))) {
                        pmc_write_counter(pmc, data);
                        pmc_update_sample_period(pmc);
                        break;
                } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
                        reserved_bits = pmu->reserved_bits;
                        if ((pmc->idx == 2) &&
                            (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
                                reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
                        if (data & reserved_bits)
                                return 1;

                        if (data != pmc->eventsel) {
                                pmc->eventsel = data;
                                kvm_pmu_request_counter_reprogram(pmc);
                        }
                        break;
                } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false)) {
                        break;
                }
                /* Not a known PMU MSR. */
                return 1;
        }

        return 0;
}

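/*
 * Mirror each fixed counter's architectural event into pmc->eventsel so
 * that the common reprogramming code can treat fixed and GP counters
 * uniformly.
 */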
static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu)
{
        int i;

        BUILD_BUG_ON(ARRAY_SIZE(fixed_pmc_events) != KVM_PMC_MAX_FIXED);

        for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
                int index = array_index_nospec(i, KVM_PMC_MAX_FIXED);
                struct kvm_pmc *pmc = &pmu->fixed_counters[index];
                u32 event = fixed_pmc_events[index];

                pmc->eventsel = (intel_arch_events[event].unit_mask << 8) |
                                 intel_arch_events[event].eventsel;
        }
}

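/*
 * Refresh the vPMU model from guest CPUID: leaf 0xA sizes the GP and fixed
 * counters and advertises the available architectural events, while
 * IA32_PERF_CAPABILITIES gates LBR and PEBS support.
 */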
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
        struct kvm_cpuid_entry2 *entry;
        union cpuid10_eax eax;
        union cpuid10_edx edx;
        u64 perf_capabilities;
        u64 counter_mask;
        int i;

        pmu->nr_arch_gp_counters = 0;
        pmu->nr_arch_fixed_counters = 0;
        pmu->counter_bitmask[KVM_PMC_GP] = 0;
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
        pmu->version = 0;
        pmu->reserved_bits = 0xffffffff00200000ull;
        pmu->raw_event_mask = X86_RAW_EVENT_MASK;
        pmu->global_ctrl_mask = ~0ull;
        pmu->global_status_mask = ~0ull;
        pmu->fixed_ctr_ctrl_mask = ~0ull;
        pmu->pebs_enable_mask = ~0ull;
        pmu->pebs_data_cfg_mask = ~0ull;

        memset(&lbr_desc->records, 0, sizeof(lbr_desc->records));

        /*
         * Setting passthrough of LBR MSRs is done only in the VM-Entry loop,
         * and PMU refresh is disallowed after the vCPU has run, i.e. this code
         * should never be reached while KVM is passing through MSRs.
         */
        if (KVM_BUG_ON(lbr_desc->msr_passthrough, vcpu->kvm))
                return;

        entry = kvm_find_cpuid_entry(vcpu, 0xa);
        if (!entry || !vcpu->kvm->arch.enable_pmu)
                return;
        eax.full = entry->eax;
        edx.full = entry->edx;

        pmu->version = eax.split.version_id;
        if (!pmu->version)
                return;

        pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
                                         kvm_pmu_cap.num_counters_gp);
        eax.split.bit_width = min_t(int, eax.split.bit_width,
                                    kvm_pmu_cap.bit_width_gp);
        pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
        eax.split.mask_length = min_t(int, eax.split.mask_length,
                                      kvm_pmu_cap.events_mask_len);
        pmu->available_event_types = ~entry->ebx &
                                        ((1ull << eax.split.mask_length) - 1);

        if (pmu->version == 1) {
                pmu->nr_arch_fixed_counters = 0;
        } else {
                pmu->nr_arch_fixed_counters = min_t(int, edx.split.num_counters_fixed,
                                                    kvm_pmu_cap.num_counters_fixed);
                edx.split.bit_width_fixed = min_t(int, edx.split.bit_width_fixed,
                                                  kvm_pmu_cap.bit_width_fixed);
                pmu->counter_bitmask[KVM_PMC_FIXED] =
                        ((u64)1 << edx.split.bit_width_fixed) - 1;
                setup_fixed_pmc_eventsel(pmu);
        }

        for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
                pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4));
        counter_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
                (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED));
        pmu->global_ctrl_mask = counter_mask;

        /*
         * GLOBAL_STATUS and GLOBAL_OVF_CONTROL (a.k.a. GLOBAL_STATUS_RESET)
         * share reserved bit definitions.  The kernel just happens to use
         * OVF_CTRL for the names.
         */
        pmu->global_status_mask = pmu->global_ctrl_mask
                        & ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
                            MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
        if (vmx_pt_mode_is_host_guest())
                pmu->global_status_mask &=
                                ~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;

        entry = kvm_find_cpuid_entry_index(vcpu, 7, 0);
        if (entry &&
            (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
            (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) {
                pmu->reserved_bits ^= HSW_IN_TX;
                pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
        }

        bitmap_set(pmu->all_valid_pmc_idx,
                0, pmu->nr_arch_gp_counters);
        bitmap_set(pmu->all_valid_pmc_idx,
                INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);

        perf_capabilities = vcpu_get_perf_capabilities(vcpu);
        if (cpuid_model_is_consistent(vcpu) &&
            (perf_capabilities & PMU_CAP_LBR_FMT))
                x86_perf_get_lbr(&lbr_desc->records);
        else
                lbr_desc->records.nr = 0;

        if (lbr_desc->records.nr)
                bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);

        if (perf_capabilities & PERF_CAP_PEBS_FORMAT) {
                if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
                        pmu->pebs_enable_mask = counter_mask;
                        pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
                        for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
                                pmu->fixed_ctr_ctrl_mask &=
                                        ~(1ULL << (INTEL_PMC_IDX_FIXED + i * 4));
                        }
                        pmu->pebs_data_cfg_mask = ~0xff00000full;
                } else {
                        pmu->pebs_enable_mask =
                                ~((1ull << pmu->nr_arch_gp_counters) - 1);
                }
        }
}

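/*
 * One-time init of the per-vCPU PMC structures; the number of usable
 * counters is established later by intel_pmu_refresh() based on guest CPUID.
 */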
static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

        for (i = 0; i < KVM_INTEL_PMC_MAX_GENERIC; i++) {
                pmu->gp_counters[i].type = KVM_PMC_GP;
                pmu->gp_counters[i].vcpu = vcpu;
                pmu->gp_counters[i].idx = i;
                pmu->gp_counters[i].current_config = 0;
        }

        for (i = 0; i < KVM_PMC_MAX_FIXED; i++) {
                pmu->fixed_counters[i].type = KVM_PMC_FIXED;
                pmu->fixed_counters[i].vcpu = vcpu;
                pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
                pmu->fixed_counters[i].current_config = 0;
        }

        lbr_desc->records.nr = 0;
        lbr_desc->event = NULL;
        lbr_desc->msr_passthrough = false;
}

static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc = NULL;
        int i;

        for (i = 0; i < KVM_INTEL_PMC_MAX_GENERIC; i++) {
                pmc = &pmu->gp_counters[i];

                pmc_stop_counter(pmc);
                pmc->counter = pmc->prev_counter = pmc->eventsel = 0;
        }

        for (i = 0; i < KVM_PMC_MAX_FIXED; i++) {
                pmc = &pmu->fixed_counters[i];

                pmc_stop_counter(pmc);
                pmc->counter = pmc->prev_counter = 0;
        }

        pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;

        intel_pmu_release_guest_lbr_event(vcpu);
}

/*
 * Emulate LBR_On_PMI behavior for 1 < pmu.version < 4.
 *
 * If Freeze_LBR_On_PMI = 1, the LBR is frozen on PMI, which KVM emulates
 * by clearing the LBR bit (bit 0) in IA32_DEBUGCTL.
 *
 * The guest needs to re-enable LBR to resume recording branches.
 */
static void intel_pmu_legacy_freezing_lbrs_on_pmi(struct kvm_vcpu *vcpu)
{
        u64 data = vmcs_read64(GUEST_IA32_DEBUGCTL);

        if (data & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI) {
                data &= ~DEBUGCTLMSR_LBR;
                vmcs_write64(GUEST_IA32_DEBUGCTL, data);
        }
}

static void intel_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
        u8 version = vcpu_to_pmu(vcpu)->version;

        if (!intel_pmu_lbr_is_enabled(vcpu))
                return;

        if (version > 1 && version < 4)
                intel_pmu_legacy_freezing_lbrs_on_pmi(vcpu);
}

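/*
 * Toggle MSR interception for all LBR-related MSRs (the FROM/TO/INFO stacks
 * plus LBR_SELECT and LBR_TOS); 'set' == true intercepts the MSRs, false
 * passes them through to the guest.
 */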
static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
{
        struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu);
        int i;

        for (i = 0; i < lbr->nr; i++) {
                vmx_set_intercept_for_msr(vcpu, lbr->from + i, MSR_TYPE_RW, set);
                vmx_set_intercept_for_msr(vcpu, lbr->to + i, MSR_TYPE_RW, set);
                if (lbr->info)
                        vmx_set_intercept_for_msr(vcpu, lbr->info + i, MSR_TYPE_RW, set);
        }

        vmx_set_intercept_for_msr(vcpu, MSR_LBR_SELECT, MSR_TYPE_RW, set);
        vmx_set_intercept_for_msr(vcpu, MSR_LBR_TOS, MSR_TYPE_RW, set);
}

static inline void vmx_disable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
{
        struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

        if (!lbr_desc->msr_passthrough)
                return;

        vmx_update_intercept_for_lbr_msrs(vcpu, true);
        lbr_desc->msr_passthrough = false;
}

static inline void vmx_enable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
{
        struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

        if (lbr_desc->msr_passthrough)
                return;

        vmx_update_intercept_for_lbr_msrs(vcpu, false);
        lbr_desc->msr_passthrough = true;
}

/*
 * Higher priority host perf events (e.g. CPU pinned) can reclaim PMU
 * resources (e.g. LBR) that were assigned to the guest.  This is usually
 * done via IPI calls (see perf_install_in_context() for details).
 *
 * Before entering non-root mode (with IRQs disabled here), double check
 * that the PMU features enabled for the guest have not been reclaimed by
 * higher priority host events.  Otherwise, disallow the vCPU's access to
 * the reclaimed features.
 */
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

        if (!lbr_desc->event) {
                vmx_disable_lbr_msrs_passthrough(vcpu);
                if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
                        goto warn;
                if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
                        goto warn;
                return;
        }

        if (lbr_desc->event->state < PERF_EVENT_STATE_ACTIVE) {
                vmx_disable_lbr_msrs_passthrough(vcpu);
                __clear_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
                goto warn;
        } else
                vmx_enable_lbr_msrs_passthrough(vcpu);

        return;

warn:
        pr_warn_ratelimited("vcpu-%d: fail to passthrough LBR.\n", vcpu->vcpu_id);
}

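/*
 * Drop the guest LBR event if the guest is no longer using the LBRs,
 * i.e. if DEBUGCTL.LBR is not set.
 */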
static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
{
        if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
                intel_pmu_release_guest_lbr_event(vcpu);
}

void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
{
        struct kvm_pmc *pmc = NULL;
        int bit, hw_idx;

        for_each_set_bit(bit, (unsigned long *)&pmu->global_ctrl,
                         X86_PMC_IDX_MAX) {
                pmc = intel_pmc_idx_to_pmc(pmu, bit);

                if (!pmc || !pmc_speculative_in_use(pmc) ||
                    !pmc_is_globally_enabled(pmc) || !pmc->perf_event)
                        continue;

                /*
                 * A negative index indicates the event isn't mapped to a
                 * physical counter in the host, e.g. due to contention.
                 */
                hw_idx = pmc->perf_event->hw.idx;
                if (hw_idx != pmc->idx && hw_idx > -1)
                        pmu->host_cross_mapped_mask |= BIT_ULL(hw_idx);
        }
}

struct kvm_pmu_ops intel_pmu_ops __initdata = {
        .hw_event_available = intel_hw_event_available,
        .pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
        .rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
        .msr_idx_to_pmc = intel_msr_idx_to_pmc,
        .is_valid_rdpmc_ecx = intel_is_valid_rdpmc_ecx,
        .is_valid_msr = intel_is_valid_msr,
        .get_msr = intel_pmu_get_msr,
        .set_msr = intel_pmu_set_msr,
        .refresh = intel_pmu_refresh,
        .init = intel_pmu_init,
        .reset = intel_pmu_reset,
        .deliver_pmi = intel_pmu_deliver_pmi,
        .cleanup = intel_pmu_cleanup,
        .EVENTSEL_EVENT = ARCH_PERFMON_EVENTSEL_EVENT,
        .MAX_NR_GP_COUNTERS = KVM_INTEL_PMC_MAX_GENERIC,
        .MIN_NR_GP_COUNTERS = 1,
};