// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "nested.h"
#include "pmu.h"

#define MSR_PMC_FULL_WIDTH_BIT		(MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)

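/* Reprogram any fixed counter whose control field in IA32_FIXED_CTR_CTRL changed. */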
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	struct kvm_pmc *pmc;
	u8 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;
	int i;

	pmu->fixed_ctr_ctrl = data;
	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 new_ctrl = fixed_ctrl_field(data, i);
		u8 old_ctrl = fixed_ctrl_field(old_fixed_ctr_ctrl, i);

		if (old_ctrl == new_ctrl)
			continue;

		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

		__set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
		kvm_pmu_request_counter_reprogram(pmc);
	}
}

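/*
 * Translate a global PMC index to its kvm_pmc: GP counters occupy the low
 * indices, fixed counters start at INTEL_PMC_IDX_FIXED.
 */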
static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	if (pmc_idx < INTEL_PMC_IDX_FIXED) {
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
				  MSR_P6_EVNTSEL0);
	} else {
		u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
	}
}

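/* Bit 30 of the RDPMC index (guest ECX) selects the fixed-counter namespace. */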
static bool intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);

	idx &= ~(3u << 30);

	return fixed ? idx < pmu->nr_arch_fixed_counters
		     : idx < pmu->nr_arch_gp_counters;
}

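/*
 * Translate a guest RDPMC index to its kvm_pmc and narrow *mask to the bit
 * width of the selected counter type.
 */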
static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					      unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);
	struct kvm_pmc *counters;
	unsigned int num_counters;

	idx &= ~(3u << 30);
	if (fixed) {
		counters = pmu->fixed_counters;
		num_counters = pmu->nr_arch_fixed_counters;
	} else {
		counters = pmu->gp_counters;
		num_counters = pmu->nr_arch_gp_counters;
	}
	if (idx >= num_counters)
		return NULL;
	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
	return &counters[array_index_nospec(idx, num_counters)];
}

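/* IA32_PERF_CAPABILITIES is only exposed to guests with PDCM in CPUID. */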
static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu)
{
	if (!guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
		return 0;

	return vcpu->arch.perf_capabilities;
}

static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_FW_WRITES) != 0;
}

static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
{
	if (!fw_writes_is_enabled(pmu_to_vcpu(pmu)))
		return NULL;

	return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
}

static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
{
	struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
	bool ret = false;

	if (!intel_pmu_lbr_is_enabled(vcpu))
		return ret;

	ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) ||
		(index >= records->from && index < records->from + records->nr) ||
		(index >= records->to && index < records->to + records->nr);

	if (!ret && records->info)
		ret = (index >= records->info && index < records->info + records->nr);

	return ret;
}

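/*
 * Return true if @msr is a PMU MSR that exists for the current vCPU model:
 * fixed/GP counters and their full-width aliases, event selectors, PEBS/DS
 * MSRs, or LBR MSRs.
 */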
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u64 perf_capabilities;
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		return kvm_pmu_has_perf_global_ctrl(pmu);
	case MSR_IA32_PEBS_ENABLE:
		ret = vcpu_get_perf_capabilities(vcpu) & PERF_CAP_PEBS_FORMAT;
		break;
	case MSR_IA32_DS_AREA:
		ret = guest_cpuid_has(vcpu, X86_FEATURE_DS);
		break;
	case MSR_PEBS_DATA_CFG:
		perf_capabilities = vcpu_get_perf_capabilities(vcpu);
		ret = (perf_capabilities & PERF_CAP_PEBS_BASELINE) &&
			((perf_capabilities & PERF_CAP_PEBS_FORMAT) > 3);
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
			get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) ||
			intel_pmu_is_valid_lbr_msr(vcpu, msr);
		break;
	}

	return ret;
}

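/* Map a counter or event-select MSR to its kvm_pmc, or return NULL. */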
static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_fixed_pmc(pmu, msr);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);

	return pmc;
}

static inline void intel_pmu_release_guest_lbr_event(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (lbr_desc->event) {
		perf_event_release_kernel(lbr_desc->event);
		lbr_desc->event = NULL;
		vcpu_to_pmu(vcpu)->event_count--;
	}
}

int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct perf_event *event;

	/*
	 * The perf_event_attr is constructed in a minimal but sufficient way:
	 * - set 'pinned = true' to make it task pinned so that if another
	 *   cpu pinned event reclaims LBR, the event->oncpu will be set to -1;
	 * - set '.exclude_host = true' to record guest branch behavior;
	 *
	 * - set '.config = INTEL_FIXED_VLBR_EVENT' to indicate that host perf
	 *   should schedule the event without a real HW counter but a fake one;
	 *   check is_guest_lbr_event() and __intel_get_event_constraints();
	 *
	 * - set 'sample_type = PERF_SAMPLE_BRANCH_STACK' and
	 *   'branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
	 *   PERF_SAMPLE_BRANCH_USER' to configure it as an LBR callstack
	 *   event, which helps KVM save/restore guest LBR records
	 *   during host context switches and reduces overhead considerably;
	 *   check branch_user_callstack() and intel_pmu_lbr_sched_task();
	 */
	struct perf_event_attr attr = {
		.type = PERF_TYPE_RAW,
		.size = sizeof(attr),
		.config = INTEL_FIXED_VLBR_EVENT,
		.sample_type = PERF_SAMPLE_BRANCH_STACK,
		.pinned = true,
		.exclude_host = true,
		.branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
					PERF_SAMPLE_BRANCH_USER,
	};

	if (unlikely(lbr_desc->event)) {
		__set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
		return 0;
	}

	event = perf_event_create_kernel_counter(&attr, -1,
						 current, NULL, NULL);
	if (IS_ERR(event)) {
		pr_debug_ratelimited("%s: failed %ld\n",
				     __func__, PTR_ERR(event));
		return PTR_ERR(event);
	}
	lbr_desc->event = event;
	pmu->event_count++;
	__set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
	return 0;
}

/*
 * It's safe to access LBR MSRs from the guest when they have not been
 * passed through, since the host will restore or reset the LBR MSR
 * records when the guest LBR event is scheduled in.
 */
static bool intel_pmu_handle_lbr_msrs_access(struct kvm_vcpu *vcpu,
					     struct msr_data *msr_info, bool read)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
	u32 index = msr_info->index;

	if (!intel_pmu_is_valid_lbr_msr(vcpu, index))
		return false;

	if (!lbr_desc->event && intel_pmu_create_guest_lbr_event(vcpu) < 0)
		goto dummy;

	/*
	 * Disable irqs to ensure the LBR feature doesn't get reclaimed by the
	 * host at the time the value is read from or written to the MSR; this
	 * avoids leaking the host LBR value to the guest. If LBR has been
	 * reclaimed, return 0 on guest reads.
	 */
	local_irq_disable();
	if (lbr_desc->event->state == PERF_EVENT_STATE_ACTIVE) {
		if (read)
			rdmsrl(index, msr_info->data);
		else
			wrmsrl(index, msr_info->data);
		__set_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
		local_irq_enable();
		return true;
	}
	clear_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
	local_irq_enable();

dummy:
	if (read)
		msr_info->data = 0;
	return true;
}

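/*
 * Emulate a guest RDMSR of a PMU or LBR MSR.  Returns 0 on success, 1 if the
 * MSR is not handled here.
 */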
static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		msr_info->data = pmu->fixed_ctr_ctrl;
		break;
	case MSR_IA32_PEBS_ENABLE:
		msr_info->data = pmu->pebs_enable;
		break;
	case MSR_IA32_DS_AREA:
		msr_info->data = pmu->ds_area;
		break;
	case MSR_PEBS_DATA_CFG:
		msr_info->data = pmu->pebs_data_cfg;
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			u64 val = pmc_read_counter(pmc);
			msr_info->data =
				val & pmu->counter_bitmask[KVM_PMC_GP];
			break;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			u64 val = pmc_read_counter(pmc);
			msr_info->data =
				val & pmu->counter_bitmask[KVM_PMC_FIXED];
			break;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			msr_info->data = pmc->eventsel;
			break;
		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true)) {
			break;
		}
		return 1;
	}

	return 0;
}

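/*
 * Emulate a guest WRMSR to a PMU or LBR MSR.  Writes that set reserved bits,
 * and writes to MSRs not handled here, return 1 so that the caller can
 * inject #GP for guest-initiated accesses.
 */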
static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;
	u64 reserved_bits, diff;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (data & pmu->fixed_ctr_ctrl_mask)
			return 1;

		if (pmu->fixed_ctr_ctrl != data)
			reprogram_fixed_counters(pmu, data);
		break;
	case MSR_IA32_PEBS_ENABLE:
		if (data & pmu->pebs_enable_mask)
			return 1;

		if (pmu->pebs_enable != data) {
			diff = pmu->pebs_enable ^ data;
			pmu->pebs_enable = data;
			reprogram_counters(pmu, diff);
		}
		break;
	case MSR_IA32_DS_AREA:
		if (is_noncanonical_address(data, vcpu))
			return 1;

		pmu->ds_area = data;
		break;
	case MSR_PEBS_DATA_CFG:
		if (data & pmu->pebs_data_cfg_mask)
			return 1;

		pmu->pebs_data_cfg = data;
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			if ((msr & MSR_PMC_FULL_WIDTH_BIT) &&
			    (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
				return 1;

			if (!msr_info->host_initiated &&
			    !(msr & MSR_PMC_FULL_WIDTH_BIT))
				data = (s64)(s32)data;
			pmc_write_counter(pmc, data);
			break;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			pmc_write_counter(pmc, data);
			break;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			reserved_bits = pmu->reserved_bits;
			if ((pmc->idx == 2) &&
			    (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
				reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
			if (data & reserved_bits)
				return 1;

			if (data != pmc->eventsel) {
				pmc->eventsel = data;
				kvm_pmu_request_counter_reprogram(pmc);
			}
			break;
		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false)) {
			break;
		}
		/* Not a known PMU MSR. */
		return 1;
	}

	return 0;
}

/*
 * Map fixed counter events to architectural general purpose event encodings.
 * Perf doesn't provide APIs to allow KVM to directly program a fixed counter,
 * and so KVM instead programs the architectural event to effectively request
 * the fixed counter.  Perf isn't guaranteed to use a fixed counter and may
 * instead program the encoding into a general purpose counter, e.g. if a
 * different perf_event is already utilizing the requested counter, but the end
 * result is the same (ignoring the fact that using a general purpose counter
 * will likely exacerbate counter contention).
 *
 * Note, reference cycles is counted using a perf-defined "pseudo-encoding",
 * as there is no architectural general purpose encoding for reference cycles.
 */
static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu)
{
	const struct {
		u8 eventsel;
		u8 unit_mask;
	} fixed_pmc_events[] = {
		[0] = { 0xc0, 0x00 }, /* Instructions Retired / PERF_COUNT_HW_INSTRUCTIONS. */
		[1] = { 0x3c, 0x00 }, /* CPU Cycles / PERF_COUNT_HW_CPU_CYCLES. */
		[2] = { 0x00, 0x03 }, /* Reference Cycles / PERF_COUNT_HW_REF_CPU_CYCLES. */
	};
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(fixed_pmc_events) != KVM_PMC_MAX_FIXED);

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		int index = array_index_nospec(i, KVM_PMC_MAX_FIXED);
		struct kvm_pmc *pmc = &pmu->fixed_counters[index];

		pmc->eventsel = (fixed_pmc_events[index].unit_mask << 8) |
				fixed_pmc_events[index].eventsel;
	}
}

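/*
 * Recompute the vCPU's PMU configuration from guest CPUID (leaves 0xA and 0x7)
 * and the host PMU capabilities; invoked when userspace sets guest CPUID.
 */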
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
	struct kvm_cpuid_entry2 *entry;
	union cpuid10_eax eax;
	union cpuid10_edx edx;
	u64 perf_capabilities;
	u64 counter_mask;
	int i;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;
	pmu->raw_event_mask = X86_RAW_EVENT_MASK;
	pmu->global_ctrl_mask = ~0ull;
	pmu->global_status_mask = ~0ull;
	pmu->fixed_ctr_ctrl_mask = ~0ull;
	pmu->pebs_enable_mask = ~0ull;
	pmu->pebs_data_cfg_mask = ~0ull;

	memset(&lbr_desc->records, 0, sizeof(lbr_desc->records));

	/*
	 * Setting passthrough of LBR MSRs is done only in the VM-Entry loop,
	 * and PMU refresh is disallowed after the vCPU has run, i.e. this code
	 * should never be reached while KVM is passing through MSRs.
	 */
	if (KVM_BUG_ON(lbr_desc->msr_passthrough, vcpu->kvm))
		return;

	entry = kvm_find_cpuid_entry(vcpu, 0xa);
	if (!entry || !vcpu->kvm->arch.enable_pmu)
		return;
	eax.full = entry->eax;
	edx.full = entry->edx;

	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;

	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					 kvm_pmu_cap.num_counters_gp);
	eax.split.bit_width = min_t(int, eax.split.bit_width,
				    kvm_pmu_cap.bit_width_gp);
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	eax.split.mask_length = min_t(int, eax.split.mask_length,
				      kvm_pmu_cap.events_mask_len);
	pmu->available_event_types = ~entry->ebx &
					((1ull << eax.split.mask_length) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters = min_t(int, edx.split.num_counters_fixed,
						    kvm_pmu_cap.num_counters_fixed);
		edx.split.bit_width_fixed = min_t(int, edx.split.bit_width_fixed,
						  kvm_pmu_cap.bit_width_fixed);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
		setup_fixed_pmc_eventsel(pmu);
	}

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
		pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4));
	counter_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED));
	pmu->global_ctrl_mask = counter_mask;

	/*
	 * GLOBAL_STATUS and GLOBAL_OVF_CONTROL (a.k.a. GLOBAL_STATUS_RESET)
	 * share reserved bit definitions.  The kernel just happens to use
	 * OVF_CTRL for the names.
	 */
	pmu->global_status_mask = pmu->global_ctrl_mask
			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
	if (vmx_pt_mode_is_host_guest())
		pmu->global_status_mask &=
				~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;

	entry = kvm_find_cpuid_entry_index(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) {
		pmu->reserved_bits ^= HSW_IN_TX;
		pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
	}

	bitmap_set(pmu->all_valid_pmc_idx,
		0, pmu->nr_arch_gp_counters);
	bitmap_set(pmu->all_valid_pmc_idx,
		INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);

	perf_capabilities = vcpu_get_perf_capabilities(vcpu);
	if (cpuid_model_is_consistent(vcpu) &&
	    (perf_capabilities & PMU_CAP_LBR_FMT))
		x86_perf_get_lbr(&lbr_desc->records);
	else
		lbr_desc->records.nr = 0;

	if (lbr_desc->records.nr)
		bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);

	if (perf_capabilities & PERF_CAP_PEBS_FORMAT) {
		if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
			pmu->pebs_enable_mask = counter_mask;
			pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
			for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
				pmu->fixed_ctr_ctrl_mask &=
					~(1ULL << (INTEL_PMC_IDX_FIXED + i * 4));
			}
			pmu->pebs_data_cfg_mask = ~0xff00000full;
		} else {
			pmu->pebs_enable_mask =
				~((1ull << pmu->nr_arch_gp_counters) - 1);
		}
	}
}

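/* One-time per-vCPU initialization of the GP/fixed counter arrays and LBR state. */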
static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	for (i = 0; i < KVM_INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}

	for (i = 0; i < KVM_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
		pmu->fixed_counters[i].current_config = 0;
	}

	lbr_desc->records.nr = 0;
	lbr_desc->event = NULL;
	lbr_desc->msr_passthrough = false;
}

static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
	intel_pmu_release_guest_lbr_event(vcpu);
}

/*
 * Emulate LBR_On_PMI behavior for 1 < pmu.version < 4.
 *
 * If Freeze_LBR_On_PMI = 1, the LBR is frozen on PMI and
 * KVM emulates this by clearing the LBR bit (bit 0) in IA32_DEBUGCTL.
 *
 * The guest needs to re-enable LBR to resume branch recording.
 */
static void intel_pmu_legacy_freezing_lbrs_on_pmi(struct kvm_vcpu *vcpu)
{
	u64 data = vmcs_read64(GUEST_IA32_DEBUGCTL);

	if (data & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI) {
		data &= ~DEBUGCTLMSR_LBR;
		vmcs_write64(GUEST_IA32_DEBUGCTL, data);
	}
}

static void intel_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	u8 version = vcpu_to_pmu(vcpu)->version;

	if (!intel_pmu_lbr_is_enabled(vcpu))
		return;

	if (version > 1 && version < 4)
		intel_pmu_legacy_freezing_lbrs_on_pmi(vcpu);
}

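/*
 * Toggle interception of every LBR-related MSR: the from/to/info stacks plus
 * MSR_LBR_SELECT and MSR_LBR_TOS.
 */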
static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
{
	struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu);
	int i;

	for (i = 0; i < lbr->nr; i++) {
		vmx_set_intercept_for_msr(vcpu, lbr->from + i, MSR_TYPE_RW, set);
		vmx_set_intercept_for_msr(vcpu, lbr->to + i, MSR_TYPE_RW, set);
		if (lbr->info)
			vmx_set_intercept_for_msr(vcpu, lbr->info + i, MSR_TYPE_RW, set);
	}

	vmx_set_intercept_for_msr(vcpu, MSR_LBR_SELECT, MSR_TYPE_RW, set);
	vmx_set_intercept_for_msr(vcpu, MSR_LBR_TOS, MSR_TYPE_RW, set);
}

static inline void vmx_disable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (!lbr_desc->msr_passthrough)
		return;

	vmx_update_intercept_for_lbr_msrs(vcpu, true);
	lbr_desc->msr_passthrough = false;
}

static inline void vmx_enable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (lbr_desc->msr_passthrough)
		return;

	vmx_update_intercept_for_lbr_msrs(vcpu, false);
	lbr_desc->msr_passthrough = true;
}

/*
 * Higher priority host perf events (e.g. cpu pinned) could reclaim the
 * pmu resources (e.g. LBR) that were assigned to the guest. This is
 * usually done via ipi calls (more details in perf_install_in_context).
 *
 * Before entering the non-root mode (with irqs disabled here), double
 * confirm that the pmu features enabled for the guest have not been
 * reclaimed by higher priority host events. Otherwise, disallow the
 * vcpu's access to the reclaimed features.
 */
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (!lbr_desc->event) {
		vmx_disable_lbr_msrs_passthrough(vcpu);
		if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
			goto warn;
		if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
			goto warn;
		return;
	}

	if (lbr_desc->event->state < PERF_EVENT_STATE_ACTIVE) {
		vmx_disable_lbr_msrs_passthrough(vcpu);
		__clear_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
		goto warn;
	} else
		vmx_enable_lbr_msrs_passthrough(vcpu);

	return;

warn:
	pr_warn_ratelimited("vcpu-%d: fail to passthrough LBR.\n", vcpu->vcpu_id);
}

static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
{
	if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
		intel_pmu_release_guest_lbr_event(vcpu);
}

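/*
 * For each guest PMC enabled in GLOBAL_CTRL and actively counting, record the
 * host counter index if perf backed the event with a different counter than
 * the index the guest expects.
 */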
void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
{
	struct kvm_pmc *pmc = NULL;
	int bit, hw_idx;

	for_each_set_bit(bit, (unsigned long *)&pmu->global_ctrl,
			 X86_PMC_IDX_MAX) {
		pmc = intel_pmc_idx_to_pmc(pmu, bit);

		if (!pmc || !pmc_speculative_in_use(pmc) ||
		    !pmc_is_globally_enabled(pmc) || !pmc->perf_event)
			continue;

		/*
		 * A negative index indicates the event isn't mapped to a
		 * physical counter in the host, e.g. due to contention.
		 */
		hw_idx = pmc->perf_event->hw.idx;
		if (hw_idx != pmc->idx && hw_idx > -1)
			pmu->host_cross_mapped_mask |= BIT_ULL(hw_idx);
	}
}

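/* Intel (VMX) implementation of the vendor PMU ops. */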
struct kvm_pmu_ops intel_pmu_ops __initdata = {
	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = intel_is_valid_rdpmc_ecx,
	.is_valid_msr = intel_is_valid_msr,
	.get_msr = intel_pmu_get_msr,
	.set_msr = intel_pmu_set_msr,
	.refresh = intel_pmu_refresh,
	.init = intel_pmu_init,
	.reset = intel_pmu_reset,
	.deliver_pmi = intel_pmu_deliver_pmi,
	.cleanup = intel_pmu_cleanup,
	.EVENTSEL_EVENT = ARCH_PERFMON_EVENTSEL_EVENT,
	.MAX_NR_GP_COUNTERS = KVM_INTEL_PMC_MAX_GENERIC,
	.MIN_NR_GP_COUNTERS = 1,
};