// SPDX-License-Identifier: GPL-2.0
/*
 * Test for x86 KVM_SET_PMU_EVENT_FILTER.
 *
 * Copyright (C) 2022, Google LLC.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Verifies the expected behavior of allow lists and deny lists for
 * arbitrary PMU events.
 */
#define _GNU_SOURCE /* for program_invocation_short_name */

#include "test_util.h"
#include "processor.h"

/*
 * In lieu of copying perf_event.h into tools...
 */
#define ARCH_PERFMON_EVENTSEL_OS	(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_ENABLE	(1ULL << 22)

/* End of stuff taken from perf_event.h. */

/* Oddly, this isn't in perf_event.h. */
#define ARCH_PERFMON_BRANCHES_RETIRED	5

#define NUM_BRANCHES 42
/*
 * This is how the event selector and unit mask are stored in an AMD
 * core performance event-select register. Intel's format is similar,
 * but the event selector is only 8 bits.
 */
#define EVENT(select, umask) ((select & 0xf00UL) << 24 | (select & 0xff) | \
			      ((umask) & 0xff) << 8)
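/*
 * For example, EVENT(0x1c2, 0) encodes to 0x1000000c2: event-select bits
 * [11:8] land in bits [35:32], bits [7:0] stay in place, and the unit
 * mask (here 0) would occupy bits [15:8].
 */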
40 * "Branch instructions retired", from the Intel SDM, volume 3,
41 * "Pre-defined Architectural Performance Events."
44 #define INTEL_BR_RETIRED EVENT(0xc4, 0)
47 * "Retired branch instructions", from Processor Programming Reference
48 * (PPR) for AMD Family 17h Model 01h, Revision B1 Processors,
49 * Preliminary Processor Programming Reference (PPR) for AMD Family
50 * 17h Model 31h, Revision B0 Processors, and Preliminary Processor
51 * Programming Reference (PPR) for AMD Family 19h Model 01h, Revision
52 * B1 Processors Volume 1 of 2.
55 #define AMD_ZEN_BR_RETIRED EVENT(0xc2, 0)
/*
 * This event list comprises Intel's eight architectural events plus
 * AMD's "retired branch instructions" for Zen[123] (and possibly
 * other AMD CPUs).
 */
static const uint64_t event_list[] = {
/*
 * If we encounter a #GP during the guest PMU sanity check, then the guest
 * PMU is not functional. Inform the hypervisor via GUEST_SYNC(0).
 */
static void guest_gp_handler(struct ex_regs *regs)
/*
 * Check that we can write a new value to the given MSR and read it back.
 * The caller should provide a non-empty set of bits that are safe to flip.
 *
 * Return on success. GUEST_SYNC(0) on error.
 */
static void check_msr(uint32_t msr, uint64_t bits_to_flip)
{
	uint64_t v = rdmsr(msr) ^ bits_to_flip;
static uint64_t run_and_measure_loop(uint32_t msr_base)
{
	uint64_t branches_retired = rdmsr(msr_base + 0);
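	/*
	 * Each executed "loop ." instruction is a retired branch: it
	 * decrements ECX and branches back to itself while ECX is non-zero,
	 * so NUM_BRANCHES iterations retire NUM_BRANCHES branch instructions.
	 */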
	__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));

	return rdmsr(msr_base + 0) - branches_retired;
}
static void intel_guest_code(void)
{
	check_msr(MSR_CORE_PERF_GLOBAL_CTRL, 1);
	check_msr(MSR_P6_EVNTSEL0, 0xffff);
	check_msr(MSR_IA32_PMC0, 0xffff);
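	/*
	 * Program PMC0 to count branch instructions retired at CPL0:
	 * clear global control, write the event selector with the OS and
	 * enable bits set, then enable only PMC0 via GLOBAL_CTRL bit 0.
	 */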
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
	wrmsr(MSR_P6_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
	      ARCH_PERFMON_EVENTSEL_OS | INTEL_BR_RETIRED);
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x1);

	count = run_and_measure_loop(MSR_IA32_PMC0);
/*
 * To avoid needing a check for CPUID.80000001:ECX.PerfCtrExtCore[bit 23],
 * this code uses the always-available, legacy K7 PMU MSRs, which alias to
 * the first four of the six extended core PMU MSRs.
 */
static void amd_guest_code(void)
{
	check_msr(MSR_K7_EVNTSEL0, 0xffff);
	check_msr(MSR_K7_PERFCTR0, 0xffff);
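	/*
	 * Program legacy counter 0 the same way: clear the event selector,
	 * then set it to count retired branch instructions at CPL0 with the
	 * enable bit set (the legacy K7 PMU has no global control MSR).
	 */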
	wrmsr(MSR_K7_EVNTSEL0, 0);
	wrmsr(MSR_K7_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
	      ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_BR_RETIRED);

	count = run_and_measure_loop(MSR_K7_PERFCTR0);
/*
 * Run the VM to the next GUEST_SYNC(value), and return the value passed
 * to the sync. Any other exit from the guest is fatal.
 */
static uint64_t run_vcpu_to_sync(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
	get_ucall(vcpu, &uc);
	TEST_ASSERT(uc.cmd == UCALL_SYNC,
		    "Received ucall other than UCALL_SYNC: %lu", uc.cmd);
	return uc.args[1];
}
/*
 * In a nested environment or if the vPMU is disabled, the guest PMU
 * might not work as architected (accessing the PMU MSRs may raise a
 * #GP, or writes could simply be discarded). In those situations there
 * is no point in running these tests. The guest code will perform a
 * sanity check and then GUEST_SYNC(success). In the case of failure,
 * the behavior of the guest on resumption is undefined.
 */
static bool sanity_check_pmu(struct kvm_vcpu *vcpu)
{
	uint64_t r;

	vm_install_exception_handler(vcpu->vm, GP_VECTOR, guest_gp_handler);
	r = run_vcpu_to_sync(vcpu);
	vm_install_exception_handler(vcpu->vm, GP_VECTOR, NULL);

	return r;
}
static struct kvm_pmu_event_filter *alloc_pmu_event_filter(uint32_t nevents)
{
	struct kvm_pmu_event_filter *f;
	int size = sizeof(*f) + nevents * sizeof(f->events[0]);

	f = malloc(size);
	TEST_ASSERT(f, "Out of memory");
	memset(f, 0, size);
	f->nevents = nevents;
	return f;
}
static struct kvm_pmu_event_filter *
create_pmu_event_filter(const uint64_t event_list[], int nevents,
			uint32_t action, uint32_t flags)
{
	struct kvm_pmu_event_filter *f;
	int i;

	f = alloc_pmu_event_filter(nevents);
	f->action = action;
	f->flags = flags;
	for (i = 0; i < nevents; i++)
		f->events[i] = event_list[i];
	return f;
}
static struct kvm_pmu_event_filter *event_filter(uint32_t action)
{
	return create_pmu_event_filter(event_list,
				       ARRAY_SIZE(event_list),
				       action, 0);
}
/*
 * Remove the first occurrence of 'event' (if any) from the filter's
 * event list.
 */
static struct kvm_pmu_event_filter *remove_event(struct kvm_pmu_event_filter *f,
						 uint64_t event)
{
	bool found = false;
	int i;

	for (i = 0; i < f->nevents; i++) {
		if (found)
			f->events[i - 1] = f->events[i];
		else
			found = f->events[i] == event;
	}
#define ASSERT_PMC_COUNTING_INSTRUCTIONS(count) \
do { \
	if (count && count != NUM_BRANCHES) \
		pr_info("%s: Branch instructions retired = %lu (expected %u)\n", \
			__func__, count, NUM_BRANCHES); \
	TEST_ASSERT(count, "%s: Branch instructions retired = %lu (expected > 0)", \
		    __func__, count); \
} while (0)

#define ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(count) \
do { \
	TEST_ASSERT(!count, "%s: Branch instructions retired = %lu (expected 0)", \
		    __func__, count); \
} while (0)
static void test_without_filter(struct kvm_vcpu *vcpu)
{
	uint64_t count = run_vcpu_to_sync(vcpu);

	ASSERT_PMC_COUNTING_INSTRUCTIONS(count);
}
static uint64_t test_with_filter(struct kvm_vcpu *vcpu,
				 struct kvm_pmu_event_filter *f)
{
	vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
	return run_vcpu_to_sync(vcpu);
}
static void test_amd_deny_list(struct kvm_vcpu *vcpu)
{
	uint64_t event = EVENT(0x1C2, 0);
	struct kvm_pmu_event_filter *f;
	uint64_t count;

	f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0);
	count = test_with_filter(vcpu, f);
	free(f);

	ASSERT_PMC_COUNTING_INSTRUCTIONS(count);
}
static void test_member_deny_list(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
	uint64_t count = test_with_filter(vcpu, f);

	free(f);
	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(count);
}
static void test_member_allow_list(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
	uint64_t count = test_with_filter(vcpu, f);

	free(f);
	ASSERT_PMC_COUNTING_INSTRUCTIONS(count);
}
static void test_not_member_deny_list(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
	uint64_t count;

	remove_event(f, INTEL_BR_RETIRED);
	remove_event(f, AMD_ZEN_BR_RETIRED);
	count = test_with_filter(vcpu, f);
	free(f);

	ASSERT_PMC_COUNTING_INSTRUCTIONS(count);
}
static void test_not_member_allow_list(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
	uint64_t count;

	remove_event(f, INTEL_BR_RETIRED);
	remove_event(f, AMD_ZEN_BR_RETIRED);
	count = test_with_filter(vcpu, f);
	free(f);

	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(count);
}
/*
 * Verify that setting KVM_PMU_CAP_DISABLE prevents the use of the PMU.
 *
 * Note that KVM_CAP_PMU_CAPABILITY must be enabled before any vCPUs are created.
 */
static void test_pmu_config_disable(void (*guest_code)(void))
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	int r;

	r = kvm_check_cap(KVM_CAP_PMU_CAPABILITY);
	if (!(r & KVM_PMU_CAP_DISABLE))
		return;

	vm = vm_create(1);
	vm_enable_cap(vm, KVM_CAP_PMU_CAPABILITY, KVM_PMU_CAP_DISABLE);

	vcpu = vm_vcpu_add(vm, 0, guest_code);
	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);

	TEST_ASSERT(!sanity_check_pmu(vcpu),
		    "Guest should not be able to use disabled PMU.");

	kvm_vm_free(vm);
}
/*
 * On Intel, check for a non-zero PMU version, at least one general-purpose
 * counter per logical processor, and support for counting the number of
 * branch instructions retired.
 */
static bool use_intel_pmu(void)
{
	return host_cpu_is_intel &&
	       kvm_cpu_property(X86_PROPERTY_PMU_VERSION) &&
	       kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS) &&
	       kvm_pmu_has(X86_PMU_FEATURE_BRANCH_INSNS_RETIRED);
}
static bool is_zen1(uint32_t family, uint32_t model)
{
	return family == 0x17 && model <= 0x0f;
}

static bool is_zen2(uint32_t family, uint32_t model)
{
	return family == 0x17 && model >= 0x30 && model <= 0x3f;
}

static bool is_zen3(uint32_t family, uint32_t model)
{
	return family == 0x19 && model <= 0x0f;
}
/*
 * Determining AMD support for a PMU event requires consulting the AMD
 * PPR for the CPU or reference material derived therefrom. The AMD
 * test code herein has been verified to work on Zen1, Zen2, and Zen3.
 *
 * Feel free to add more AMD CPUs that are documented to support event
 * select 0xc2 umask 0 as "retired branch instructions."
 */
static bool use_amd_pmu(void)
{
	uint32_t family = kvm_cpu_family();
	uint32_t model = kvm_cpu_model();

	return host_cpu_is_amd &&
	       (is_zen1(family, model) ||
		is_zen2(family, model) ||
		is_zen3(family, model));
}
412 * "MEM_INST_RETIRED.ALL_LOADS", "MEM_INST_RETIRED.ALL_STORES", and
413 * "MEM_INST_RETIRED.ANY" from https://perfmon-events.intel.com/
414 * supported on Intel Xeon processors:
415 * - Sapphire Rapids, Ice Lake, Cascade Lake, Skylake.
417 #define MEM_INST_RETIRED 0xD0
418 #define MEM_INST_RETIRED_LOAD EVENT(MEM_INST_RETIRED, 0x81)
419 #define MEM_INST_RETIRED_STORE EVENT(MEM_INST_RETIRED, 0x82)
420 #define MEM_INST_RETIRED_LOAD_STORE EVENT(MEM_INST_RETIRED, 0x83)
static bool supports_event_mem_inst_retired(void)
{
	uint32_t eax, ebx, ecx, edx;

	cpuid(1, &eax, &ebx, &ecx, &edx);
	if (x86_family(eax) == 0x6) {
		switch (x86_model(eax)) {
		/* Sapphire Rapids */
444 * "LS Dispatch", from Processor Programming Reference
445 * (PPR) for AMD Family 17h Model 01h, Revision B1 Processors,
446 * Preliminary Processor Programming Reference (PPR) for AMD Family
447 * 17h Model 31h, Revision B0 Processors, and Preliminary Processor
448 * Programming Reference (PPR) for AMD Family 19h Model 01h, Revision
449 * B1 Processors Volume 1 of 2.
451 #define LS_DISPATCH 0x29
452 #define LS_DISPATCH_LOAD EVENT(LS_DISPATCH, BIT(0))
453 #define LS_DISPATCH_STORE EVENT(LS_DISPATCH, BIT(1))
454 #define LS_DISPATCH_LOAD_STORE EVENT(LS_DISPATCH, BIT(2))
#define INCLUDE_MASKED_ENTRY(event_select, mask, match) \
	KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, false)
#define EXCLUDE_MASKED_ENTRY(event_select, mask, match) \
	KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, true)
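/*
 * With masked events, a programmed unit mask matches a filter entry when
 * (unit mask & entry mask) == match, and EXCLUDE entries carve exceptions
 * out of what the INCLUDE entries cover. For example,
 * INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x81) matches only unit
 * mask 0x81, while INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0x7C, 0)
 * matches any unit mask whose bits 2-6 are clear.
 */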
struct perf_counter {
	union {
		uint64_t raw;
		struct {
			uint64_t loads:22;
			uint64_t stores:20;
			uint64_t loads_stores:20;
		};
	};
};
static uint64_t masked_events_guest_test(uint32_t msr_base)
{
	uint64_t ld0, ld1, st0, st1, ls0, ls1;
	struct perf_counter c;
	int val;

	/*
	 * The actual values of the counters don't determine the outcome of
	 * the test, only whether they are zero or non-zero.
	 */
	ld0 = rdmsr(msr_base + 0);
	st0 = rdmsr(msr_base + 1);
	ls0 = rdmsr(msr_base + 2);
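	/*
	 * Touch 'val' in memory below; the test only cares whether each of
	 * the programmed counters (loads, stores, loads + stores) ends up
	 * zero or non-zero, not about the exact instruction mix.
	 */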
	__asm__ __volatile__("movl $0, %[v];"
			     : [v]"+m"(val) :: "eax");

	ld1 = rdmsr(msr_base + 0);
	st1 = rdmsr(msr_base + 1);
	ls1 = rdmsr(msr_base + 2);

	c.loads = ld1 - ld0;
	c.stores = st1 - st0;
	c.loads_stores = ls1 - ls0;

	return c.raw;
}
static void intel_masked_events_guest_code(void)
{
	uint64_t r;

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
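	/*
	 * Program PMCs 0-2 to count loads, stores, and loads + stores at
	 * CPL0, then enable all three at once via GLOBAL_CTRL bits 0-2.
	 */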
	wrmsr(MSR_P6_EVNTSEL0 + 0, ARCH_PERFMON_EVENTSEL_ENABLE |
	      ARCH_PERFMON_EVENTSEL_OS | MEM_INST_RETIRED_LOAD);
	wrmsr(MSR_P6_EVNTSEL0 + 1, ARCH_PERFMON_EVENTSEL_ENABLE |
	      ARCH_PERFMON_EVENTSEL_OS | MEM_INST_RETIRED_STORE);
	wrmsr(MSR_P6_EVNTSEL0 + 2, ARCH_PERFMON_EVENTSEL_ENABLE |
	      ARCH_PERFMON_EVENTSEL_OS | MEM_INST_RETIRED_LOAD_STORE);

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x7);

	r = masked_events_guest_test(MSR_IA32_PMC0);
static void amd_masked_events_guest_code(void)
{
	uint64_t r;

	wrmsr(MSR_K7_EVNTSEL0, 0);
	wrmsr(MSR_K7_EVNTSEL1, 0);
	wrmsr(MSR_K7_EVNTSEL2, 0);
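	/*
	 * Same idea as the Intel flavor: counters 0-2 count loads, stores,
	 * and loads + stores at CPL0, each enabled via its own selector's
	 * enable bit.
	 */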
	wrmsr(MSR_K7_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
	      ARCH_PERFMON_EVENTSEL_OS | LS_DISPATCH_LOAD);
	wrmsr(MSR_K7_EVNTSEL1, ARCH_PERFMON_EVENTSEL_ENABLE |
	      ARCH_PERFMON_EVENTSEL_OS | LS_DISPATCH_STORE);
	wrmsr(MSR_K7_EVNTSEL2, ARCH_PERFMON_EVENTSEL_ENABLE |
	      ARCH_PERFMON_EVENTSEL_OS | LS_DISPATCH_LOAD_STORE);

	r = masked_events_guest_test(MSR_K7_PERFCTR0);
static struct perf_counter run_masked_events_test(struct kvm_vcpu *vcpu,
						  const uint64_t masked_events[],
						  const int nmasked_events)
{
	struct kvm_pmu_event_filter *f;
	struct perf_counter r;

	f = create_pmu_event_filter(masked_events, nmasked_events,
				    KVM_PMU_EVENT_ALLOW,
				    KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
	r.raw = test_with_filter(vcpu, f);
	free(f);
	return r;
}
/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
#define MAX_FILTER_EVENTS	300
#define MAX_TEST_EVENTS		10

#define ALLOW_LOADS		BIT(0)
#define ALLOW_STORES		BIT(1)
#define ALLOW_LOADS_STORES	BIT(2)
struct masked_events_test {
	uint64_t intel_events[MAX_TEST_EVENTS];
	uint64_t intel_event_end;
	uint64_t amd_events[MAX_TEST_EVENTS];
	uint64_t amd_event_end;
	const char *msg;
	uint32_t flags;
};
/*
 * These are the test cases for the masked events tests.
 *
 * For each test, the guest enables 3 PMU counters (loads, stores,
 * loads + stores). The filter is then set in KVM with the masked events
 * provided. The test then verifies that the counters agree with which
 * ones should be counting and which ones should be filtered.
 */
const struct masked_events_test test_cases[] = {
		INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x81),
		INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xFF, BIT(0)),
	.msg = "Only allow loads.",
	.flags = ALLOW_LOADS,
		INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x82),
		INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xFF, BIT(1)),
	.msg = "Only allow stores.",
	.flags = ALLOW_STORES,
		INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x83),
		INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xFF, BIT(2)),
	.msg = "Only allow loads + stores.",
	.flags = ALLOW_LOADS_STORES,
		INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0x7C, 0),
		EXCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x83),
		INCLUDE_MASKED_ENTRY(LS_DISPATCH, ~(BIT(0) | BIT(1)), 0),
	.msg = "Only allow loads and stores.",
	.flags = ALLOW_LOADS | ALLOW_STORES,
		INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0x7C, 0),
		EXCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x82),
		INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xF8, 0),
		EXCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xFF, BIT(1)),
	.msg = "Only allow loads and loads + stores.",
	.flags = ALLOW_LOADS | ALLOW_LOADS_STORES
		INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFE, 0x82),
		INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xF8, 0),
		EXCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xFF, BIT(0)),
	.msg = "Only allow stores and loads + stores.",
	.flags = ALLOW_STORES | ALLOW_LOADS_STORES
		INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0x7C, 0),
		INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xF8, 0),
	.msg = "Only allow loads, stores, and loads + stores.",
	.flags = ALLOW_LOADS | ALLOW_STORES | ALLOW_LOADS_STORES
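	/*
	 * Worked example for the "Only allow loads and stores." case above:
	 * INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0x7C, 0) matches any unit
	 * mask with bits 2-6 clear, which covers 0x81 (loads), 0x82 (stores),
	 * and 0x83 (loads + stores); the EXCLUDE_MASKED_ENTRY(MEM_INST_RETIRED,
	 * 0xFF, 0x83) entry then carves 0x83 back out, leaving only loads and
	 * stores allowed.
	 */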
static int append_test_events(const struct masked_events_test *test,
			      uint64_t *events, int nevents)
{
	const uint64_t *evts;
	int i;

	evts = use_intel_pmu() ? test->intel_events : test->amd_events;
	for (i = 0; i < MAX_TEST_EVENTS; i++) {
		if (evts[i] == 0)
			break;
		events[nevents + i] = evts[i];
	}
	return nevents + i;
}
static bool bool_eq(bool a, bool b)
{
	return a == b;
}
static void run_masked_events_tests(struct kvm_vcpu *vcpu, uint64_t *events,
				    int nevents)
{
	int ntests = ARRAY_SIZE(test_cases);
	struct perf_counter c;
	int i, n;

	for (i = 0; i < ntests; i++) {
		const struct masked_events_test *test = &test_cases[i];
		/* Do any test case events overflow MAX_TEST_EVENTS? */
		assert(test->intel_event_end == 0);
		assert(test->amd_event_end == 0);

		n = append_test_events(test, events, nevents);

		c = run_masked_events_test(vcpu, events, n);
		TEST_ASSERT(bool_eq(c.loads, test->flags & ALLOW_LOADS) &&
			    bool_eq(c.stores, test->flags & ALLOW_STORES) &&
			    bool_eq(c.loads_stores,
				    test->flags & ALLOW_LOADS_STORES),
			    "%s loads: %u, stores: %u, loads + stores: %u",
			    test->msg, c.loads, c.stores, c.loads_stores);
	}
}
static void add_dummy_events(uint64_t *events, int nevents)
{
	int i;

	for (i = 0; i < nevents; i++) {
		int event_select = i % 0xFF;
		bool exclude = ((i % 4) == 0);

		if (event_select == MEM_INST_RETIRED ||
		    event_select == LS_DISPATCH)
			event_select++;
		events[i] = KVM_PMU_ENCODE_MASKED_ENTRY(event_select, 0,
							0, exclude);
	}
}
static void test_masked_events(struct kvm_vcpu *vcpu)
{
	int nevents = MAX_FILTER_EVENTS - MAX_TEST_EVENTS;
	uint64_t events[MAX_FILTER_EVENTS];

	/* Run the test cases against a sparse PMU event filter. */
	run_masked_events_tests(vcpu, events, 0);

	/* Run the test cases against a dense PMU event filter. */
	add_dummy_events(events, MAX_FILTER_EVENTS);
	run_masked_events_tests(vcpu, events, nevents);
}
static int run_filter_test(struct kvm_vcpu *vcpu, const uint64_t *events,
			   int nevents, uint32_t flags)
{
	struct kvm_pmu_event_filter *f;
	int r;

	f = create_pmu_event_filter(events, nevents, KVM_PMU_EVENT_ALLOW, flags);
	r = __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
	free(f);
	return r;
}
static void test_filter_ioctl(struct kvm_vcpu *vcpu)
{
	uint64_t e = ~0ul;
	int r;

	/*
	 * Unfortunately, event data with invalid bits set (bits other than
	 * the event select and unit mask) is expected to succeed when
	 * flags == 0.
	 */
	r = run_filter_test(vcpu, &e, 1, 0);
	TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");

	r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
	TEST_ASSERT(r != 0, "Invalid PMU Event Filter is expected to fail");

	e = KVM_PMU_ENCODE_MASKED_ENTRY(0xff, 0xff, 0xff, 0xf);
	r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
	TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
}
int main(int argc, char *argv[])
{
	void (*guest_code)(void);
	struct kvm_vcpu *vcpu, *vcpu2 = NULL;
	struct kvm_vm *vm;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_PMU_EVENT_FILTER));
	TEST_REQUIRE(kvm_has_cap(KVM_CAP_PMU_EVENT_MASKED_EVENTS));

	TEST_REQUIRE(use_intel_pmu() || use_amd_pmu());
	guest_code = use_intel_pmu() ? intel_guest_code : amd_guest_code;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);

	TEST_REQUIRE(sanity_check_pmu(vcpu));
	if (use_amd_pmu())
		test_amd_deny_list(vcpu);

	test_without_filter(vcpu);
	test_member_deny_list(vcpu);
	test_member_allow_list(vcpu);
	test_not_member_deny_list(vcpu);
	test_not_member_allow_list(vcpu);
	if (use_intel_pmu() &&
	    supports_event_mem_inst_retired() &&
	    kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS) >= 3)
		vcpu2 = vm_vcpu_add(vm, 2, intel_masked_events_guest_code);
	else if (use_amd_pmu())
		vcpu2 = vm_vcpu_add(vm, 2, amd_masked_events_guest_code);

	if (vcpu2)
		test_masked_events(vcpu2);
	test_filter_ioctl(vcpu);

	kvm_vm_free(vm);

	test_pmu_config_disable(guest_code);

	return 0;
}