KVM: selftests: Rework macros in PMU counters test to prep for multi-insn loop
authorSean Christopherson <seanjc@google.com>
Fri, 28 Jun 2024 00:55:56 +0000 (17:55 -0700)
committerSean Christopherson <seanjc@google.com>
Fri, 28 Jun 2024 15:44:03 +0000 (08:44 -0700)
Tweak the macros in the PMU counters test to prepare for moving the
CLFLUSH+MFENCE instructions into the loop body, to fix an issue where
a single CLFLUSH doesn't guarantee an LLC miss.

Link: https://lore.kernel.org/r/20240628005558.3835480-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
tools/testing/selftests/kvm/x86_64/pmu_counters_test.c

index 96446134c00b7d2e887931b6275ee161f6138179..bb40d7c0f83e8433647f4ef11505cef5a6c110f0 100644 (file)
@@ -7,15 +7,25 @@
 #include "pmu.h"
 #include "processor.h"
 
-/* Number of LOOP instructions for the guest measurement payload. */
-#define NUM_BRANCHES           10
+/* Number of iterations of the loop for the guest measurement payload. */
+#define NUM_LOOPS                      10
+
+/* Each iteration of the loop retires one branch instruction. */
+#define NUM_BRANCH_INSNS_RETIRED       (NUM_LOOPS)
+
+/* Number of instructions in each loop. */
+#define NUM_INSNS_PER_LOOP             1
+
 /*
  * Number of "extra" instructions that will be counted, i.e. the number of
- * instructions that are needed to set up the loop and then disabled the
+ * instructions that are needed to set up the loop and then disable the
  * counter.  1 CLFLUSH/CLFLUSHOPT/NOP, 1 MFENCE, 2 MOV, 2 XOR, 1 WRMSR.
  */
-#define NUM_EXTRA_INSNS                7
-#define NUM_INSNS_RETIRED      (NUM_BRANCHES + NUM_EXTRA_INSNS)
+#define NUM_EXTRA_INSNS                        7
+
+/* Total number of instructions retired within the measured section. */
+#define NUM_INSNS_RETIRED              (NUM_LOOPS * NUM_INSNS_PER_LOOP + NUM_EXTRA_INSNS)
+
 
 static uint8_t kvm_pmu_version;
 static bool kvm_has_perf_caps;
@@ -100,7 +110,7 @@ static void guest_assert_event_count(uint8_t idx,
                GUEST_ASSERT_EQ(count, NUM_INSNS_RETIRED);
                break;
        case INTEL_ARCH_BRANCHES_RETIRED_INDEX:
-               GUEST_ASSERT_EQ(count, NUM_BRANCHES);
+               GUEST_ASSERT_EQ(count, NUM_BRANCH_INSNS_RETIRED);
                break;
        case INTEL_ARCH_LLC_REFERENCES_INDEX:
        case INTEL_ARCH_LLC_MISSES_INDEX:
@@ -120,7 +130,7 @@ static void guest_assert_event_count(uint8_t idx,
        }
 
 sanity_checks:
-       __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+       __asm__ __volatile__("loop ." : "+c"((int){NUM_LOOPS}));
        GUEST_ASSERT_EQ(_rdpmc(pmc), count);
 
        wrmsr(pmc_msr, 0xdead);
@@ -147,7 +157,7 @@ do {                                                                                \
        __asm__ __volatile__("wrmsr\n\t"                                        \
                             clflush "\n\t"                                     \
                             "mfence\n\t"                                       \
-                            "1: mov $" __stringify(NUM_BRANCHES) ", %%ecx\n\t" \
+                            "1: mov $" __stringify(NUM_LOOPS) ", %%ecx\n\t"    \
                             FEP "loop .\n\t"                                   \
                             FEP "mov %%edi, %%ecx\n\t"                         \
                             FEP "xor %%eax, %%eax\n\t"                         \
@@ -500,7 +510,7 @@ static void guest_test_fixed_counters(void)
                wrmsr(MSR_CORE_PERF_FIXED_CTR0 + i, 0);
                wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, FIXED_PMC_CTRL(i, FIXED_PMC_KERNEL));
                wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, FIXED_PMC_GLOBAL_CTRL_ENABLE(i));
-               __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+               __asm__ __volatile__("loop ." : "+c"((int){NUM_LOOPS}));
                wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
                val = rdmsr(MSR_CORE_PERF_FIXED_CTR0 + i);