x86/msr: Convert the rdpmc() macro to an __always_inline function
author Xin Li (Intel) <xin@zytor.com>
Sun, 27 Apr 2025 09:20:17 +0000 (02:20 -0700)
committer Ingo Molnar <mingo@kernel.org>
Fri, 2 May 2025 08:26:56 +0000 (10:26 +0200)
Functions offer type safety and better readability compared to macros.
Additionally, always-inline functions can match the performance of
macros.  Converting the rdpmc() macro into an always-inline function
is simple and straightforward, so just make the change.

Moreover, the PMC value is now returned by the function instead of being
written through a macro argument, which further improves readability at
the call sites.
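
For illustration, the shape of the change at a call site (the definitions
and the example caller are taken from the diff below):

  /* Before: the macro writes the PMC value through its second argument. */
  #define rdpmc(counter, val) ((val) = native_read_pmc(counter))

  rdpmc(hwc->event_base_rdpmc, new);

  /* After: the __always_inline function returns the PMC value directly. */
  static __always_inline u64 rdpmc(int counter)
  {
          return native_read_pmc(counter);
  }

  new = rdpmc(hwc->event_base_rdpmc);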

Signed-off-by: Xin Li (Intel) <xin@zytor.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Uros Bizjak <ubizjak@gmail.com>
Link: https://lore.kernel.org/r/20250427092027.1598740-6-xin@zytor.com
arch/x86/events/amd/uncore.c
arch/x86/events/core.c
arch/x86/events/intel/core.c
arch/x86/events/intel/ds.c
arch/x86/include/asm/msr.h
arch/x86/include/asm/paravirt.h
arch/x86/kernel/cpu/resctrl/pseudo_lock.c

index 42c833cf9d980dfbdc1f7ce99c9eb964af68bd32..13c4cea545c5886ac06526892f4367f8265553c2 100644 (file)
@@ -108,7 +108,7 @@ static void amd_uncore_read(struct perf_event *event)
        if (hwc->event_base_rdpmc < 0)
                rdmsrq(hwc->event_base, new);
        else
-               rdpmc(hwc->event_base_rdpmc, new);
+               new = rdpmc(hwc->event_base_rdpmc);
 
        local64_set(&hwc->prev_count, new);
        delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
index 600c0b61fec0031484037fd9ba6e79d5b7a2b2da..bc92eba7631f8127ac33b027ae9509dd2bf85f9c 100644 (file)
@@ -135,7 +135,7 @@ u64 x86_perf_event_update(struct perf_event *event)
         */
        prev_raw_count = local64_read(&hwc->prev_count);
        do {
-               rdpmc(hwc->event_base_rdpmc, new_raw_count);
+               new_raw_count = rdpmc(hwc->event_base_rdpmc);
        } while (!local64_try_cmpxchg(&hwc->prev_count,
                                      &prev_raw_count, new_raw_count));
 
index e8eec162da24152de4c86007681f0d6e8a9765d5..33f3fd2b8b88365ce091b85c6df8a50f85923135 100644 (file)
@@ -2725,12 +2725,12 @@ static u64 intel_update_topdown_event(struct perf_event *event, int metric_end,
 
        if (!val) {
                /* read Fixed counter 3 */
-               rdpmc((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots);
+               slots = rdpmc(3 | INTEL_PMC_FIXED_RDPMC_BASE);
                if (!slots)
                        return 0;
 
                /* read PERF_METRICS */
-               rdpmc(INTEL_PMC_FIXED_RDPMC_METRICS, metrics);
+               metrics = rdpmc(INTEL_PMC_FIXED_RDPMC_METRICS);
        } else {
                slots = val[0];
                metrics = val[1];
index 346db2f132d7e24815fcaa63f15c7cc2a1fe9edf..4f52719cd2c0b14a3ab5a5078f468c68f8bc75d0 100644 (file)
@@ -2277,7 +2277,7 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
        WARN_ON(this_cpu_read(cpu_hw_events.enabled));
 
        prev_raw_count = local64_read(&hwc->prev_count);
-       rdpmc(hwc->event_base_rdpmc, new_raw_count);
+       new_raw_count = rdpmc(hwc->event_base_rdpmc);
        local64_set(&hwc->prev_count, new_raw_count);
 
        /*
index 435a07bda3a1e6886faa82e5e2c12ccd170d840a..fbeb313ccad2bde42b557635503d2c12a98333d9 100644 (file)
@@ -217,7 +217,10 @@ static inline int rdmsrq_safe(u32 msr, u64 *p)
        return err;
 }
 
-#define rdpmc(counter, val) ((val) = native_read_pmc(counter))
+static __always_inline u64 rdpmc(int counter)
+{
+       return native_read_pmc(counter);
+}
 
 #endif /* !CONFIG_PARAVIRT_XXL */
 
index faa0713553b13d2dee508dd250ed9063be7086db..f272c4bd3d5b4503815f3235b7ce4fd399c27d30 100644 (file)
@@ -239,13 +239,11 @@ static inline int rdmsrq_safe(unsigned msr, u64 *p)
        return err;
 }
 
-static inline u64 paravirt_read_pmc(int counter)
+static __always_inline u64 rdpmc(int counter)
 {
        return PVOP_CALL1(u64, cpu.read_pmc, counter);
 }
 
-#define rdpmc(counter, val) ((val) = paravirt_read_pmc(counter))
-
 static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
 {
        PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
index 15ff62d83bd8cd2b5b49e90914c4119f70096a2d..61d762555a7964806253ed9effab166a4470ce0c 100644 (file)
@@ -1019,8 +1019,8 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
         * used in L1 cache, second to capture accurate value that does not
         * include cache misses incurred because of instruction loads.
         */
-       rdpmc(hit_pmcnum, hits_before);
-       rdpmc(miss_pmcnum, miss_before);
+       hits_before = rdpmc(hit_pmcnum);
+       miss_before = rdpmc(miss_pmcnum);
        /*
         * From SDM: Performing back-to-back fast reads are not guaranteed
         * to be monotonic.
@@ -1028,8 +1028,8 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
         * before proceeding.
         */
        rmb();
-       rdpmc(hit_pmcnum, hits_before);
-       rdpmc(miss_pmcnum, miss_before);
+       hits_before = rdpmc(hit_pmcnum);
+       miss_before = rdpmc(miss_pmcnum);
        /*
         * Use LFENCE to ensure all previous instructions are retired
         * before proceeding.
@@ -1051,8 +1051,8 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
         * before proceeding.
         */
        rmb();
-       rdpmc(hit_pmcnum, hits_after);
-       rdpmc(miss_pmcnum, miss_after);
+       hits_after = rdpmc(hit_pmcnum);
+       miss_after = rdpmc(miss_pmcnum);
        /*
         * Use LFENCE to ensure all previous instructions are retired
         * before proceeding.