x86/msr: Rename rdpmcl() to rdpmc()
author: Xin Li (Intel) <xin@zytor.com>
Sun, 27 Apr 2025 09:20:16 +0000 (02:20 -0700)
committer: Ingo Molnar <mingo@kernel.org>
Fri, 2 May 2025 08:25:24 +0000 (10:25 +0200)
Now that rdpmc() is gone, rdpmcl() is the sole PMC read helper;
simply rename rdpmcl() to rdpmc().

Signed-off-by: Xin Li (Intel) <xin@zytor.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Uros Bizjak <ubizjak@gmail.com>
Link: https://lore.kernel.org/r/20250427092027.1598740-5-xin@zytor.com
arch/x86/events/amd/uncore.c
arch/x86/events/core.c
arch/x86/events/intel/core.c
arch/x86/events/intel/ds.c
arch/x86/include/asm/msr.h
arch/x86/include/asm/paravirt.h
arch/x86/kernel/cpu/resctrl/pseudo_lock.c

index 2a3259df619a57dc123357e8af47c10165654b44..42c833cf9d980dfbdc1f7ce99c9eb964af68bd32 100644 (file)
@@ -108,7 +108,7 @@ static void amd_uncore_read(struct perf_event *event)
        if (hwc->event_base_rdpmc < 0)
                rdmsrq(hwc->event_base, new);
        else
-               rdpmcl(hwc->event_base_rdpmc, new);
+               rdpmc(hwc->event_base_rdpmc, new);
 
        local64_set(&hwc->prev_count, new);
        delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
index 7cfd3764a582320fb319cddaef609283f6eef90a..600c0b61fec0031484037fd9ba6e79d5b7a2b2da 100644 (file)
@@ -135,7 +135,7 @@ u64 x86_perf_event_update(struct perf_event *event)
         */
        prev_raw_count = local64_read(&hwc->prev_count);
        do {
-               rdpmcl(hwc->event_base_rdpmc, new_raw_count);
+               rdpmc(hwc->event_base_rdpmc, new_raw_count);
        } while (!local64_try_cmpxchg(&hwc->prev_count,
                                      &prev_raw_count, new_raw_count));
 
index 3d80ffb2c3743176e92a84ac1fdd26fa2f1495bd..e8eec162da24152de4c86007681f0d6e8a9765d5 100644 (file)
@@ -2725,12 +2725,12 @@ static u64 intel_update_topdown_event(struct perf_event *event, int metric_end,
 
        if (!val) {
                /* read Fixed counter 3 */
-               rdpmcl((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots);
+               rdpmc((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots);
                if (!slots)
                        return 0;
 
                /* read PERF_METRICS */
-               rdpmcl(INTEL_PMC_FIXED_RDPMC_METRICS, metrics);
+               rdpmc(INTEL_PMC_FIXED_RDPMC_METRICS, metrics);
        } else {
                slots = val[0];
                metrics = val[1];
index 00a0a859f177747eb765c2d5fa5133945e743181..346db2f132d7e24815fcaa63f15c7cc2a1fe9edf 100644 (file)
@@ -2277,7 +2277,7 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
        WARN_ON(this_cpu_read(cpu_hw_events.enabled));
 
        prev_raw_count = local64_read(&hwc->prev_count);
-       rdpmcl(hwc->event_base_rdpmc, new_raw_count);
+       rdpmc(hwc->event_base_rdpmc, new_raw_count);
        local64_set(&hwc->prev_count, new_raw_count);
 
        /*
index 07d45fc374ef514425ce08ce9e52dfe9b9c7bac2..435a07bda3a1e6886faa82e5e2c12ccd170d840a 100644 (file)
@@ -217,7 +217,7 @@ static inline int rdmsrq_safe(u32 msr, u64 *p)
        return err;
 }
 
-#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))
+#define rdpmc(counter, val) ((val) = native_read_pmc(counter))
 
 #endif /* !CONFIG_PARAVIRT_XXL */
 
index c4dedb9847357cb21a17113cf4cf34be2ab8281e..faa0713553b13d2dee508dd250ed9063be7086db 100644 (file)
@@ -244,7 +244,7 @@ static inline u64 paravirt_read_pmc(int counter)
        return PVOP_CALL1(u64, cpu.read_pmc, counter);
 }
 
-#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
+#define rdpmc(counter, val) ((val) = paravirt_read_pmc(counter))
 
 static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
 {
index 26c354bdea07fe12b905c81117494143eb5983bd..15ff62d83bd8cd2b5b49e90914c4119f70096a2d 100644 (file)
@@ -1019,8 +1019,8 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
         * used in L1 cache, second to capture accurate value that does not
         * include cache misses incurred because of instruction loads.
         */
-       rdpmcl(hit_pmcnum, hits_before);
-       rdpmcl(miss_pmcnum, miss_before);
+       rdpmc(hit_pmcnum, hits_before);
+       rdpmc(miss_pmcnum, miss_before);
        /*
         * From SDM: Performing back-to-back fast reads are not guaranteed
         * to be monotonic.
@@ -1028,8 +1028,8 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
         * before proceeding.
         */
        rmb();
-       rdpmcl(hit_pmcnum, hits_before);
-       rdpmcl(miss_pmcnum, miss_before);
+       rdpmc(hit_pmcnum, hits_before);
+       rdpmc(miss_pmcnum, miss_before);
        /*
         * Use LFENCE to ensure all previous instructions are retired
         * before proceeding.
@@ -1051,8 +1051,8 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
         * before proceeding.
         */
        rmb();
-       rdpmcl(hit_pmcnum, hits_after);
-       rdpmcl(miss_pmcnum, miss_after);
+       rdpmc(hit_pmcnum, hits_after);
+       rdpmc(miss_pmcnum, miss_after);
        /*
         * Use LFENCE to ensure all previous instructions are retired
         * before proceeding.