perf/x86/intel: Move the topdown stuff into the intel driver
author		Peter Zijlstra <peterz@infradead.org>
		Tue, 10 May 2022 19:28:06 +0000 (21:28 +0200)
committer	Peter Zijlstra <peterz@infradead.org>
		Wed, 7 Sep 2022 19:54:02 +0000 (21:54 +0200)
Use the new x86_pmu::{set_period,update}() methods to push the topdown
stuff into the Intel driver, where it belongs.
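
Concretely, the Intel driver now provides ->set_period() and ->update()
implementations that handle the topdown counters themselves and fall back
to the generic helpers for everything else. A minimal sketch of the two
method hooks this relies on (added by a preceding patch, not this one;
struct layout elided, shown only for context):

	struct x86_pmu {
		/* ... */
		int	(*set_period)(struct perf_event *event);
		u64	(*update)(struct perf_event *event);
		/* ... */
	};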

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20220829101321.505933457@infradead.org
arch/x86/events/core.c
arch/x86/events/intel/core.c

index bb559b79ff23ea4cdc59ded91fc4aca336146f15..b074e71bab21a6a356384099c17fd617ea762ea9 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -119,9 +119,6 @@ u64 x86_perf_event_update(struct perf_event *event)
        if (unlikely(!hwc->event_base))
                return 0;
 
-       if (unlikely(is_topdown_count(event)) && x86_pmu.update_topdown_event)
-               return x86_pmu.update_topdown_event(event);
-
        /*
         * Careful: an NMI might modify the previous event value.
         *
@@ -1373,10 +1370,6 @@ int x86_perf_event_set_period(struct perf_event *event)
        if (unlikely(!hwc->event_base))
                return 0;
 
-       if (unlikely(is_topdown_count(event)) &&
-           x86_pmu.set_topdown_event_period)
-               return x86_pmu.set_topdown_event_period(event);
-
        /*
         * If we are way outside a reasonable range then just skip forward:
         */
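
With the topdown special cases removed from x86_perf_event_update() and
x86_perf_event_set_period(), callers dispatch through the x86_pmu_update /
x86_pmu_set_period static calls instead. A sketch of the expected wiring in
arch/x86/events/core.c, assuming the DEFINE_STATIC_CALL_NULL pattern already
used for the other x86_pmu methods (the wiring itself comes from a preceding
patch and is not part of this diff):

	DEFINE_STATIC_CALL_NULL(x86_pmu_set_period, *x86_pmu.set_period);
	DEFINE_STATIC_CALL_NULL(x86_pmu_update,     *x86_pmu.update);

	static void x86_pmu_static_call_update(void)
	{
		/* ... */
		static_call_update(x86_pmu_set_period, x86_pmu.set_period);
		static_call_update(x86_pmu_update, x86_pmu.update);
		/* ... */
	}

PMUs that do not override the methods are presumably pointed at the generic
x86_perf_event_set_period()/x86_perf_event_update() during init, so
non-Intel behaviour is unchanged.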
index ba101c28dcc97898b225cc91e4c18e2abbeaaf3b..feed732fdf57681c10bda16f806f95b0e0f27b88 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2302,7 +2302,7 @@ static void intel_pmu_nhm_workaround(void)
        for (i = 0; i < 4; i++) {
                event = cpuc->events[i];
                if (event)
-                       x86_perf_event_update(event);
+                       static_call(x86_pmu_update)(event);
        }
 
        for (i = 0; i < 4; i++) {
@@ -2317,7 +2317,7 @@ static void intel_pmu_nhm_workaround(void)
                event = cpuc->events[i];
 
                if (event) {
-                       x86_perf_event_set_period(event);
+                       static_call(x86_pmu_set_period)(event);
                        __x86_pmu_enable_event(&event->hw,
                                        ARCH_PERFMON_EVENTSEL_ENABLE);
                } else
@@ -2794,7 +2794,7 @@ static void intel_pmu_add_event(struct perf_event *event)
  */
 int intel_pmu_save_and_restart(struct perf_event *event)
 {
-       x86_perf_event_update(event);
+       static_call(x86_pmu_update)(event);
        /*
         * For a checkpointed counter always reset back to 0.  This
         * avoids a situation where the counter overflows, aborts the
@@ -2806,9 +2806,27 @@ int intel_pmu_save_and_restart(struct perf_event *event)
                wrmsrl(event->hw.event_base, 0);
                local64_set(&event->hw.prev_count, 0);
        }
+       return static_call(x86_pmu_set_period)(event);
+}
+
+static int intel_pmu_set_period(struct perf_event *event)
+{
+       if (unlikely(is_topdown_count(event)) &&
+           x86_pmu.set_topdown_event_period)
+               return x86_pmu.set_topdown_event_period(event);
+
        return x86_perf_event_set_period(event);
 }
 
+static u64 intel_pmu_update(struct perf_event *event)
+{
+       if (unlikely(is_topdown_count(event)) &&
+           x86_pmu.update_topdown_event)
+               return x86_pmu.update_topdown_event(event);
+
+       return x86_perf_event_update(event);
+}
+
 static void intel_pmu_reset(void)
 {
        struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
@@ -4786,6 +4804,8 @@ static __initconst const struct x86_pmu intel_pmu = {
        .add                    = intel_pmu_add_event,
        .del                    = intel_pmu_del_event,
        .read                   = intel_pmu_read_event,
+       .set_period             = intel_pmu_set_period,
+       .update                 = intel_pmu_update,
        .hw_config              = intel_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
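
The net effect, sketched from the names in this diff (control flow
condensed): on Intel the static calls now land in the new wrappers, e.g.
in the overflow path via intel_pmu_save_and_restart() above,

	/* simplified: what handling a counter overflow now does on Intel */
	static_call(x86_pmu_update)(event);     /* -> intel_pmu_update()      */
	static_call(x86_pmu_set_period)(event); /* -> intel_pmu_set_period()  */

and each wrapper checks is_topdown_count(event) first, otherwise falling
through to the generic helper, so the topdown knowledge is now entirely
contained in the Intel driver.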