arm_pmu: Tidy up clear_event_idx callbacks
author	Suzuki K Poulose <suzuki.poulose@arm.com>
Tue, 10 Jul 2018 08:58:01 +0000 (09:58 +0100)
committer	Will Deacon <will.deacon@arm.com>
Tue, 10 Jul 2018 17:19:02 +0000 (18:19 +0100)
The armpmu uses the get_event_idx callback to allocate an event
counter for a given event, which marks the selected counter as
"used". However, when the event is deleted, the arm_pmu core code
clears the "used" bit itself and then invokes the clear_event_idx
callback, splitting the job between the core code and the backend.
To keep things tidy, mandate the implementation of clear_event_idx()
and add it for the existing backends. This will be useful for adding
chained event support, where we leave the event idx maintenance to
the backend.

Also, when an event is removed from the PMU, reset the hw.idx to
indicate that no counter is allocated for this event, to help the
backends do better checks. This will also be used for the chained
counter support.
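
As a minimal sketch of the resulting split (illustrative only; the
examplepmu_* name is hypothetical and not part of this patch), a
backend callback simply releases the counter it claimed in
get_event_idx(), while the core invokes the callback unconditionally
and then resets the index:

	/* Backend side: undo whatever get_event_idx() allocated. */
	static void examplepmu_clear_event_idx(struct pmu_hw_events *cpuc,
					       struct perf_event *event)
	{
		clear_bit(event->hw.idx, cpuc->used_mask);
	}

	/*
	 * Core side (armpmu_del): the callback is now mandatory, so it
	 * is called unconditionally, and the event forgets its counter.
	 */
	armpmu->clear_event_idx(hw_events, event);
	event->hw.idx = -1;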

Cc: Will Deacon <will.deacon@arm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Julien Thierry <julien.thierry@arm.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
arch/arm/kernel/perf_event_v6.c
arch/arm/kernel/perf_event_v7.c
arch/arm/kernel/perf_event_xscale.c
arch/arm64/kernel/perf_event.c
drivers/perf/arm_pmu.c

index 0729f9841ef4e5bab36e362a266c927546c8cd22..1ae99deeec5491ebddfd212acd8de384e8969d0f 100644 (file)
@@ -411,6 +411,12 @@ armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
        }
 }
 
+static void armv6pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+                                    struct perf_event *event)
+{
+       clear_bit(event->hw.idx, cpuc->used_mask);
+}
+
 static void armv6pmu_disable_event(struct perf_event *event)
 {
        unsigned long val, mask, evt, flags;
@@ -491,6 +497,7 @@ static void armv6pmu_init(struct arm_pmu *cpu_pmu)
        cpu_pmu->read_counter   = armv6pmu_read_counter;
        cpu_pmu->write_counter  = armv6pmu_write_counter;
        cpu_pmu->get_event_idx  = armv6pmu_get_event_idx;
+       cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx;
        cpu_pmu->start          = armv6pmu_start;
        cpu_pmu->stop           = armv6pmu_stop;
        cpu_pmu->map_event      = armv6_map_event;
@@ -541,6 +548,7 @@ static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
        cpu_pmu->read_counter   = armv6pmu_read_counter;
        cpu_pmu->write_counter  = armv6pmu_write_counter;
        cpu_pmu->get_event_idx  = armv6pmu_get_event_idx;
+       cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx;
        cpu_pmu->start          = armv6pmu_start;
        cpu_pmu->stop           = armv6pmu_stop;
        cpu_pmu->map_event      = armv6mpcore_map_event;
index 973043dd6187a730cd638f864a85ac24ae04243e..a4fb0f8b8f84a96544977bf0c31b49d6e93c714a 100644 (file)
@@ -1058,6 +1058,12 @@ static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
        return -EAGAIN;
 }
 
+static void armv7pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+                                    struct perf_event *event)
+{
+       clear_bit(event->hw.idx, cpuc->used_mask);
+}
+
 /*
  * Add an event filter to a given event. This will only work for PMUv2 PMUs.
  */
@@ -1167,6 +1173,7 @@ static void armv7pmu_init(struct arm_pmu *cpu_pmu)
        cpu_pmu->read_counter   = armv7pmu_read_counter;
        cpu_pmu->write_counter  = armv7pmu_write_counter;
        cpu_pmu->get_event_idx  = armv7pmu_get_event_idx;
+       cpu_pmu->clear_event_idx = armv7pmu_clear_event_idx;
        cpu_pmu->start          = armv7pmu_start;
        cpu_pmu->stop           = armv7pmu_stop;
        cpu_pmu->reset          = armv7pmu_reset;
@@ -1637,6 +1644,7 @@ static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
        bool venum_event = EVENT_VENUM(hwc->config_base);
        bool krait_event = EVENT_CPU(hwc->config_base);
 
+       armv7pmu_clear_event_idx(cpuc, event);
        if (venum_event || krait_event) {
                bit = krait_event_to_bit(event, region, group);
                clear_bit(bit, cpuc->used_mask);
@@ -1966,6 +1974,7 @@ static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
        bool venum_event = EVENT_VENUM(hwc->config_base);
        bool scorpion_event = EVENT_CPU(hwc->config_base);
 
+       armv7pmu_clear_event_idx(cpuc, event);
        if (venum_event || scorpion_event) {
                bit = scorpion_event_to_bit(event, region, group);
                clear_bit(bit, cpuc->used_mask);
index 942230fe042637046ea3dc8a29b1d2771326ce3a..f6cdcacfb96dbc1bef13ec5eab5583e1b7341594 100644 (file)
@@ -292,6 +292,12 @@ xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
        }
 }
 
+static void xscalepmu_clear_event_idx(struct pmu_hw_events *cpuc,
+                                    struct perf_event *event)
+{
+       clear_bit(event->hw.idx, cpuc->used_mask);
+}
+
 static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
 {
        unsigned long flags, val;
@@ -370,6 +376,7 @@ static int xscale1pmu_init(struct arm_pmu *cpu_pmu)
        cpu_pmu->read_counter   = xscale1pmu_read_counter;
        cpu_pmu->write_counter  = xscale1pmu_write_counter;
        cpu_pmu->get_event_idx  = xscale1pmu_get_event_idx;
+       cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
        cpu_pmu->start          = xscale1pmu_start;
        cpu_pmu->stop           = xscale1pmu_stop;
        cpu_pmu->map_event      = xscale_map_event;
@@ -738,6 +745,7 @@ static int xscale2pmu_init(struct arm_pmu *cpu_pmu)
        cpu_pmu->read_counter   = xscale2pmu_read_counter;
        cpu_pmu->write_counter  = xscale2pmu_write_counter;
        cpu_pmu->get_event_idx  = xscale2pmu_get_event_idx;
+       cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
        cpu_pmu->start          = xscale2pmu_start;
        cpu_pmu->stop           = xscale2pmu_stop;
        cpu_pmu->map_event      = xscale_map_event;
index 66a2ffdca6ddd3d3bd437eda97db5d2d28bfb142..ac66851d4b1300f5b4dcc22a71413d9b2b2dcf30 100644 (file)
@@ -778,6 +778,12 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
        return -EAGAIN;
 }
 
+static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+                                 struct perf_event *event)
+{
+       clear_bit(event->hw.idx, cpuc->used_mask);
+}
+
 /*
  * Add an event filter to a given event. This will only work for PMUv2 PMUs.
  */
@@ -956,6 +962,7 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
        cpu_pmu->read_counter           = armv8pmu_read_counter,
        cpu_pmu->write_counter          = armv8pmu_write_counter,
        cpu_pmu->get_event_idx          = armv8pmu_get_event_idx,
+       cpu_pmu->clear_event_idx        = armv8pmu_clear_event_idx,
        cpu_pmu->start                  = armv8pmu_start,
        cpu_pmu->stop                   = armv8pmu_stop,
        cpu_pmu->reset                  = armv8pmu_reset,
index 8cad6b535a2c5bf1029bfb749fb2544d152a394e..a28881058f180f6497ce8845d285e3c9cc1c3bd3 100644 (file)
@@ -238,11 +238,10 @@ armpmu_del(struct perf_event *event, int flags)
 
        armpmu_stop(event, PERF_EF_UPDATE);
        hw_events->events[idx] = NULL;
-       clear_bit(idx, hw_events->used_mask);
-       if (armpmu->clear_event_idx)
-               armpmu->clear_event_idx(hw_events, event);
-
+       armpmu->clear_event_idx(hw_events, event);
        perf_event_update_userpage(event);
+       /* Clear the allocated counter */
+       hwc->idx = -1;
 }
 
 static int