perf/core: Add perf_sample_save_brstack() helper
authorNamhyung Kim <namhyung@kernel.org>
Wed, 18 Jan 2023 06:05:55 +0000 (22:05 -0800)
committerIngo Molnar <mingo@kernel.org>
Wed, 18 Jan 2023 10:57:20 +0000 (11:57 +0100)
When we save the branch stack to the perf sample data, we need to
update the sample flags and the dynamic size.  To make sure this is
done consistently, add the perf_sample_save_brstack() helper and
convert all call sites.
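
For illustration, a converted overflow-handler call site ends up looking
like the sketch below (kernel context assumed; cpuc and regs stand in for
the driver's per-CPU hardware state and the register snapshot taken at
the interrupt):

	struct perf_sample_data data;

	perf_sample_data_init(&data, 0, event->hw.last_period);

	if (has_branch_stack(event))
		/*
		 * Sets data->br_stack, grows data->dyn_size by the branch
		 * stack size and marks PERF_SAMPLE_BRANCH_STACK in
		 * data->sample_flags in one place.
		 */
		perf_sample_save_brstack(&data, event, &cpuc->lbr_stack);

	if (perf_event_overflow(event, &data, regs))
		x86_pmu_stop(event, 0);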

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Tested-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20230118060559.615653-5-namhyung@kernel.org
arch/powerpc/perf/core-book3s.c
arch/x86/events/amd/core.c
arch/x86/events/intel/core.c
arch/x86/events/intel/ds.c
include/linux/perf_event.h
kernel/events/core.c

index bf318dd9b709fc74f5bb073a2a5ef57ad9755768..8c1f7def596e4a581abcef43b383174918076f26 100644 (file)
@@ -2313,8 +2313,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
                        struct cpu_hw_events *cpuhw;
                        cpuhw = this_cpu_ptr(&cpu_hw_events);
                        power_pmu_bhrb_read(event, cpuhw);
-                       data.br_stack = &cpuhw->bhrb_stack;
-                       data.sample_flags |= PERF_SAMPLE_BRANCH_STACK;
+                       perf_sample_save_brstack(&data, event, &cpuhw->bhrb_stack);
                }
 
                if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC &&
index 4386b10682ce4fd2ded0045eeb352c42fd056242..8c45b198b62f555d1565c0d68b38304afe767f7c 100644 (file)
@@ -928,10 +928,8 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
                if (!x86_perf_event_set_period(event))
                        continue;
 
-               if (has_branch_stack(event)) {
-                       data.br_stack = &cpuc->lbr_stack;
-                       data.sample_flags |= PERF_SAMPLE_BRANCH_STACK;
-               }
+               if (has_branch_stack(event))
+                       perf_sample_save_brstack(&data, event, &cpuc->lbr_stack);
 
                if (perf_event_overflow(event, &data, regs))
                        x86_pmu_stop(event, 0);
index 29d2d0411caf63b21bc97141cde2256eef79acef..14f0a746257d129ff273433f4c9624cd7ee11076 100644 (file)
@@ -3036,10 +3036,8 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
 
                perf_sample_data_init(&data, 0, event->hw.last_period);
 
-               if (has_branch_stack(event)) {
-                       data.br_stack = &cpuc->lbr_stack;
-                       data.sample_flags |= PERF_SAMPLE_BRANCH_STACK;
-               }
+               if (has_branch_stack(event))
+                       perf_sample_save_brstack(&data, event, &cpuc->lbr_stack);
 
                if (perf_event_overflow(event, &data, regs))
                        x86_pmu_stop(event, 0);
index 158cf845fc80e3ae86849dee41f8c60de1bddb92..07c8a2cdc3eed08c7a7f9d3024af5d7f7cf9bfa2 100644 (file)
@@ -1720,10 +1720,8 @@ static void setup_pebs_fixed_sample_data(struct perf_event *event,
                data->sample_flags |= PERF_SAMPLE_TIME;
        }
 
-       if (has_branch_stack(event)) {
-               data->br_stack = &cpuc->lbr_stack;
-               data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
-       }
+       if (has_branch_stack(event))
+               perf_sample_save_brstack(data, event, &cpuc->lbr_stack);
 }
 
 static void adaptive_pebs_save_regs(struct pt_regs *regs,
@@ -1883,8 +1881,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
 
                if (has_branch_stack(event)) {
                        intel_pmu_store_pebs_lbrs(lbr);
-                       data->br_stack = &cpuc->lbr_stack;
-                       data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
+                       perf_sample_save_brstack(data, event, &cpuc->lbr_stack);
                }
        }
 
index 569dfac5887f93fe9082521729be8e854d7c6912..7db0e9cc268260f925c3b11e8c25b263694b7642 100644 (file)
@@ -1102,6 +1102,31 @@ extern u64 perf_event_read_value(struct perf_event *event,
 
 extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
 
+static inline bool branch_sample_no_flags(const struct perf_event *event)
+{
+       return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_FLAGS;
+}
+
+static inline bool branch_sample_no_cycles(const struct perf_event *event)
+{
+       return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_CYCLES;
+}
+
+static inline bool branch_sample_type(const struct perf_event *event)
+{
+       return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_TYPE_SAVE;
+}
+
+static inline bool branch_sample_hw_index(const struct perf_event *event)
+{
+       return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX;
+}
+
+static inline bool branch_sample_priv(const struct perf_event *event)
+{
+       return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_PRIV_SAVE;
+}
+
 
 struct perf_sample_data {
        /*
@@ -1210,6 +1235,21 @@ static inline void perf_sample_save_raw_data(struct perf_sample_data *data,
        data->sample_flags |= PERF_SAMPLE_RAW;
 }
 
+static inline void perf_sample_save_brstack(struct perf_sample_data *data,
+                                           struct perf_event *event,
+                                           struct perf_branch_stack *brs)
+{
+       int size = sizeof(u64); /* nr */
+
+       if (branch_sample_hw_index(event))
+               size += sizeof(u64);
+       size += brs->nr * sizeof(struct perf_branch_entry);
+
+       data->br_stack = brs;
+       data->dyn_size += size;
+       data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
+}
+
 /*
  * Clear all bitfields in the perf_branch_entry.
  * The to and from fields are not cleared because they are
@@ -1827,30 +1867,4 @@ static inline void perf_lopwr_cb(bool mode)
 }
 #endif
 
-#ifdef CONFIG_PERF_EVENTS
-static inline bool branch_sample_no_flags(const struct perf_event *event)
-{
-       return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_FLAGS;
-}
-
-static inline bool branch_sample_no_cycles(const struct perf_event *event)
-{
-       return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_CYCLES;
-}
-
-static inline bool branch_sample_type(const struct perf_event *event)
-{
-       return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_TYPE_SAVE;
-}
-
-static inline bool branch_sample_hw_index(const struct perf_event *event)
-{
-       return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX;
-}
-
-static inline bool branch_sample_priv(const struct perf_event *event)
-{
-       return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_PRIV_SAVE;
-}
-#endif /* CONFIG_PERF_EVENTS */
 #endif /* _LINUX_PERF_EVENT_H */
index 17108a23b3dd41c54a2de6e75c674047355c8691..bd20705a23bda9718626cd6700a15f298a194822 100644 (file)
@@ -7310,7 +7310,7 @@ void perf_output_sample(struct perf_output_handle *handle,
        }
 
        if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
-               if (data->sample_flags & PERF_SAMPLE_BRANCH_STACK) {
+               if (data->br_stack) {
                        size_t size;
 
                        size = data->br_stack->nr
@@ -7587,16 +7587,10 @@ void perf_prepare_sample(struct perf_event_header *header,
                data->sample_flags |= PERF_SAMPLE_RAW;
        }
 
-       if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
-               int size = sizeof(u64); /* nr */
-               if (data->sample_flags & PERF_SAMPLE_BRANCH_STACK) {
-                       if (branch_sample_hw_index(event))
-                               size += sizeof(u64);
-
-                       size += data->br_stack->nr
-                             * sizeof(struct perf_branch_entry);
-               }
-               data->dyn_size += size;
+       if (filtered_sample_type & PERF_SAMPLE_BRANCH_STACK) {
+               data->br_stack = NULL;
+               data->dyn_size += sizeof(u64);
+               data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
        }
 
        if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))