perf trace: Add missed freeing of ordered events and thread
author Ian Rogers <irogers@google.com>
Tue, 17 Jun 2025 22:33:55 +0000 (15:33 -0700)
committer Namhyung Kim <namhyung@kernel.org>
Tue, 24 Jun 2025 17:27:51 +0000 (10:27 -0700)
Caught by leak sanitizer when running "perf trace BTF general tests".
Release the reference on the current thread with thread__zput and free
the ordered events in trace__exit. Make the ordered_events
initialization unconditional and early so that the trace__exit cleanup
is simple: ordered_events__init doesn't allocate memory, it just sets
up 4 values and initializes 3 list heads.
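For reference, ordered_events__init (tools/perf/util/ordered-events.c)
is roughly the following; a sketch from memory of the tree around this
change, not a verbatim quote:

	void ordered_events__init(struct ordered_events *oe,
				  ordered_events__deliver_t deliver,
				  void *data)
	{
		/* The 3 list heads: queued events, reuse cache, buffers to free. */
		INIT_LIST_HEAD(&oe->events);
		INIT_LIST_HEAD(&oe->cache);
		INIT_LIST_HEAD(&oe->to_free);
		/* The 4 values: allocation limit and counter, deliver callback, cookie. */
		oe->max_alloc_size = (u64) -1;
		oe->cur_alloc_size = 0;
		oe->deliver = deliver;
		oe->data = data;
	}

Since no memory is allocated here, doing the init unconditionally in
cmd_trace is cheap, and trace__exit can call ordered_events__free
without first checking trace.sort_events.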

Signed-off-by: Ian Rogers <irogers@google.com>
Link: https://lore.kernel.org/r/20250617223356.2752099-3-irogers@google.com
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
tools/perf/builtin-trace.c

index 61650be8fccdb864e833c42574b61c8bebf64692..c38225a89fc8f8a571bd66291b1a73d1acf18e77 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -5361,6 +5361,7 @@ out:
 
 static void trace__exit(struct trace *trace)
 {
+       thread__zput(trace->current);
        strlist__delete(trace->ev_qualifier);
        zfree(&trace->ev_qualifier_ids.entries);
        if (trace->syscalls.table) {
@@ -5371,6 +5372,7 @@ static void trace__exit(struct trace *trace)
        zfree(&trace->perfconfig_events);
        evlist__delete(trace->evlist);
        trace->evlist = NULL;
+       ordered_events__free(&trace->oe.data);
 #ifdef HAVE_LIBBPF_SUPPORT
        btf__free(trace->btf);
        trace->btf = NULL;
@@ -5520,6 +5522,9 @@ int cmd_trace(int argc, const char **argv)
        sigchld_act.sa_sigaction = sighandler_chld;
        sigaction(SIGCHLD, &sigchld_act, NULL);
 
+       ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
+       ordered_events__set_copy_on_queue(&trace.oe.data, true);
+
        trace.evlist = evlist__new();
 
        if (trace.evlist == NULL) {
@@ -5678,11 +5683,6 @@ skip_augmentation:
                        trace__load_vmlinux_btf(&trace);
        }
 
-       if (trace.sort_events) {
-               ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
-               ordered_events__set_copy_on_queue(&trace.oe.data, true);
-       }
-
        /*
         * If we are augmenting syscalls, then combine what we put in the
         * __augmented_syscalls__ BPF map with what is in the