ftrace: Do not disable function graph based on "disabled" field
author Steven Rostedt <rostedt@goodmis.org>
Mon, 5 May 2025 21:21:11 +0000 (17:21 -0400)
committer Steven Rostedt (Google) <rostedt@goodmis.org>
Fri, 9 May 2025 19:19:10 +0000 (15:19 -0400)
The per CPU "disabled" value was the original way to disable tracing when
the tracing subsystem was first created. Today, the ring buffer
infrastructure has its own way to disable tracing. In fact, things have
changed so much since 2008 that many parts of the tracing code now ignore
the disable flag.

Do not bother disabling the function graph tracer if the per CPU disabled
field is set. Just record as normal. If tracing is disabled in the ring
buffer, the event simply will not be recorded.

Also, when tracing is enabled again, the return event of the function will
no longer be dropped.
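
For reference, a minimal sketch (not part of this patch) of why the
tracer-side check is redundant: the ring buffer gates every write on its
own record-enabled state, so ring_buffer_lock_reserve() returns NULL while
recording is disabled and nothing is written. It also handles preemption
protection internally, which is why the preempt_disable_notrace() pair can
go away. The example_record() helper below is hypothetical and only
illustrates the pattern; the ring buffer calls themselves are existing
kernel APIs.

    #include <linux/ring_buffer.h>
    #include <linux/trace_clock.h>

    /* Hypothetical helper, for illustration only. */
    static void example_record(struct trace_buffer *buffer)
    {
            struct ring_buffer_event *event;

            /*
             * Returns NULL while recording on this buffer is disabled
             * (e.g. after ring_buffer_record_disable()), so nothing is
             * written and no tracer-side "disabled" check is needed.
             * Preemption is disabled by the ring buffer until commit.
             */
            event = ring_buffer_lock_reserve(buffer, sizeof(u64));
            if (!event)
                    return;

            *(u64 *)ring_buffer_event_data(event) = trace_clock_local();
            ring_buffer_unlock_commit(buffer);
    }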

Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: https://lore.kernel.org/20250505212235.715752008@goodmis.org
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
kernel/trace/trace_functions_graph.c

index 0c357a89c58e01ea0b437e8cb71bfd63368ac065..9234e2c39abfc78059de2bd0836edd53f40e4aaf 100644
@@ -202,12 +202,9 @@ static int graph_entry(struct ftrace_graph_ent *trace,
 {
        unsigned long *task_var = fgraph_get_task_var(gops);
        struct trace_array *tr = gops->private;
-       struct trace_array_cpu *data;
        struct fgraph_times *ftimes;
        unsigned int trace_ctx;
-       long disabled;
        int ret = 0;
-       int cpu;
 
        if (*task_var & TRACE_GRAPH_NOTRACE)
                return 0;
@@ -257,21 +254,14 @@ static int graph_entry(struct ftrace_graph_ent *trace,
        if (tracing_thresh)
                return 1;
 
-       preempt_disable_notrace();
-       cpu = raw_smp_processor_id();
-       data = per_cpu_ptr(tr->array_buffer.data, cpu);
-       disabled = atomic_read(&data->disabled);
-       if (likely(!disabled)) {
-               trace_ctx = tracing_gen_ctx();
-               if (IS_ENABLED(CONFIG_FUNCTION_GRAPH_RETADDR) &&
-                   tracer_flags_is_set(TRACE_GRAPH_PRINT_RETADDR)) {
-                       unsigned long retaddr = ftrace_graph_top_ret_addr(current);
-                       ret = __trace_graph_retaddr_entry(tr, trace, trace_ctx, retaddr);
-               } else {
-                       ret = __graph_entry(tr, trace, trace_ctx, fregs);
-               }
+       trace_ctx = tracing_gen_ctx();
+       if (IS_ENABLED(CONFIG_FUNCTION_GRAPH_RETADDR) &&
+           tracer_flags_is_set(TRACE_GRAPH_PRINT_RETADDR)) {
+               unsigned long retaddr = ftrace_graph_top_ret_addr(current);
+               ret = __trace_graph_retaddr_entry(tr, trace, trace_ctx, retaddr);
+       } else {
+               ret = __graph_entry(tr, trace, trace_ctx, fregs);
        }
-       preempt_enable_notrace();
 
        return ret;
 }
@@ -351,13 +341,10 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
 {
        unsigned long *task_var = fgraph_get_task_var(gops);
        struct trace_array *tr = gops->private;
-       struct trace_array_cpu *data;
        struct fgraph_times *ftimes;
        unsigned int trace_ctx;
        u64 calltime, rettime;
-       long disabled;
        int size;
-       int cpu;
 
        rettime = trace_clock_local();
 
@@ -376,15 +363,8 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
 
        calltime = ftimes->calltime;
 
-       preempt_disable_notrace();
-       cpu = raw_smp_processor_id();
-       data = per_cpu_ptr(tr->array_buffer.data, cpu);
-       disabled = atomic_read(&data->disabled);
-       if (likely(!disabled)) {
-               trace_ctx = tracing_gen_ctx();
-               __trace_graph_return(tr, trace, trace_ctx, calltime, rettime);
-       }
-       preempt_enable_notrace();
+       trace_ctx = tracing_gen_ctx();
+       __trace_graph_return(tr, trace, trace_ctx, calltime, rettime);
 }
 
 static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,