ftrace: Do not bother checking per CPU "disabled" flag
author: Steven Rostedt <rostedt@goodmis.org>
Mon, 5 May 2025 21:21:06 +0000 (17:21 -0400)
committer: Steven Rostedt (Google) <rostedt@goodmis.org>
Thu, 8 May 2025 13:36:09 +0000 (09:36 -0400)
The per CPU "disabled" value was the original way to disable tracing when
the tracing subsystem was first created. Today, the ring buffer
infrastructure has its own way to disable tracing. In fact, things have
changed so much since 2008 that many things ignore the disable flag.

There's no reason for the function tracer to check it; if tracing is
disabled, the ring buffer will not record the event anyway.

Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: https://lore.kernel.org/20250505212234.868972758@goodmis.org
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
kernel/trace/trace_functions.c

index 98ccf3f00c519deaa768a3341d1774a5ca8426a9..bd153219a712b3e0f266f73e0b1e40c48134d5ec 100644 (file)
@@ -209,7 +209,6 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct ftrace_regs *fregs)
 {
        struct trace_array *tr = op->private;
-       struct trace_array_cpu *data;
        unsigned int trace_ctx;
        int bit;
 
@@ -224,9 +223,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
 
        trace_ctx = tracing_gen_ctx_dec();
 
-       data = this_cpu_ptr(tr->array_buffer.data);
-       if (!atomic_read(&data->disabled))
-               trace_function(tr, ip, parent_ip, trace_ctx, NULL);
+       trace_function(tr, ip, parent_ip, trace_ctx, NULL);
 
        ftrace_test_recursion_unlock(bit);
 }
@@ -236,10 +233,8 @@ function_args_trace_call(unsigned long ip, unsigned long parent_ip,
                         struct ftrace_ops *op, struct ftrace_regs *fregs)
 {
        struct trace_array *tr = op->private;
-       struct trace_array_cpu *data;
        unsigned int trace_ctx;
        int bit;
-       int cpu;
 
        if (unlikely(!tr->function_enabled))
                return;
@@ -250,10 +245,7 @@ function_args_trace_call(unsigned long ip, unsigned long parent_ip,
 
        trace_ctx = tracing_gen_ctx();
 
-       cpu = smp_processor_id();
-       data = per_cpu_ptr(tr->array_buffer.data, cpu);
-       if (!atomic_read(&data->disabled))
-               trace_function(tr, ip, parent_ip, trace_ctx, fregs);
+       trace_function(tr, ip, parent_ip, trace_ctx, fregs);
 
        ftrace_test_recursion_unlock(bit);
 }
@@ -352,7 +344,6 @@ function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
 {
        struct trace_func_repeats *last_info;
        struct trace_array *tr = op->private;
-       struct trace_array_cpu *data;
        unsigned int trace_ctx;
        int bit;
 
@@ -364,8 +355,7 @@ function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
                return;
 
        parent_ip = function_get_true_parent_ip(parent_ip, fregs);
-       data = this_cpu_ptr(tr->array_buffer.data);
-       if (atomic_read(&data->disabled))
+       if (!tracer_tracing_is_on(tr))
                goto out;
 
        /*