tracing: Use local_inc_return() for updating "disabled" counter in irqsoff tracer
author Steven Rostedt <rostedt@goodmis.org>
Mon, 5 May 2025 21:21:16 +0000 (17:21 -0400)
committer Steven Rostedt (Google) <rostedt@goodmis.org>
Fri, 9 May 2025 19:19:10 +0000 (15:19 -0400)
The irqsoff tracer uses the per CPU "disabled" field to prevent corruption
of the accounting when it starts to trace interrupts disabled, but there's
a slight race that could happen if for some reason it was called twice.
Use local_inc_return() instead, and only perform the update when the
returned count is 1 (i.e. this caller owns the critical section).

Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: https://lore.kernel.org/20250505212236.567884756@goodmis.org
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
kernel/trace/trace_irqsoff.c

index 0b6d932a931e74ababef7a12e1019cb709d18072..5496758b6c760f3af1ba6f2427d377ed4b21d086 100644 (file)
@@ -397,6 +397,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
+       long disabled;
 
        if (!tracer_enabled || !tracing_is_enabled())
                return;
@@ -411,15 +412,17 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
        if (unlikely(!data) || local_read(&data->disabled))
                return;
 
-       local_inc(&data->disabled);
+       disabled = local_inc_return(&data->disabled);
 
-       data->critical_sequence = max_sequence;
-       data->preempt_timestamp = ftrace_now(cpu);
-       data->critical_start = parent_ip ? : ip;
+       if (disabled == 1) {
+               data->critical_sequence = max_sequence;
+               data->preempt_timestamp = ftrace_now(cpu);
+               data->critical_start = parent_ip ? : ip;
 
-       __trace_function(tr, ip, parent_ip, tracing_gen_ctx());
+               __trace_function(tr, ip, parent_ip, tracing_gen_ctx());
 
-       per_cpu(tracing_cpu, cpu) = 1;
+               per_cpu(tracing_cpu, cpu) = 1;
+       }
 
        local_dec(&data->disabled);
 }
@@ -431,6 +434,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned int trace_ctx;
+       long disabled;
 
        cpu = raw_smp_processor_id();
        /* Always clear the tracing cpu on stopping the trace */
@@ -448,12 +452,15 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
            !data->critical_start || local_read(&data->disabled))
                return;
 
-       local_inc(&data->disabled);
+       disabled = local_inc_return(&data->disabled);
+
+       if (disabled == 1) {
+               trace_ctx = tracing_gen_ctx();
+               __trace_function(tr, ip, parent_ip, trace_ctx);
+               check_critical_timing(tr, data, parent_ip ? : ip, cpu);
+               data->critical_start = 0;
+       }
 
-       trace_ctx = tracing_gen_ctx();
-       __trace_function(tr, ip, parent_ip, trace_ctx);
-       check_critical_timing(tr, data, parent_ip ? : ip, cpu);
-       data->critical_start = 0;
        local_dec(&data->disabled);
 }