tracing, hardirq: No moar _rcuidle() tracing
Author:     Peter Zijlstra <peterz@infradead.org>
AuthorDate: Thu, 12 Jan 2023 19:43:49 +0000 (20:43 +0100)
Commit:     Ingo Molnar <mingo@kernel.org>
CommitDate: Fri, 13 Jan 2023 10:48:16 +0000 (11:48 +0100)
The kernel test robot reported that trace_hardirqs_{on,off}() tickle the
forbidden _rcuidle() tracepoint variants via local_irq_{en,dis}able().

For 'sane' configs, i.e. those that select CONFIG_ARCH_WANTS_NO_INSTR,
these calls only happen with RCU enabled, so they can use the regular
tracepoints. That also makes it possible to trace them from NMI context
again.
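
For reference, a simplified sketch of the call chain behind the report on
a CONFIG_TRACE_IRQFLAGS build (illustration only, prior to this patch):

	local_irq_enable()
	  -> trace_hardirqs_on()
	       -> trace_irq_enable_rcuidle()	/* the forbidden _rcuidle() tracepoint */
	       -> tracer_hardirqs_on()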

Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20230112195541.477416709@infradead.org
diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
index 629f2854e12b534a390344144e543d396e120464..f992444a0b1fceacc6ac26ee1d72d52bd0d9bd01 100644
--- a/kernel/trace/trace_preemptirq.c
+++ b/kernel/trace/trace_preemptirq.c
 /* Per-cpu variable to prevent redundant calls when IRQs already off */
 static DEFINE_PER_CPU(int, tracing_irq_cpu);
 
+/*
+ * Use regular trace points on architectures that implement noinstr
+ * tooling: these calls will only happen with RCU enabled, which can
+ * use a regular tracepoint.
+ *
+ * On older architectures, use the rcuidle tracing methods (which
+ * aren't NMI-safe - so exclude NMI contexts):
+ */
+#ifdef CONFIG_ARCH_WANTS_NO_INSTR
+#define trace(point)   trace_##point
+#else
+#define trace(point)   if (!in_nmi()) trace_##point##_rcuidle
+#endif
+
 /*
  * Like trace_hardirqs_on() but without the lockdep invocation. This is
  * used in the low level entry code where the ordering vs. RCU is important
@@ -28,8 +42,7 @@ static DEFINE_PER_CPU(int, tracing_irq_cpu);
 void trace_hardirqs_on_prepare(void)
 {
        if (this_cpu_read(tracing_irq_cpu)) {
-               if (!in_nmi())
-                       trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
+               trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
                tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
                this_cpu_write(tracing_irq_cpu, 0);
        }
@@ -40,8 +53,7 @@ NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);
 void trace_hardirqs_on(void)
 {
        if (this_cpu_read(tracing_irq_cpu)) {
-               if (!in_nmi())
-                       trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+               trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
                tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
                this_cpu_write(tracing_irq_cpu, 0);
        }
@@ -63,8 +75,7 @@ void trace_hardirqs_off_finish(void)
        if (!this_cpu_read(tracing_irq_cpu)) {
                this_cpu_write(tracing_irq_cpu, 1);
                tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
-               if (!in_nmi())
-                       trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
+               trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1);
        }
 
 }
@@ -78,8 +89,7 @@ void trace_hardirqs_off(void)
        if (!this_cpu_read(tracing_irq_cpu)) {
                this_cpu_write(tracing_irq_cpu, 1);
                tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
-               if (!in_nmi())
-                       trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+               trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1);
        }
 }
 EXPORT_SYMBOL(trace_hardirqs_off);
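
For clarity, this is how the new trace() macro expands at the converted
call sites (illustration only, not part of the patch):

  With CONFIG_ARCH_WANTS_NO_INSTR:

	trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
  expands to:
	trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);

  Without CONFIG_ARCH_WANTS_NO_INSTR it expands to the old pattern,
  keeping the NMI exclusion for the non-NMI-safe _rcuidle() variant:

	if (!in_nmi())
		trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);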