Merge tag 'sched_ext-for-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tj...
[linux-2.6-block.git] / kernel / trace / trace_preemptirq.c
CommitLineData
c3bc8fd6
JFG
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * preemptoff and irqoff tracepoints
4 *
5 * Copyright (C) Joel Fernandes (Google) <joel@joelfernandes.org>
6 */
7
8#include <linux/kallsyms.h>
9#include <linux/uaccess.h>
10#include <linux/module.h>
11#include <linux/ftrace.h>
eeeb080b 12#include <linux/kprobes.h>
3f1756dc 13#include "trace.h"
c3bc8fd6
JFG
14
15#define CREATE_TRACE_POINTS
16#include <trace/events/preemptirq.h>
17
9aedeaed
PZ
18/*
19 * Use regular trace points on architectures that implement noinstr
20 * tooling: these calls will only happen with RCU enabled, which can
21 * use a regular tracepoint.
22 *
23 * On older architectures, use the rcuidle tracing methods (which
24 * aren't NMI-safe - so exclude NMI contexts):
25 */
26#ifdef CONFIG_ARCH_WANTS_NO_INSTR
27#define trace(point) trace_##point
28#else
29#define trace(point) if (!in_nmi()) trace_##point##_rcuidle
30#endif
31
3017ba4b
PZ
32#ifdef CONFIG_TRACE_IRQFLAGS
33/* Per-cpu variable to prevent redundant calls when IRQs already off */
34static DEFINE_PER_CPU(int, tracing_irq_cpu);
35
0995a5df
TG
/*
 * Like trace_hardirqs_on() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into a RCU on and a RCU off section.
 */
void trace_hardirqs_on_prepare(void)
{
	/*
	 * tracing_irq_cpu is non-zero only if a disable was traced on this
	 * CPU; this guard pairs enable events with the preceding disable
	 * and suppresses redundant enable events.
	 */
	if (this_cpu_read(tracing_irq_cpu)) {
		trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}
}
EXPORT_SYMBOL(trace_hardirqs_on_prepare);
/* Must not be kprobed: runs in low level entry paths. */
NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);
52
c3bc8fd6
JFG
/*
 * Emit the irq-enable tracepoint/tracer events (if a disable was
 * previously recorded on this CPU) and then perform the full lockdep
 * hardirqs-on transition.
 */
void trace_hardirqs_on(void)
{
	/* Only trace the enable if the matching disable was traced. */
	if (this_cpu_read(tracing_irq_cpu)) {
		trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	/*
	 * Tracing runs before lockdep here; lockdep's staged prepare/on
	 * sequence comes last (mirror image of trace_hardirqs_off(),
	 * which calls lockdep first).
	 */
	lockdep_hardirqs_on_prepare();
	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
/* Must not be kprobed: runs in low level entry paths. */
NOKPROBE_SYMBOL(trace_hardirqs_on);
c3bc8fd6 66
0995a5df
TG
/*
 * Like trace_hardirqs_off() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into a RCU on and a RCU off section.
 */
void trace_hardirqs_off_finish(void)
{
	/*
	 * Record the disable only once per disabled section: the flag
	 * stays set until the next traced enable clears it.
	 */
	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1);
	}

}
EXPORT_SYMBOL(trace_hardirqs_off_finish);
/* Must not be kprobed: runs in low level entry paths. */
NOKPROBE_SYMBOL(trace_hardirqs_off_finish);
0995a5df 84
c3bc8fd6
JFG
/*
 * Perform the lockdep hardirqs-off transition and then emit the
 * irq-disable tracepoint/tracer events (once per disabled section).
 */
void trace_hardirqs_off(void)
{
	/*
	 * Lockdep is notified before tracing — the reverse of
	 * trace_hardirqs_on(), which traces first and calls lockdep last.
	 */
	lockdep_hardirqs_off(CALLER_ADDR0);

	/* Only trace the first disable; redundant disables are skipped. */
	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off);
/* Must not be kprobed: runs in low level entry paths. */
NOKPROBE_SYMBOL(trace_hardirqs_off);
c3bc8fd6
JFG
97#endif /* CONFIG_TRACE_IRQFLAGS */
98
99#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
100
/*
 * Fire the preempt_enable tracepoint and notify the preemptoff tracer.
 * @a0/@a1: caller addresses (call site and its parent) recorded in the
 * trace events.
 */
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	trace(preempt_enable)(a0, a1);
	tracer_preempt_on(a0, a1);
}
106
/*
 * Fire the preempt_disable tracepoint and notify the preemptoff tracer.
 * @a0/@a1: caller addresses (call site and its parent) recorded in the
 * trace events.
 */
void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	trace(preempt_disable)(a0, a1);
	tracer_preempt_off(a0, a1);
}
112#endif