Commit | Line | Data |
---|---|---|
35e8e302 SR |
1 | /* |
2 | * trace context switch | |
3 | * | |
4 | * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com> | |
5 | * | |
6 | */ | |
7 | #include <linux/module.h> | |
8 | #include <linux/fs.h> | |
9 | #include <linux/debugfs.h> | |
10 | #include <linux/kallsyms.h> | |
11 | #include <linux/uaccess.h> | |
12 | #include <linux/marker.h> | |
13 | #include <linux/ftrace.h> | |
14 | ||
15 | #include "trace.h" | |
16 | ||
/* trace_array this tracer records into; set by sched_switch_trace_init() */
static struct trace_array *ctx_trace;
/* non-zero while the sched_switch tracer is actively recording */
static int __read_mostly tracer_enabled;
19 | ||
e309b41d | 20 | static void |
4e655519 | 21 | ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next) |
35e8e302 SR |
22 | { |
23 | struct trace_array *tr = ctx_trace; | |
24 | struct trace_array_cpu *data; | |
25 | unsigned long flags; | |
26 | long disabled; | |
27 | int cpu; | |
28 | ||
29 | if (!tracer_enabled) | |
30 | return; | |
31 | ||
18cef379 | 32 | local_irq_save(flags); |
35e8e302 SR |
33 | cpu = raw_smp_processor_id(); |
34 | data = tr->data[cpu]; | |
35 | disabled = atomic_inc_return(&data->disabled); | |
36 | ||
4d9493c9 | 37 | if (likely(disabled == 1)) |
35e8e302 SR |
38 | tracing_sched_switch_trace(tr, data, prev, next, flags); |
39 | ||
40 | atomic_dec(&data->disabled); | |
18cef379 | 41 | local_irq_restore(flags); |
35e8e302 SR |
42 | } |
43 | ||
4e655519 IM |
44 | static void |
45 | wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr) | |
57422797 IM |
46 | { |
47 | struct trace_array *tr = ctx_trace; | |
48 | struct trace_array_cpu *data; | |
49 | unsigned long flags; | |
50 | long disabled; | |
51 | int cpu; | |
52 | ||
53 | if (!tracer_enabled) | |
54 | return; | |
55 | ||
d9af56fb IM |
56 | tracing_record_cmdline(curr); |
57 | ||
57422797 IM |
58 | local_irq_save(flags); |
59 | cpu = raw_smp_processor_id(); | |
60 | data = tr->data[cpu]; | |
61 | disabled = atomic_inc_return(&data->disabled); | |
62 | ||
4d9493c9 | 63 | if (likely(disabled == 1)) |
57422797 IM |
64 | tracing_sched_wakeup_trace(tr, data, wakee, curr, flags); |
65 | ||
66 | atomic_dec(&data->disabled); | |
67 | local_irq_restore(flags); | |
68 | } | |
69 | ||
/*
 * Hook called from the scheduler on every context switch.
 *
 * Records the outgoing task's command line (when cmdline recording
 * is enabled), traces the switch itself, then chains to the wakeup
 * latency tracer.
 */
void
ftrace_ctx_switch(void *__rq, struct task_struct *prev,
		  struct task_struct *next)
{
	if (unlikely(atomic_read(&trace_record_cmdline_enabled)))
		tracing_record_cmdline(prev);

	/*
	 * If tracer_switch_func only points to the local
	 * switch func, it still needs the ptr passed to it.
	 */
	ctx_switch_func(__rq, prev, next);

	/*
	 * Chain to the wakeup tracer (this is a NOP if disabled):
	 */
	wakeup_sched_switch(prev, next);
}
88 | ||
/*
 * Hook called from the scheduler when a task is woken up.
 *
 * Traces the wakeup event, then chains to the wakeup latency tracer.
 */
void
ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
		    struct task_struct *curr)
{
	wakeup_func(__rq, wakee, curr);

	/*
	 * Chain to the wakeup tracer (this is a NOP if disabled):
	 */
	wakeup_sched_wakeup(wakee, curr);
}
100 | ||
88a4216c IM |
101 | void |
102 | ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) | |
103 | { | |
104 | struct trace_array *tr = ctx_trace; | |
105 | struct trace_array_cpu *data; | |
106 | unsigned long flags; | |
107 | long disabled; | |
108 | int cpu; | |
109 | ||
110 | if (!tracer_enabled) | |
111 | return; | |
112 | ||
113 | local_irq_save(flags); | |
114 | cpu = raw_smp_processor_id(); | |
115 | data = tr->data[cpu]; | |
116 | disabled = atomic_inc_return(&data->disabled); | |
117 | ||
118 | if (likely(disabled == 1)) | |
119 | __trace_special(tr, data, arg1, arg2, arg3); | |
120 | ||
121 | atomic_dec(&data->disabled); | |
122 | local_irq_restore(flags); | |
123 | } | |
124 | ||
/* Stamp the start time of a new trace and reset every online cpu's buffer. */
static void sched_switch_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr->data[cpu]);
}
134 | ||
/*
 * Begin tracing: buffers are reset and cmdline recording is switched
 * on BEFORE tracer_enabled is set, so probes never see stale state.
 */
static void start_sched_trace(struct trace_array *tr)
{
	sched_switch_reset(tr);
	atomic_inc(&trace_record_cmdline_enabled);
	tracer_enabled = 1;
}
141 | ||
/* Stop tracing: drop the cmdline-recording reference and disable probes. */
static void stop_sched_trace(struct trace_array *tr)
{
	atomic_dec(&trace_record_cmdline_enabled);
	tracer_enabled = 0;
}
147 | ||
/*
 * Tracer ->init callback: remember the trace_array we record into
 * and start tracing immediately if the tracer is already enabled.
 */
static void sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;

	if (tr->ctrl)
		start_sched_trace(tr);
}
155 | ||
/* Tracer ->reset callback: stop tracing if it is currently running. */
static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_sched_trace(tr);
}
161 | ||
/* Tracer ->ctrl_update callback: start or stop tracing per tr->ctrl. */
static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_sched_trace(tr);
	else
		stop_sched_trace(tr);
}
170 | ||
/* Tracer definition registered with the ftrace core as "sched_switch". */
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif
};
181 | ||
182 | __init static int init_sched_switch_trace(void) | |
183 | { | |
184 | return register_tracer(&sched_switch_trace); | |
185 | } | |
186 | device_initcall(init_sched_switch_trace); |