Commit | Line | Data |
---|---|---|
35e8e302 SR |
1 | /* |
2 | * trace context switch | |
3 | * | |
4 | * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com> | |
5 | * | |
6 | */ | |
7 | #include <linux/module.h> | |
8 | #include <linux/fs.h> | |
9 | #include <linux/debugfs.h> | |
10 | #include <linux/kallsyms.h> | |
11 | #include <linux/uaccess.h> | |
35e8e302 | 12 | #include <linux/ftrace.h> |
ad8d75ff | 13 | #include <trace/events/sched.h> |
35e8e302 SR |
14 | |
15 | #include "trace.h" | |
16 | ||
/* Trace array that the sched probes below record into (set by a tracer). */
static struct trace_array	*ctx_trace;
/* Count of tracers that turned on context-switch event recording. */
static int __read_mostly	tracer_enabled;
/* Reference count of probe users (tracers and cmdline recording). */
static int			sched_ref;
/* Serializes sched_ref/tracer_enabled updates and probe (un)registration. */
static DEFINE_MUTEX(sched_register_mutex);
/* Non-zero while tracing is administratively stopped (tracer ->stop). */
static int			sched_stopped;
82e04af4 FW |
23 | |
/*
 * tracing_sched_switch_trace - record one context-switch event
 * @tr:		trace array whose ring buffer receives the event
 * @prev:	task being scheduled out
 * @next:	task being scheduled in
 * @flags:	irq flags saved by the caller
 * @pc:		preempt count sampled by the caller
 *
 * Reserves a TRACE_CTX entry on @tr's ring buffer, fills it in from
 * @prev and @next, and commits it unless the context-switch event
 * filter discards it.
 */
void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_context_switch;
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;		/* buffer full or recording disabled */
	entry = ring_buffer_event_data(event);
	entry->prev_pid		= prev->pid;
	entry->prev_prio	= prev->prio;
	entry->prev_state	= prev->state;
	entry->next_pid		= next->pid;
	entry->next_prio	= next->prio;
	entry->next_state	= next->state;
	entry->next_cpu		= task_cpu(next);

	if (!filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, flags, pc);
}
51 | ||
/*
 * Tracepoint probe for sched_switch.
 *
 * Always records the pid->comm mapping for both tasks while any user
 * holds a probe reference; only logs the actual switch event when a
 * tracer enabled recording and tracing is not stopped.  The per-cpu
 * data lookup and trace write happen with irqs disabled.
 */
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
		   struct task_struct *next)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	if (unlikely(!sched_ref))
		return;

	/* cmdline recording works even with no tracer enabled */
	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);

	if (!tracer_enabled || sched_stopped)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	/* skip if this cpu's trace data is temporarily disabled */
	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);

	local_irq_restore(flags);
}
80 | ||
82e04af4 FW |
/*
 * tracing_sched_wakeup_trace - record one task-wakeup event
 * @tr:		trace array whose ring buffer receives the event
 * @wakee:	task being woken up
 * @curr:	current task doing the waking
 * @flags:	irq flags saved by the caller
 * @pc:		preempt count sampled by the caller
 *
 * Reserves a TRACE_WAKE entry (same layout as a context switch: the
 * waker goes in the prev_* fields, the wakee in next_*), commits it
 * unless the wakeup event filter discards it, then records kernel and
 * user stack traces.
 *
 * NOTE(review): the stack traces are recorded even when the filter
 * discards the wakeup entry itself -- confirm this is intended.
 */
void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct ring_buffer *buffer = tr->buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;		/* buffer full or recording disabled */
	entry = ring_buffer_event_data(event);
	entry->prev_pid		= curr->pid;
	entry->prev_prio	= curr->prio;
	entry->prev_state	= curr->state;
	entry->next_pid		= wakee->pid;
	entry->next_prio	= wakee->prio;
	entry->next_state	= wakee->state;
	entry->next_cpu		= task_cpu(wakee);

	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(tr->buffer, flags, 6, pc);	/* skip 6 frames */
	ftrace_trace_userstack(tr->buffer, flags, pc);
}
110 | ||
/*
 * Tracepoint probe for sched_wakeup and sched_wakeup_new.
 *
 * Records the waker's (current's) pid->comm mapping while any user
 * holds a probe reference; only logs the wakeup event when a tracer
 * enabled recording and tracing is not stopped.  @success is unused
 * here.  The per-cpu lookup and trace write run with irqs disabled.
 */
static void
probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu, pc;

	if (unlikely(!sched_ref))
		return;

	/* cmdline recording works even with no tracer enabled */
	tracing_record_cmdline(current);

	if (!tracer_enabled || sched_stopped)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	/* skip if this cpu's trace data is temporarily disabled */
	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_wakeup_trace(ctx_trace, wakee, current,
					   flags, pc);

	local_irq_restore(flags);
}
137 | ||
5b82a1b0 MD |
/*
 * Attach the three sched tracepoint probes (wakeup, wakeup_new, switch).
 *
 * Returns 0 on success or the registration error.  On failure, probes
 * registered so far are rolled back in reverse order via the fail_*
 * labels, so the function is all-or-nothing.
 */
static int tracing_sched_register(void)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	/* new-task wakeups share the same probe function */
	ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_sched_switch);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_sched_wakeup);
	return ret;
}
170 | ||
/* Detach all three sched tracepoint probes, in reverse registration order. */
static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
	unregister_trace_sched_wakeup(probe_sched_wakeup);
}
177 | ||
/* Take a reference on the sched probes; register them on the 0 -> 1
 * transition.  sched_ref is protected by sched_register_mutex. */
static void tracing_start_sched_switch(void)
{
	mutex_lock(&sched_register_mutex);
	if (!(sched_ref++))
		tracing_sched_register();
	mutex_unlock(&sched_register_mutex);
}
185 | ||
/* Drop a reference on the sched probes; unregister them on the 1 -> 0
 * transition.  sched_ref is protected by sched_register_mutex. */
static void tracing_stop_sched_switch(void)
{
	mutex_lock(&sched_register_mutex);
	if (!(--sched_ref))
		tracing_sched_unregister();
	mutex_unlock(&sched_register_mutex);
}
193 | ||
41bc8144 SR |
/*
 * tracing_start_cmdline_record - start recording pid -> comm mappings
 *
 * The sched probes call tracing_record_cmdline() before checking
 * tracer_enabled, so holding a probe reference is sufficient.
 */
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}
198 | ||
/*
 * tracing_stop_cmdline_record - stop recording pid -> comm mappings
 *
 * Drops the probe reference taken by tracing_start_cmdline_record().
 */
void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}
203 | ||
/**
 * tracing_start_sched_switch_record - start tracing context switches
 *
 * Turns on context switch tracing for a tracer.  A tracer must have
 * assigned a trace array (tracing_sched_switch_assign_trace() or the
 * sched_switch tracer's init) beforehand; otherwise this warns and
 * does nothing.
 */
void tracing_start_sched_switch_record(void)
{
	if (unlikely(!ctx_trace)) {
		WARN_ON(1);	/* called before a trace array was assigned */
		return;
	}

	tracing_start_sched_switch();

	mutex_lock(&sched_register_mutex);
	tracer_enabled++;
	mutex_unlock(&sched_register_mutex);
}
222 | ||
/**
 * tracing_stop_sched_switch_record - stop tracing context switches
 *
 * Turns off context switch tracing for a tracer.
 */
void tracing_stop_sched_switch_record(void)
{
	mutex_lock(&sched_register_mutex);
	tracer_enabled--;
	WARN_ON(tracer_enabled < 0);	/* unbalanced start/stop pairing */
	mutex_unlock(&sched_register_mutex);

	tracing_stop_sched_switch();
}
237 | ||
/**
 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
 * @tr: trace array pointer to assign
 *
 * Some tracers might want to record the context switches in their
 * trace. This function lets those tracers assign the trace array
 * to use.
 */
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
	ctx_trace = tr;
}
250 | ||
/* Tracer-side helper: turn off context-switch recording (@tr unused). */
static void stop_sched_trace(struct trace_array *tr)
{
	tracing_stop_sched_switch_record();
}
255 | ||
/* tracer ->init: adopt @tr, reset its per-cpu buffers, start recording. */
static int sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;
	tracing_reset_online_cpus(tr);
	tracing_start_sched_switch_record();
	return 0;
}
263 | ||
/* tracer ->reset: stop recording only if the probes are still referenced. */
static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (sched_ref)
		stop_sched_trace(tr);
}
269 | ||
9036990d SR |
/* tracer ->start: let the probes record events again. */
static void sched_switch_trace_start(struct trace_array *tr)
{
	sched_stopped = 0;
}
274 | ||
/* tracer ->stop: make the probes drop events without unregistering. */
static void sched_switch_trace_stop(struct trace_array *tr)
{
	sched_stopped = 1;
}
279 | ||
/* The "sched_switch" tracer: records context switches and wakeups. */
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.start		= sched_switch_trace_start,
	.stop		= sched_switch_trace_stop,
	.wait_pipe	= poll_wait_pipe,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif
};
292 | ||
/* Register the sched_switch tracer with the tracing core at boot. */
__init static int init_sched_switch_trace(void)
{
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);
c71dd42d | 298 |