Commit | Line | Data |
---|---|---|
81d68a96 | 1 | /* |
73d8b8bc | 2 | * trace irqs off critical timings |
81d68a96 SR |
3 | * |
4 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> | |
5 | * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> | |
6 | * | |
7 | * From code in the latency_tracer, that is: | |
8 | * | |
9 | * Copyright (C) 2004-2006 Ingo Molnar | |
10 | * Copyright (C) 2004 William Lee Irwin III | |
11 | */ | |
12 | #include <linux/kallsyms.h> | |
13 | #include <linux/debugfs.h> | |
14 | #include <linux/uaccess.h> | |
15 | #include <linux/module.h> | |
16 | #include <linux/ftrace.h> | |
17 | #include <linux/fs.h> | |
18 | ||
19 | #include "trace.h" | |
20 | ||
21 | static struct trace_array *irqsoff_trace __read_mostly; | |
22 | static int tracer_enabled __read_mostly; | |
23 | ||
6cd8a4bb SR |
24 | static DEFINE_PER_CPU(int, tracing_cpu); |
25 | ||
89b2f978 SR |
26 | static DEFINE_SPINLOCK(max_trace_lock); |
27 | ||
6cd8a4bb SR |
28 | enum { |
29 | TRACER_IRQS_OFF = (1 << 1), | |
30 | TRACER_PREEMPT_OFF = (1 << 2), | |
31 | }; | |
32 | ||
33 | static int trace_type __read_mostly; | |
34 | ||
e9d25fe6 SR |
35 | static int save_lat_flag; |
36 | ||
#ifdef CONFIG_PREEMPT_TRACER
/* nonzero when preempt-off tracing is selected and preemption is disabled */
static inline int
preempt_trace(void)
{
	return (trace_type & TRACER_PREEMPT_OFF) && preempt_count();
}
#else
# define preempt_trace() (0)
#endif
46 | ||
#ifdef CONFIG_IRQSOFF_TRACER
/* nonzero when irqs-off tracing is selected and irqs are disabled */
static inline int
irq_trace(void)
{
	return (trace_type & TRACER_IRQS_OFF) && irqs_disabled();
}
#else
# define irq_trace() (0)
#endif
57 | ||
81d68a96 SR |
58 | /* |
59 | * Sequence count - we record it when starting a measurement and | |
60 | * skip the latency if the sequence has changed - some other section | |
61 | * did a maximum and could disturb our measurement with serial console | |
62 | * printouts, etc. Truly coinciding maximum latencies should be rare | |
63 | * and what happens together happens separately as well, so this doesnt | |
64 | * decrease the validity of the maximum found: | |
65 | */ | |
66 | static __cacheline_aligned_in_smp unsigned long max_sequence; | |
67 | ||
#ifdef CONFIG_FUNCTION_TRACER
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 * it only records while the current CPU has tracing_cpu set (i.e. is
 * inside an irqs-off critical section).
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	/* only record when no other context already holds the slot */
	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = irqsoff_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */
81d68a96 SR |
110 | |
111 | /* | |
112 | * Should this new latency be reported/recorded? | |
113 | */ | |
e309b41d | 114 | static int report_latency(cycle_t delta) |
81d68a96 SR |
115 | { |
116 | if (tracing_thresh) { | |
117 | if (delta < tracing_thresh) | |
118 | return 0; | |
119 | } else { | |
120 | if (delta <= tracing_max_latency) | |
121 | return 0; | |
122 | } | |
123 | return 1; | |
124 | } | |
125 | ||
e309b41d | 126 | static void |
81d68a96 SR |
127 | check_critical_timing(struct trace_array *tr, |
128 | struct trace_array_cpu *data, | |
129 | unsigned long parent_ip, | |
130 | int cpu) | |
131 | { | |
132 | unsigned long latency, t0, t1; | |
89b2f978 | 133 | cycle_t T0, T1, delta; |
81d68a96 | 134 | unsigned long flags; |
38697053 | 135 | int pc; |
81d68a96 SR |
136 | |
137 | /* | |
138 | * usecs conversion is slow so we try to delay the conversion | |
139 | * as long as possible: | |
140 | */ | |
141 | T0 = data->preempt_timestamp; | |
750ed1a4 | 142 | T1 = ftrace_now(cpu); |
81d68a96 SR |
143 | delta = T1-T0; |
144 | ||
145 | local_save_flags(flags); | |
146 | ||
6450c1d3 SR |
147 | pc = preempt_count(); |
148 | ||
81d68a96 SR |
149 | if (!report_latency(delta)) |
150 | goto out; | |
151 | ||
c7aafc54 | 152 | spin_lock_irqsave(&max_trace_lock, flags); |
81d68a96 | 153 | |
89b2f978 SR |
154 | /* check if we are still the max latency */ |
155 | if (!report_latency(delta)) | |
156 | goto out_unlock; | |
157 | ||
7be42151 | 158 | trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); |
81d68a96 SR |
159 | |
160 | latency = nsecs_to_usecs(delta); | |
161 | ||
162 | if (data->critical_sequence != max_sequence) | |
89b2f978 | 163 | goto out_unlock; |
81d68a96 SR |
164 | |
165 | tracing_max_latency = delta; | |
166 | t0 = nsecs_to_usecs(T0); | |
167 | t1 = nsecs_to_usecs(T1); | |
168 | ||
169 | data->critical_end = parent_ip; | |
170 | ||
171 | update_max_tr_single(tr, current, cpu); | |
172 | ||
81d68a96 SR |
173 | max_sequence++; |
174 | ||
89b2f978 | 175 | out_unlock: |
c7aafc54 | 176 | spin_unlock_irqrestore(&max_trace_lock, flags); |
89b2f978 | 177 | |
81d68a96 SR |
178 | out: |
179 | data->critical_sequence = max_sequence; | |
750ed1a4 | 180 | data->preempt_timestamp = ftrace_now(cpu); |
3928a8a2 | 181 | tracing_reset(tr, cpu); |
7be42151 | 182 | trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); |
81d68a96 SR |
183 | } |
184 | ||
e309b41d | 185 | static inline void |
81d68a96 SR |
186 | start_critical_timing(unsigned long ip, unsigned long parent_ip) |
187 | { | |
188 | int cpu; | |
189 | struct trace_array *tr = irqsoff_trace; | |
190 | struct trace_array_cpu *data; | |
191 | unsigned long flags; | |
192 | ||
193 | if (likely(!tracer_enabled)) | |
194 | return; | |
195 | ||
c5f888ca SR |
196 | cpu = raw_smp_processor_id(); |
197 | ||
198 | if (per_cpu(tracing_cpu, cpu)) | |
6cd8a4bb SR |
199 | return; |
200 | ||
81d68a96 SR |
201 | data = tr->data[cpu]; |
202 | ||
c5f888ca | 203 | if (unlikely(!data) || atomic_read(&data->disabled)) |
81d68a96 SR |
204 | return; |
205 | ||
206 | atomic_inc(&data->disabled); | |
207 | ||
208 | data->critical_sequence = max_sequence; | |
750ed1a4 | 209 | data->preempt_timestamp = ftrace_now(cpu); |
6cd8a4bb | 210 | data->critical_start = parent_ip ? : ip; |
3928a8a2 | 211 | tracing_reset(tr, cpu); |
81d68a96 SR |
212 | |
213 | local_save_flags(flags); | |
6cd8a4bb | 214 | |
7be42151 | 215 | trace_function(tr, ip, parent_ip, flags, preempt_count()); |
81d68a96 | 216 | |
c5f888ca | 217 | per_cpu(tracing_cpu, cpu) = 1; |
6cd8a4bb | 218 | |
81d68a96 SR |
219 | atomic_dec(&data->disabled); |
220 | } | |
221 | ||
e309b41d | 222 | static inline void |
81d68a96 SR |
223 | stop_critical_timing(unsigned long ip, unsigned long parent_ip) |
224 | { | |
225 | int cpu; | |
226 | struct trace_array *tr = irqsoff_trace; | |
227 | struct trace_array_cpu *data; | |
228 | unsigned long flags; | |
229 | ||
c5f888ca | 230 | cpu = raw_smp_processor_id(); |
6cd8a4bb | 231 | /* Always clear the tracing cpu on stopping the trace */ |
c5f888ca SR |
232 | if (unlikely(per_cpu(tracing_cpu, cpu))) |
233 | per_cpu(tracing_cpu, cpu) = 0; | |
6cd8a4bb SR |
234 | else |
235 | return; | |
236 | ||
237 | if (!tracer_enabled) | |
81d68a96 SR |
238 | return; |
239 | ||
81d68a96 SR |
240 | data = tr->data[cpu]; |
241 | ||
3928a8a2 | 242 | if (unlikely(!data) || |
81d68a96 SR |
243 | !data->critical_start || atomic_read(&data->disabled)) |
244 | return; | |
245 | ||
246 | atomic_inc(&data->disabled); | |
c5f888ca | 247 | |
81d68a96 | 248 | local_save_flags(flags); |
7be42151 | 249 | trace_function(tr, ip, parent_ip, flags, preempt_count()); |
6cd8a4bb | 250 | check_critical_timing(tr, data, parent_ip ? : ip, cpu); |
81d68a96 SR |
251 | data->critical_start = 0; |
252 | atomic_dec(&data->disabled); | |
253 | } | |
254 | ||
6cd8a4bb | 255 | /* start and stop critical timings used to for stoppage (in idle) */ |
e309b41d | 256 | void start_critical_timings(void) |
81d68a96 | 257 | { |
6cd8a4bb | 258 | if (preempt_trace() || irq_trace()) |
81d68a96 SR |
259 | start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); |
260 | } | |
1fe37104 | 261 | EXPORT_SYMBOL_GPL(start_critical_timings); |
81d68a96 | 262 | |
e309b41d | 263 | void stop_critical_timings(void) |
81d68a96 | 264 | { |
6cd8a4bb | 265 | if (preempt_trace() || irq_trace()) |
81d68a96 SR |
266 | stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); |
267 | } | |
1fe37104 | 268 | EXPORT_SYMBOL_GPL(stop_critical_timings); |
81d68a96 | 269 | |
6cd8a4bb | 270 | #ifdef CONFIG_IRQSOFF_TRACER |
81d68a96 | 271 | #ifdef CONFIG_PROVE_LOCKING |
e309b41d | 272 | void time_hardirqs_on(unsigned long a0, unsigned long a1) |
81d68a96 | 273 | { |
6cd8a4bb | 274 | if (!preempt_trace() && irq_trace()) |
81d68a96 SR |
275 | stop_critical_timing(a0, a1); |
276 | } | |
277 | ||
/* lockdep hook: irqs have just been disabled, open a section */
void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}
283 | ||
284 | #else /* !CONFIG_PROVE_LOCKING */ | |
285 | ||
286 | /* | |
287 | * Stubs: | |
288 | */ | |
289 | ||
290 | void early_boot_irqs_off(void) | |
291 | { | |
292 | } | |
293 | ||
294 | void early_boot_irqs_on(void) | |
295 | { | |
296 | } | |
297 | ||
298 | void trace_softirqs_on(unsigned long ip) | |
299 | { | |
300 | } | |
301 | ||
302 | void trace_softirqs_off(unsigned long ip) | |
303 | { | |
304 | } | |
305 | ||
e309b41d | 306 | inline void print_irqtrace_events(struct task_struct *curr) |
81d68a96 SR |
307 | { |
308 | } | |
309 | ||
310 | /* | |
311 | * We are only interested in hardirq on/off events: | |
312 | */ | |
e309b41d | 313 | void trace_hardirqs_on(void) |
81d68a96 | 314 | { |
6cd8a4bb | 315 | if (!preempt_trace() && irq_trace()) |
81d68a96 SR |
316 | stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); |
317 | } | |
318 | EXPORT_SYMBOL(trace_hardirqs_on); | |
319 | ||
e309b41d | 320 | void trace_hardirqs_off(void) |
81d68a96 | 321 | { |
6cd8a4bb | 322 | if (!preempt_trace() && irq_trace()) |
81d68a96 SR |
323 | start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); |
324 | } | |
325 | EXPORT_SYMBOL(trace_hardirqs_off); | |
326 | ||
e309b41d | 327 | void trace_hardirqs_on_caller(unsigned long caller_addr) |
81d68a96 | 328 | { |
6cd8a4bb | 329 | if (!preempt_trace() && irq_trace()) |
81d68a96 SR |
330 | stop_critical_timing(CALLER_ADDR0, caller_addr); |
331 | } | |
332 | EXPORT_SYMBOL(trace_hardirqs_on_caller); | |
333 | ||
e309b41d | 334 | void trace_hardirqs_off_caller(unsigned long caller_addr) |
81d68a96 | 335 | { |
6cd8a4bb | 336 | if (!preempt_trace() && irq_trace()) |
81d68a96 SR |
337 | start_critical_timing(CALLER_ADDR0, caller_addr); |
338 | } | |
339 | EXPORT_SYMBOL(trace_hardirqs_off_caller); | |
340 | ||
341 | #endif /* CONFIG_PROVE_LOCKING */ | |
6cd8a4bb SR |
342 | #endif /* CONFIG_IRQSOFF_TRACER */ |
343 | ||
344 | #ifdef CONFIG_PREEMPT_TRACER | |
e309b41d | 345 | void trace_preempt_on(unsigned long a0, unsigned long a1) |
6cd8a4bb | 346 | { |
1e01cb0c SR |
347 | if (preempt_trace()) |
348 | stop_critical_timing(a0, a1); | |
6cd8a4bb SR |
349 | } |
350 | ||
e309b41d | 351 | void trace_preempt_off(unsigned long a0, unsigned long a1) |
6cd8a4bb | 352 | { |
1e01cb0c SR |
353 | if (preempt_trace()) |
354 | start_critical_timing(a0, a1); | |
6cd8a4bb SR |
355 | } |
356 | #endif /* CONFIG_PREEMPT_TRACER */ | |
81d68a96 SR |
357 | |
358 | static void start_irqsoff_tracer(struct trace_array *tr) | |
359 | { | |
81d68a96 | 360 | register_ftrace_function(&trace_ops); |
94523e81 | 361 | if (tracing_is_enabled()) |
9036990d | 362 | tracer_enabled = 1; |
94523e81 | 363 | else |
9036990d | 364 | tracer_enabled = 0; |
81d68a96 SR |
365 | } |
366 | ||
367 | static void stop_irqsoff_tracer(struct trace_array *tr) | |
368 | { | |
81d68a96 | 369 | tracer_enabled = 0; |
89b2f978 | 370 | unregister_ftrace_function(&trace_ops); |
81d68a96 SR |
371 | } |
372 | ||
6cd8a4bb | 373 | static void __irqsoff_tracer_init(struct trace_array *tr) |
81d68a96 | 374 | { |
e9d25fe6 SR |
375 | save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT; |
376 | trace_flags |= TRACE_ITER_LATENCY_FMT; | |
377 | ||
745b1626 | 378 | tracing_max_latency = 0; |
81d68a96 | 379 | irqsoff_trace = tr; |
c5f888ca | 380 | /* make sure that the tracer is visible */ |
81d68a96 | 381 | smp_wmb(); |
c76f0694 | 382 | start_irqsoff_tracer(tr); |
81d68a96 SR |
383 | } |
384 | ||
385 | static void irqsoff_tracer_reset(struct trace_array *tr) | |
386 | { | |
c76f0694 | 387 | stop_irqsoff_tracer(tr); |
e9d25fe6 SR |
388 | |
389 | if (!save_lat_flag) | |
390 | trace_flags &= ~TRACE_ITER_LATENCY_FMT; | |
81d68a96 SR |
391 | } |
392 | ||
9036990d SR |
393 | static void irqsoff_tracer_start(struct trace_array *tr) |
394 | { | |
9036990d | 395 | tracer_enabled = 1; |
9036990d SR |
396 | } |
397 | ||
398 | static void irqsoff_tracer_stop(struct trace_array *tr) | |
399 | { | |
400 | tracer_enabled = 0; | |
81d68a96 SR |
401 | } |
402 | ||
#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif
427 | ||
#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif
453 | ||
#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif
81d68a96 SR |
482 | |
483 | __init static int init_irqsoff_tracer(void) | |
484 | { | |
6cd8a4bb SR |
485 | register_irqsoff(irqsoff_tracer); |
486 | register_preemptoff(preemptoff_tracer); | |
487 | register_preemptirqsoff(preemptirqsoff_tracer); | |
81d68a96 SR |
488 | |
489 | return 0; | |
490 | } | |
491 | device_initcall(init_irqsoff_tracer); |