Commit | Line | Data |
---|---|---|
81d68a96 | 1 | /*
73d8b8bc | 2 |  * trace irqs off critical timings
81d68a96 SR | 3 |  *
4 |  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 |  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 |  *
7 |  * From code in the latency_tracer, that is:
8 |  *
9 |  * Copyright (C) 2004-2006 Ingo Molnar
6d49e352 | 10 |  * Copyright (C) 2004 Nadia Yvette Chambers
81d68a96 SR | 11 |  */
12 | #include <linux/kallsyms.h>
81d68a96 SR | 13 | #include <linux/uaccess.h>
14 | #include <linux/module.h>
15 | #include <linux/ftrace.h>
81d68a96 SR | 16 |
17 | #include "trace.h"
18 |
d5915816 JF | 19 | #define CREATE_TRACE_POINTS
20 | #include <trace/events/preemptirq.h>
21 |
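A note on the `CREATE_TRACE_POINTS` pattern above (my gloss, not part of the file): exactly one translation unit defines the macro before including the `trace/events/` header, which turns the tracepoint declarations into definitions; every other user includes the header plainly and just calls the tracepoints. A minimal sketch of such a consumer, with a hypothetical helper name:

```c
#include <trace/events/preemptirq.h>	/* declarations only: CREATE_TRACE_POINTS not defined here */

/* hypothetical caller; fires the irq_disable tracepoint defined by trace_irqsoff.c */
static void example_irq_disable_hook(unsigned long ip, unsigned long parent_ip)
{
	trace_irq_disable_rcuidle(ip, parent_ip);
}
```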
aaecaa0b | 22 | #if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
81d68a96 SR | 23 | static struct trace_array *irqsoff_trace __read_mostly;
24 | static int tracer_enabled __read_mostly;
25 |
6cd8a4bb SR | 26 | static DEFINE_PER_CPU(int, tracing_cpu);
27 |
5389f6fa | 28 | static DEFINE_RAW_SPINLOCK(max_trace_lock);
89b2f978 | 29 |
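A short gloss on the two variables above (mine, not in the original): `tracing_cpu` is a per-CPU "measurement in progress" flag, and `max_trace_lock` serializes updates of the global maximum in `check_critical_timing()`. The per-CPU flag is what keeps nested disable events and the function-tracer callbacks from restarting or double-recording a measurement, roughly:

```c
/* pattern used by the callbacks below (sketch) */
if (!per_cpu(tracing_cpu, raw_smp_processor_id()))
	return 0;	/* no irqs/preempt-off section is being timed on this CPU */
```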
6cd8a4bb SR | 30 | enum {
31 | 	TRACER_IRQS_OFF = (1 << 1),
32 | 	TRACER_PREEMPT_OFF = (1 << 2),
33 | };
34 |
35 | static int trace_type __read_mostly;
36 |
613f04a0 | 37 | static int save_flags;
e9d25fe6 | 38 |
62b915f1 JO | 39 | static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
40 | static int start_irqsoff_tracer(struct trace_array *tr, int graph);
41 |
6cd8a4bb | 42 | #ifdef CONFIG_PREEMPT_TRACER
e309b41d | 43 | static inline int
6cd8a4bb SR | 44 | preempt_trace(void)
45 | {
46 | 	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
47 | }
48 | #else
49 | # define preempt_trace() (0)
50 | #endif
51 |
52 | #ifdef CONFIG_IRQSOFF_TRACER
e309b41d | 53 | static inline int
6cd8a4bb SR | 54 | irq_trace(void)
55 | {
56 | 	return ((trace_type & TRACER_IRQS_OFF) &&
57 | 		irqs_disabled());
58 | }
59 | #else
60 | # define irq_trace() (0)
61 | #endif
62 |
62b915f1 | 63 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
03905582 | 64 | static int irqsoff_display_graph(struct trace_array *tr, int set); |
983f938a | 65 | # define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH) |
03905582 SRRH |
66 | #else |
67 | static inline int irqsoff_display_graph(struct trace_array *tr, int set) | |
68 | { | |
69 | return -EINVAL; | |
70 | } | |
983f938a | 71 | # define is_graph(tr) false |
62b915f1 | 72 | #endif |
62b915f1 | 73 | |
81d68a96 SR | 74 | /*
75 |  * Sequence count - we record it when starting a measurement and
76 |  * skip the latency if the sequence has changed - some other section
77 |  * did a maximum and could disturb our measurement with serial console
78 |  * printouts, etc. Truly coinciding maximum latencies should be rare
25985edc | 79 |  * and what happens together happens separately as well, so this doesn't
81d68a96 SR | 80 |  * decrease the validity of the maximum found:
81 |  */
82 | static __cacheline_aligned_in_smp unsigned long max_sequence;
83 |
606576ce | 84 | #ifdef CONFIG_FUNCTION_TRACER |
81d68a96 | 85 | /*
5e6d2b9c SR | 86 |  * Prologue for the preempt and irqs off function tracers.
87 |  *
88 |  * Returns 1 if it is OK to continue, in which case data->disabled
89 |  * has been incremented.
90 |  * Returns 0 if the trace is to be ignored, in which case data->disabled
91 |  * is left unchanged.
92 |  *
93 |  * Note, this function is also used outside this ifdef but
94 |  * inside the #ifdef of the function graph tracer below.
95 |  * This is OK, since the function graph tracer is
96 |  * dependent on the function tracer.
81d68a96 | 97 |  */
5e6d2b9c SR | 98 | static int func_prolog_dec(struct trace_array *tr,
99 | 			   struct trace_array_cpu **data,
100 | 			   unsigned long *flags)
81d68a96 | 101 | {
81d68a96 SR |
102 | long disabled; |
103 | int cpu; | |
104 | ||
361943ad SR |
105 | /* |
106 | * Does not matter if we preempt. We test the flags | |
107 | * afterward, to see if irqs are disabled or not. | |
108 | * If we preempt and get a false positive, the flags | |
109 | * test will fail. | |
110 | */ | |
111 | cpu = raw_smp_processor_id(); | |
112 | if (likely(!per_cpu(tracing_cpu, cpu))) | |
5e6d2b9c | 113 | return 0; |
81d68a96 | 114 | |
5e6d2b9c | 115 | local_save_flags(*flags); |
cb86e053 SRRH | 116 | 	/*
117 | 	 * There is a slight chance of a false positive on tracing_cpu,
118 | 	 * although in practice it may never happen.
119 | 	 * Leave the check in place to be paranoid.
120 | 	 */
121 | 	if (!irqs_disabled_flags(*flags) && !preempt_count())
5e6d2b9c | 122 | 		return 0;
81d68a96 | 123 | |
12883efb | 124 | *data = per_cpu_ptr(tr->trace_buffer.data, cpu); |
5e6d2b9c | 125 | disabled = atomic_inc_return(&(*data)->disabled); |
81d68a96 SR | 126 |
127 | 	if (likely(disabled == 1))
5e6d2b9c SR | 128 | 		return 1;
129 |
130 | 	atomic_dec(&(*data)->disabled);
131 |
132 | 	return 0;
133 | }
134 |
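The calling convention of `func_prolog_dec()` in one place (a sketch mirroring `irqsoff_tracer_call()` below, not additional functionality):

```c
if (!func_prolog_dec(tr, &data, &flags))
	return;				/* nothing to record on this CPU */

/* ... record the event: trace_function(), __trace_graph_entry(), ... */

atomic_dec(&data->disabled);		/* balance the increment done by the prolog */
```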
135 | /* | |
136 | * irqsoff uses its own tracer function to keep the overhead down: | |
137 | */ | |
138 | static void | |
2f5f6ad9 | 139 | irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip, |
a1e2e31d | 140 | struct ftrace_ops *op, struct pt_regs *pt_regs) |
5e6d2b9c SR |
141 | { |
142 | struct trace_array *tr = irqsoff_trace; | |
143 | struct trace_array_cpu *data; | |
144 | unsigned long flags; | |
145 | ||
146 | if (!func_prolog_dec(tr, &data, &flags)) | |
147 | return; | |
148 | ||
149 | trace_function(tr, ip, parent_ip, flags, preempt_count()); | |
81d68a96 SR |
150 | |
151 | atomic_dec(&data->disabled); | |
152 | } | |
606576ce | 153 | #endif /* CONFIG_FUNCTION_TRACER */ |
81d68a96 | 154 | |
62b915f1 | 155 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
03905582 | 156 | static int irqsoff_display_graph(struct trace_array *tr, int set) |
62b915f1 JO |
157 | { |
158 | int cpu; | |
159 | ||
983f938a | 160 | if (!(is_graph(tr) ^ set)) |
62b915f1 JO |
161 | return 0; |
162 | ||
163 | stop_irqsoff_tracer(irqsoff_trace, !set); | |
164 | ||
165 | for_each_possible_cpu(cpu) | |
166 | per_cpu(tracing_cpu, cpu) = 0; | |
167 | ||
6d9b3fa5 | 168 | tr->max_latency = 0; |
12883efb | 169 | tracing_reset_online_cpus(&irqsoff_trace->trace_buffer); |
62b915f1 JO |
170 | |
171 | return start_irqsoff_tracer(irqsoff_trace, set); | |
172 | } | |
173 | ||
174 | static int irqsoff_graph_entry(struct ftrace_graph_ent *trace) | |
175 | { | |
176 | struct trace_array *tr = irqsoff_trace; | |
177 | struct trace_array_cpu *data; | |
178 | unsigned long flags; | |
62b915f1 | 179 | int ret; |
62b915f1 JO |
180 | int pc; |
181 | ||
1a414428 SRRH |
182 | if (ftrace_graph_ignore_func(trace)) |
183 | return 0; | |
184 | /* | |
185 | * Do not trace a function if it's filtered by set_graph_notrace. | |
186 | * Make the index of ret stack negative to indicate that it should | |
187 | * ignore further functions. But it needs its own ret stack entry | |
188 | * to recover the original index in order to continue tracing after | |
189 | * returning from the function. | |
190 | */ | |
191 | if (ftrace_graph_notrace_addr(trace->func)) | |
192 | return 1; | |
193 | ||
5e6d2b9c | 194 | if (!func_prolog_dec(tr, &data, &flags)) |
62b915f1 JO |
195 | return 0; |
196 | ||
5e6d2b9c SR |
197 | pc = preempt_count(); |
198 | ret = __trace_graph_entry(tr, trace, flags, pc); | |
62b915f1 | 199 | atomic_dec(&data->disabled); |
5e6d2b9c | 200 | |
62b915f1 JO |
201 | return ret; |
202 | } | |
203 | ||
204 | static void irqsoff_graph_return(struct ftrace_graph_ret *trace) | |
205 | { | |
206 | struct trace_array *tr = irqsoff_trace; | |
207 | struct trace_array_cpu *data; | |
208 | unsigned long flags; | |
62b915f1 JO |
209 | int pc; |
210 | ||
5e6d2b9c | 211 | if (!func_prolog_dec(tr, &data, &flags)) |
62b915f1 JO |
212 | return; |
213 | ||
5e6d2b9c SR |
214 | pc = preempt_count(); |
215 | __trace_graph_return(tr, trace, flags, pc); | |
62b915f1 JO |
216 | atomic_dec(&data->disabled); |
217 | } | |
218 | ||
219 | static void irqsoff_trace_open(struct trace_iterator *iter) | |
220 | { | |
983f938a | 221 | if (is_graph(iter->tr)) |
62b915f1 JO |
222 | graph_trace_open(iter); |
223 | ||
224 | } | |
225 | ||
226 | static void irqsoff_trace_close(struct trace_iterator *iter) | |
227 | { | |
228 | if (iter->private) | |
229 | graph_trace_close(iter); | |
230 | } | |
231 | ||
232 | #define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \ | |
321e68b0 JO |
233 | TRACE_GRAPH_PRINT_PROC | \ |
234 | TRACE_GRAPH_PRINT_ABS_TIME | \ | |
235 | TRACE_GRAPH_PRINT_DURATION) | |
62b915f1 JO |
236 | |
237 | static enum print_line_t irqsoff_print_line(struct trace_iterator *iter) | |
238 | { | |
62b915f1 JO |
239 | /* |
240 | * In graph mode call the graph tracer output function, | |
241 | * otherwise go with the TRACE_FN event handler | |
242 | */ | |
983f938a | 243 | if (is_graph(iter->tr)) |
0a772620 | 244 | return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS); |
62b915f1 JO |
245 | |
246 | return TRACE_TYPE_UNHANDLED; | |
247 | } | |
248 | ||
249 | static void irqsoff_print_header(struct seq_file *s) | |
250 | { | |
983f938a SRRH |
251 | struct trace_array *tr = irqsoff_trace; |
252 | ||
253 | if (is_graph(tr)) | |
0a772620 JO |
254 | print_graph_headers_flags(s, GRAPH_TRACER_FLAGS); |
255 | else | |
62b915f1 JO |
256 | trace_default_header(s); |
257 | } | |
258 | ||
62b915f1 JO |
259 | static void |
260 | __trace_function(struct trace_array *tr, | |
261 | unsigned long ip, unsigned long parent_ip, | |
262 | unsigned long flags, int pc) | |
263 | { | |
983f938a | 264 | if (is_graph(tr)) |
0a772620 JO |
265 | trace_graph_function(tr, ip, parent_ip, flags, pc); |
266 | else | |
62b915f1 | 267 | trace_function(tr, ip, parent_ip, flags, pc); |
62b915f1 JO |
268 | } |
269 | ||
270 | #else | |
271 | #define __trace_function trace_function | |
272 | ||
8179e8a1 | 273 | #ifdef CONFIG_FUNCTION_TRACER |
62b915f1 JO |
274 | static int irqsoff_graph_entry(struct ftrace_graph_ent *trace) |
275 | { | |
276 | return -1; | |
277 | } | |
8179e8a1 | 278 | #endif |
62b915f1 JO |
279 | |
280 | static enum print_line_t irqsoff_print_line(struct trace_iterator *iter) | |
281 | { | |
282 | return TRACE_TYPE_UNHANDLED; | |
283 | } | |
284 | ||
62b915f1 JO |
285 | static void irqsoff_trace_open(struct trace_iterator *iter) { } |
286 | static void irqsoff_trace_close(struct trace_iterator *iter) { } | |
7e9a49ef JO |
287 | |
288 | #ifdef CONFIG_FUNCTION_TRACER | |
8179e8a1 | 289 | static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { } |
7e9a49ef JO |
290 | static void irqsoff_print_header(struct seq_file *s) |
291 | { | |
292 | trace_default_header(s); | |
293 | } | |
294 | #else | |
295 | static void irqsoff_print_header(struct seq_file *s) | |
296 | { | |
297 | trace_latency_header(s); | |
298 | } | |
299 | #endif /* CONFIG_FUNCTION_TRACER */ | |
62b915f1 JO |
300 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
301 | ||
81d68a96 SR | 302 | /*
303 |  * Should this new latency be reported/recorded?
304 |  */
a5a1d1c2 | 305 | static bool report_latency(struct trace_array *tr, u64 delta)
81d68a96 SR | 306 | {
307 | 	if (tracing_thresh) {
308 | 		if (delta < tracing_thresh)
79851821 | 309 | 			return false;
81d68a96 | 310 | 	} else {
6d9b3fa5 | 311 | 		if (delta <= tr->max_latency)
79851821 | 312 | 			return false;
81d68a96 | 313 | 	}
79851821 | 314 | 	return true;
81d68a96 SR | 315 | }
316 |
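A worked example of the two modes of `report_latency()` (values are illustrative, in the trace clock's units, typically nanoseconds):

```c
/*
 * tracing_thresh == 0 (default), tr->max_latency == 300000:
 *   delta == 250000  ->  false  (not a new maximum, nothing is recorded)
 *   delta == 400000  ->  true   (new maximum, a snapshot will be taken)
 *
 * tracing_thresh == 100000:
 *   delta == 250000  ->  true   (every section longer than the threshold
 *                                is reported, regardless of the maximum)
 */
```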
e309b41d | 317 | static void |
81d68a96 SR |
318 | check_critical_timing(struct trace_array *tr, |
319 | struct trace_array_cpu *data, | |
320 | unsigned long parent_ip, | |
321 | int cpu) | |
322 | { | |
a5a1d1c2 | 323 | u64 T0, T1, delta; |
81d68a96 | 324 | unsigned long flags; |
38697053 | 325 | int pc; |
81d68a96 | 326 | |
81d68a96 | 327 | T0 = data->preempt_timestamp; |
750ed1a4 | 328 | T1 = ftrace_now(cpu); |
81d68a96 SR |
329 | delta = T1-T0; |
330 | ||
331 | local_save_flags(flags); | |
332 | ||
6450c1d3 SR |
333 | pc = preempt_count(); |
334 | ||
6d9b3fa5 | 335 | if (!report_latency(tr, delta)) |
81d68a96 SR |
336 | goto out; |
337 | ||
5389f6fa | 338 | raw_spin_lock_irqsave(&max_trace_lock, flags); |
81d68a96 | 339 | |
89b2f978 | 340 | /* check if we are still the max latency */ |
6d9b3fa5 | 341 | if (!report_latency(tr, delta)) |
89b2f978 SR |
342 | goto out_unlock; |
343 | ||
62b915f1 | 344 | __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); |
cc51a0fc SR |
345 | /* Skip 5 functions to get to the irq/preempt enable function */ |
346 | __trace_stack(tr, flags, 5, pc); | |
81d68a96 | 347 | |
81d68a96 | 348 | if (data->critical_sequence != max_sequence) |
89b2f978 | 349 | goto out_unlock; |
81d68a96 | 350 | |
81d68a96 SR |
351 | data->critical_end = parent_ip; |
352 | ||
b5130b1e | 353 | if (likely(!is_tracing_stopped())) { |
6d9b3fa5 | 354 | tr->max_latency = delta; |
b5130b1e CE |
355 | update_max_tr_single(tr, current, cpu); |
356 | } | |
81d68a96 | 357 | |
81d68a96 SR |
358 | max_sequence++; |
359 | ||
89b2f978 | 360 | out_unlock:
5389f6fa | 361 | 	raw_spin_unlock_irqrestore(&max_trace_lock, flags);
89b2f978 | 362 |
81d68a96 SR | 363 | out:
364 | 	data->critical_sequence = max_sequence;
750ed1a4 | 365 | 	data->preempt_timestamp = ftrace_now(cpu);
62b915f1 | 366 | 	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
81d68a96 SR | 367 | }
368 |
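The structure of `check_critical_timing()` above follows a common lock-avoidance pattern; a sketch of just that skeleton, for orientation:

```c
if (!report_latency(tr, delta))		/* cheap, lock-free fast path */
	goto out;

raw_spin_lock_irqsave(&max_trace_lock, flags);
if (!report_latency(tr, delta))		/* re-check: another CPU may have recorded a larger max meanwhile */
	goto out_unlock;

/* ... record the new maximum and snapshot the buffer ... */
```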
e309b41d | 369 | static inline void |
81d68a96 SR |
370 | start_critical_timing(unsigned long ip, unsigned long parent_ip) |
371 | { | |
372 | int cpu; | |
373 | struct trace_array *tr = irqsoff_trace; | |
374 | struct trace_array_cpu *data; | |
375 | unsigned long flags; | |
376 | ||
10246fa3 | 377 | if (!tracer_enabled || !tracing_is_enabled()) |
81d68a96 SR |
378 | return; |
379 | ||
c5f888ca SR |
380 | cpu = raw_smp_processor_id(); |
381 | ||
382 | if (per_cpu(tracing_cpu, cpu)) | |
6cd8a4bb SR |
383 | return; |
384 | ||
12883efb | 385 | data = per_cpu_ptr(tr->trace_buffer.data, cpu); |
81d68a96 | 386 | |
c5f888ca | 387 | if (unlikely(!data) || atomic_read(&data->disabled)) |
81d68a96 SR |
388 | return; |
389 | ||
390 | atomic_inc(&data->disabled); | |
391 | ||
392 | data->critical_sequence = max_sequence; | |
750ed1a4 | 393 | data->preempt_timestamp = ftrace_now(cpu); |
6cd8a4bb | 394 | data->critical_start = parent_ip ? : ip; |
81d68a96 SR |
395 | |
396 | local_save_flags(flags); | |
6cd8a4bb | 397 | |
62b915f1 | 398 | __trace_function(tr, ip, parent_ip, flags, preempt_count()); |
81d68a96 | 399 | |
c5f888ca | 400 | per_cpu(tracing_cpu, cpu) = 1; |
6cd8a4bb | 401 | |
81d68a96 SR |
402 | atomic_dec(&data->disabled); |
403 | } | |
404 | ||
e309b41d | 405 | static inline void |
81d68a96 SR |
406 | stop_critical_timing(unsigned long ip, unsigned long parent_ip) |
407 | { | |
408 | int cpu; | |
409 | struct trace_array *tr = irqsoff_trace; | |
410 | struct trace_array_cpu *data; | |
411 | unsigned long flags; | |
412 | ||
c5f888ca | 413 | cpu = raw_smp_processor_id(); |
6cd8a4bb | 414 | /* Always clear the tracing cpu on stopping the trace */ |
c5f888ca SR |
415 | if (unlikely(per_cpu(tracing_cpu, cpu))) |
416 | per_cpu(tracing_cpu, cpu) = 0; | |
6cd8a4bb SR |
417 | else |
418 | return; | |
419 | ||
10246fa3 | 420 | if (!tracer_enabled || !tracing_is_enabled()) |
81d68a96 SR |
421 | return; |
422 | ||
12883efb | 423 | data = per_cpu_ptr(tr->trace_buffer.data, cpu); |
81d68a96 | 424 | |
3928a8a2 | 425 | if (unlikely(!data) || |
81d68a96 SR |
426 | !data->critical_start || atomic_read(&data->disabled)) |
427 | return; | |
428 | ||
429 | atomic_inc(&data->disabled); | |
c5f888ca | 430 | |
81d68a96 | 431 | local_save_flags(flags); |
62b915f1 | 432 | __trace_function(tr, ip, parent_ip, flags, preempt_count()); |
6cd8a4bb | 433 | check_critical_timing(tr, data, parent_ip ? : ip, cpu); |
81d68a96 SR |
434 | data->critical_start = 0; |
435 | atomic_dec(&data->disabled); | |
436 | } | |
437 | ||
6cd8a4bb | 438 | /* start and stop critical timings, used to pause the measurement (e.g. in idle) */
e309b41d | 439 | void start_critical_timings(void)
81d68a96 | 440 | {
6cd8a4bb | 441 | 	if (preempt_trace() || irq_trace())
81d68a96 SR | 442 | 		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
443 | }
1fe37104 | 444 | EXPORT_SYMBOL_GPL(start_critical_timings);
81d68a96 | 445 |
e309b41d | 446 | void stop_critical_timings(void)
81d68a96 | 447 | {
6cd8a4bb | 448 | 	if (preempt_trace() || irq_trace())
81d68a96 SR | 449 | 		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
450 | }
1fe37104 | 451 | EXPORT_SYMBOL_GPL(stop_critical_timings);
81d68a96 | 452 |
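The exported pair above lets known-quiescent code (chiefly the idle loop) be excluded from the measurement. A sketch of the intended bracketing; the idle routine name is hypothetical:

```c
stop_critical_timings();	/* time spent waiting below is not reported */
arch_cpu_idle_example();	/* hypothetical "halt until interrupt" routine */
start_critical_timings();	/* resume measuring the remainder of the irqs-off region */
```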
6cd8a4bb | 453 | #ifdef CONFIG_IRQSOFF_TRACER |
81d68a96 | 454 | #ifdef CONFIG_PROVE_LOCKING |
e309b41d | 455 | void time_hardirqs_on(unsigned long a0, unsigned long a1) |
81d68a96 | 456 | { |
6cd8a4bb | 457 | if (!preempt_trace() && irq_trace()) |
81d68a96 SR |
458 | stop_critical_timing(a0, a1); |
459 | } | |
460 | ||
e309b41d | 461 | void time_hardirqs_off(unsigned long a0, unsigned long a1) |
81d68a96 | 462 | { |
6cd8a4bb | 463 | if (!preempt_trace() && irq_trace()) |
81d68a96 SR |
464 | start_critical_timing(a0, a1); |
465 | } | |
466 | ||
467 | #else /* !CONFIG_PROVE_LOCKING */ | |
468 | ||
81d68a96 SR |
469 | /* |
470 | * We are only interested in hardirq on/off events: | |
471 | */ | |
aaecaa0b | 472 | static inline void tracer_hardirqs_on(void) |
81d68a96 | 473 | { |
6cd8a4bb | 474 | if (!preempt_trace() && irq_trace()) |
81d68a96 SR |
475 | stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); |
476 | } | |
81d68a96 | 477 | |
aaecaa0b | 478 | static inline void tracer_hardirqs_off(void) |
81d68a96 | 479 | { |
6cd8a4bb | 480 | if (!preempt_trace() && irq_trace()) |
81d68a96 SR |
481 | start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); |
482 | } | |
81d68a96 | 483 | |
aaecaa0b | 484 | static inline void tracer_hardirqs_on_caller(unsigned long caller_addr) |
81d68a96 | 485 | { |
6cd8a4bb | 486 | if (!preempt_trace() && irq_trace()) |
81d68a96 SR |
487 | stop_critical_timing(CALLER_ADDR0, caller_addr); |
488 | } | |
81d68a96 | 489 | |
aaecaa0b | 490 | static inline void tracer_hardirqs_off_caller(unsigned long caller_addr) |
81d68a96 | 491 | { |
6cd8a4bb | 492 | if (!preempt_trace() && irq_trace()) |
81d68a96 SR |
493 | start_critical_timing(CALLER_ADDR0, caller_addr); |
494 | } | |
81d68a96 SR |
495 | |
496 | #endif /* CONFIG_PROVE_LOCKING */ | |
6cd8a4bb SR |
497 | #endif /* CONFIG_IRQSOFF_TRACER */ |
498 | ||
499 | #ifdef CONFIG_PREEMPT_TRACER | |
aaecaa0b | 500 | static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) |
6cd8a4bb | 501 | { |
e36de1de | 502 | if (preempt_trace() && !irq_trace()) |
1e01cb0c | 503 | stop_critical_timing(a0, a1); |
6cd8a4bb SR |
504 | } |
505 | ||
aaecaa0b | 506 | static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) |
6cd8a4bb | 507 | { |
e36de1de | 508 | if (preempt_trace() && !irq_trace()) |
1e01cb0c | 509 | start_critical_timing(a0, a1); |
6cd8a4bb SR |
510 | } |
511 | #endif /* CONFIG_PREEMPT_TRACER */ | |
81d68a96 | 512 | |
8179e8a1 SRRH |
513 | #ifdef CONFIG_FUNCTION_TRACER |
514 | static bool function_enabled; | |
515 | ||
4104d326 | 516 | static int register_irqsoff_function(struct trace_array *tr, int graph, int set) |
81d68a96 | 517 | { |
328df475 | 518 | int ret; |
62b915f1 | 519 | |
328df475 | 520 | /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */ |
983f938a | 521 | if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION))) |
328df475 SRRH |
522 | return 0; |
523 | ||
524 | if (graph) | |
62b915f1 JO |
525 | ret = register_ftrace_graph(&irqsoff_graph_return, |
526 | &irqsoff_graph_entry); | |
328df475 | 527 | else |
4104d326 | 528 | ret = register_ftrace_function(tr->ops); |
328df475 SRRH |
529 | |
530 | if (!ret) | |
531 | function_enabled = true; | |
532 | ||
533 | return ret; | |
534 | } | |
535 | ||
4104d326 | 536 | static void unregister_irqsoff_function(struct trace_array *tr, int graph) |
328df475 SRRH |
537 | { |
538 | if (!function_enabled) | |
539 | return; | |
540 | ||
541 | if (graph) | |
542 | unregister_ftrace_graph(); | |
543 | else | |
4104d326 | 544 | unregister_ftrace_function(tr->ops); |
328df475 SRRH |
545 | |
546 | function_enabled = false; | |
547 | } | |
548 | ||
8179e8a1 | 549 | static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set) |
328df475 | 550 | { |
8179e8a1 SRRH |
551 | if (!(mask & TRACE_ITER_FUNCTION)) |
552 | return 0; | |
553 | ||
328df475 | 554 | if (set) |
983f938a | 555 | register_irqsoff_function(tr, is_graph(tr), 1); |
328df475 | 556 | else |
983f938a | 557 | unregister_irqsoff_function(tr, is_graph(tr)); |
8179e8a1 SRRH |
558 | return 1; |
559 | } | |
560 | #else | |
561 | static int register_irqsoff_function(struct trace_array *tr, int graph, int set) | |
562 | { | |
03905582 | 563 | return 0; |
328df475 | 564 | } |
8179e8a1 SRRH |
565 | static void unregister_irqsoff_function(struct trace_array *tr, int graph) { } |
566 | static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set) | |
567 | { | |
568 | return 0; | |
569 | } | |
570 | #endif /* CONFIG_FUNCTION_TRACER */ | |
328df475 | 571 | |
bf6065b5 | 572 | static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set) |
328df475 | 573 | { |
bf6065b5 SRRH |
574 | struct tracer *tracer = tr->current_trace; |
575 | ||
8179e8a1 SRRH |
576 | if (irqsoff_function_set(tr, mask, set)) |
577 | return 0; | |
03905582 | 578 | |
729358da | 579 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
03905582 SRRH |
580 | if (mask & TRACE_ITER_DISPLAY_GRAPH) |
581 | return irqsoff_display_graph(tr, set); | |
729358da | 582 | #endif |
328df475 SRRH |
583 | |
584 | return trace_keep_overwrite(tracer, mask, set); | |
585 | } | |
586 | ||
587 | static int start_irqsoff_tracer(struct trace_array *tr, int graph) | |
588 | { | |
589 | int ret; | |
590 | ||
4104d326 | 591 | ret = register_irqsoff_function(tr, graph, 0); |
62b915f1 JO |
592 | |
593 | if (!ret && tracing_is_enabled()) | |
9036990d | 594 | tracer_enabled = 1; |
94523e81 | 595 | else |
9036990d | 596 | tracer_enabled = 0; |
62b915f1 JO |
597 | |
598 | return ret; | |
81d68a96 SR |
599 | } |
600 | ||
62b915f1 | 601 | static void stop_irqsoff_tracer(struct trace_array *tr, int graph) |
81d68a96 | 602 | { |
81d68a96 | 603 | tracer_enabled = 0; |
62b915f1 | 604 | |
4104d326 | 605 | unregister_irqsoff_function(tr, graph); |
81d68a96 SR |
606 | } |
607 | ||
02f2f764 SRRH |
608 | static bool irqsoff_busy; |
609 | ||
610 | static int __irqsoff_tracer_init(struct trace_array *tr) | |
81d68a96 | 611 | { |
02f2f764 SRRH |
612 | if (irqsoff_busy) |
613 | return -EBUSY; | |
614 | ||
983f938a | 615 | save_flags = tr->trace_flags; |
613f04a0 SRRH |
616 | |
617 | /* non overwrite screws up the latency tracers */ | |
2b6080f2 SR |
618 | set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1); |
619 | set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1); | |
e9d25fe6 | 620 | |
6d9b3fa5 | 621 | tr->max_latency = 0; |
81d68a96 | 622 | irqsoff_trace = tr; |
c5f888ca | 623 | /* make sure that the tracer is visible */ |
81d68a96 | 624 | smp_wmb(); |
62b915f1 | 625 | |
4104d326 SRRH |
626 | ftrace_init_array_ops(tr, irqsoff_tracer_call); |
627 | ||
628 | /* Only toplevel instance supports graph tracing */ | |
629 | if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL && | |
983f938a | 630 | is_graph(tr)))) |
62b915f1 | 631 | printk(KERN_ERR "failed to start irqsoff tracer\n"); |
02f2f764 SRRH |
632 | |
633 | irqsoff_busy = true; | |
634 | return 0; | |
81d68a96 SR |
635 | } |
636 | ||
637 | static void irqsoff_tracer_reset(struct trace_array *tr) | |
638 | { | |
613f04a0 SRRH |
639 | int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT; |
640 | int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE; | |
641 | ||
983f938a | 642 | stop_irqsoff_tracer(tr, is_graph(tr)); |
e9d25fe6 | 643 | |
2b6080f2 SR |
644 | set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag); |
645 | set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag); | |
4104d326 | 646 | ftrace_reset_array_ops(tr); |
02f2f764 SRRH |
647 | |
648 | irqsoff_busy = false; | |
81d68a96 SR |
649 | } |
650 | ||
9036990d SR |
651 | static void irqsoff_tracer_start(struct trace_array *tr) |
652 | { | |
9036990d | 653 | tracer_enabled = 1; |
9036990d SR |
654 | } |
655 | ||
656 | static void irqsoff_tracer_stop(struct trace_array *tr) | |
657 | { | |
658 | tracer_enabled = 0; | |
81d68a96 SR |
659 | } |
660 | ||
6cd8a4bb | 661 | #ifdef CONFIG_IRQSOFF_TRACER |
1c80025a | 662 | static int irqsoff_tracer_init(struct trace_array *tr) |
6cd8a4bb SR |
663 | { |
664 | trace_type = TRACER_IRQS_OFF; | |
665 | ||
02f2f764 | 666 | return __irqsoff_tracer_init(tr); |
6cd8a4bb | 667 | } |
81d68a96 SR |
668 | static struct tracer irqsoff_tracer __read_mostly = |
669 | { | |
670 | .name = "irqsoff", | |
671 | .init = irqsoff_tracer_init, | |
672 | .reset = irqsoff_tracer_reset, | |
9036990d SR |
673 | .start = irqsoff_tracer_start, |
674 | .stop = irqsoff_tracer_stop, | |
f43c738b | 675 | .print_max = true, |
62b915f1 JO |
676 | .print_header = irqsoff_print_header, |
677 | .print_line = irqsoff_print_line, | |
328df475 | 678 | .flag_changed = irqsoff_flag_changed, |
60a11774 SR |
679 | #ifdef CONFIG_FTRACE_SELFTEST |
680 | .selftest = trace_selftest_startup_irqsoff, | |
681 | #endif | |
62b915f1 JO |
682 | .open = irqsoff_trace_open, |
683 | .close = irqsoff_trace_close, | |
02f2f764 | 684 | .allow_instances = true, |
f43c738b | 685 | .use_max_tr = true, |
81d68a96 | 686 | }; |
6cd8a4bb SR |
687 | # define register_irqsoff(trace) register_tracer(&trace) |
688 | #else | |
689 | # define register_irqsoff(trace) do { } while (0) | |
690 | #endif | |
691 | ||
692 | #ifdef CONFIG_PREEMPT_TRACER | |
1c80025a | 693 | static int preemptoff_tracer_init(struct trace_array *tr) |
6cd8a4bb SR |
694 | { |
695 | trace_type = TRACER_PREEMPT_OFF; | |
696 | ||
02f2f764 | 697 | return __irqsoff_tracer_init(tr); |
6cd8a4bb SR |
698 | } |
699 | ||
700 | static struct tracer preemptoff_tracer __read_mostly = | |
701 | { | |
702 | .name = "preemptoff", | |
703 | .init = preemptoff_tracer_init, | |
704 | .reset = irqsoff_tracer_reset, | |
9036990d SR |
705 | .start = irqsoff_tracer_start, |
706 | .stop = irqsoff_tracer_stop, | |
f43c738b | 707 | .print_max = true, |
62b915f1 JO |
708 | .print_header = irqsoff_print_header, |
709 | .print_line = irqsoff_print_line, | |
328df475 | 710 | .flag_changed = irqsoff_flag_changed, |
60a11774 SR |
711 | #ifdef CONFIG_FTRACE_SELFTEST |
712 | .selftest = trace_selftest_startup_preemptoff, | |
713 | #endif | |
62b915f1 JO |
714 | .open = irqsoff_trace_open, |
715 | .close = irqsoff_trace_close, | |
02f2f764 | 716 | .allow_instances = true, |
f43c738b | 717 | .use_max_tr = true, |
6cd8a4bb SR |
718 | }; |
719 | # define register_preemptoff(trace) register_tracer(&trace) | |
720 | #else | |
721 | # define register_preemptoff(trace) do { } while (0) | |
722 | #endif | |
723 | ||
724 | #if defined(CONFIG_IRQSOFF_TRACER) && \ | |
725 | defined(CONFIG_PREEMPT_TRACER) | |
726 | ||
1c80025a | 727 | static int preemptirqsoff_tracer_init(struct trace_array *tr) |
6cd8a4bb SR |
728 | { |
729 | trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF; | |
730 | ||
02f2f764 | 731 | return __irqsoff_tracer_init(tr); |
6cd8a4bb SR |
732 | } |
733 | ||
734 | static struct tracer preemptirqsoff_tracer __read_mostly = | |
735 | { | |
736 | .name = "preemptirqsoff", | |
737 | .init = preemptirqsoff_tracer_init, | |
738 | .reset = irqsoff_tracer_reset, | |
9036990d SR |
739 | .start = irqsoff_tracer_start, |
740 | .stop = irqsoff_tracer_stop, | |
f43c738b | 741 | .print_max = true, |
62b915f1 JO |
742 | .print_header = irqsoff_print_header, |
743 | .print_line = irqsoff_print_line, | |
328df475 | 744 | .flag_changed = irqsoff_flag_changed, |
60a11774 SR |
745 | #ifdef CONFIG_FTRACE_SELFTEST |
746 | .selftest = trace_selftest_startup_preemptirqsoff, | |
747 | #endif | |
62b915f1 JO |
748 | .open = irqsoff_trace_open, |
749 | .close = irqsoff_trace_close, | |
02f2f764 | 750 | .allow_instances = true, |
f43c738b | 751 | .use_max_tr = true, |
6cd8a4bb SR |
752 | }; |
753 | ||
754 | # define register_preemptirqsoff(trace) register_tracer(&trace) | |
755 | #else | |
756 | # define register_preemptirqsoff(trace) do { } while (0) | |
757 | #endif | |
81d68a96 SR |
758 | |
759 | __init static int init_irqsoff_tracer(void) | |
760 | { | |
6cd8a4bb SR |
761 | register_irqsoff(irqsoff_tracer); |
762 | register_preemptoff(preemptoff_tracer); | |
763 | register_preemptirqsoff(preemptirqsoff_tracer); | |
81d68a96 SR |
764 | |
765 | return 0; | |
766 | } | |
6f415672 | 767 | core_initcall(init_irqsoff_tracer); |
aaecaa0b JF | 768 | #endif /* CONFIG_IRQSOFF_TRACER || CONFIG_PREEMPT_TRACER */
769 |
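For completeness, a small userspace sketch of driving this tracer through tracefs (assumptions: tracefs mounted at `/sys/kernel/tracing`, older systems may use `/sys/kernel/debug/tracing`, and a kernel built with `CONFIG_IRQSOFF_TRACER`):

```c
#include <stdio.h>

int main(void)
{
	const char *dir = "/sys/kernel/tracing";	/* adjust if tracefs lives elsewhere */
	char path[256], line[128];
	FILE *f;

	/* select the irqsoff tracer registered by this file */
	snprintf(path, sizeof(path), "%s/current_tracer", dir);
	if (!(f = fopen(path, "w")))
		return 1;
	fputs("irqsoff", f);
	fclose(f);

	/* reset the recorded maximum (tr->max_latency) before measuring */
	snprintf(path, sizeof(path), "%s/tracing_max_latency", dir);
	if ((f = fopen(path, "w"))) {
		fputs("0", f);
		fclose(f);
	}

	/* ... run the workload of interest here ... */

	/* read back the worst irqs-off latency seen, reported in microseconds */
	if ((f = fopen(path, "r"))) {
		if (fgets(line, sizeof(line), f))
			printf("max irqs-off latency: %s", line);
		fclose(f);
	}
	return 0;
}
```

The detailed trace of the worst critical section is then available in the `trace` file of the same directory.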
770 | #ifndef CONFIG_IRQSOFF_TRACER | |
771 | static inline void tracer_hardirqs_on(void) { } | |
772 | static inline void tracer_hardirqs_off(void) { } | |
773 | static inline void tracer_hardirqs_on_caller(unsigned long caller_addr) { } | |
774 | static inline void tracer_hardirqs_off_caller(unsigned long caller_addr) { } | |
775 | #endif | |
776 | ||
777 | #ifndef CONFIG_PREEMPT_TRACER | |
778 | static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { } | |
779 | static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { } | |
780 | #endif | |
781 | ||
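Before the `#if` below, a note on why `CONFIG_PROVE_LOCKING` is excluded (my summary, hedged): with lockdep built in, lockdep supplies the `trace_hardirqs_on/off()` entry points itself and drives this tracer through `time_hardirqs_on()/time_hardirqs_off()` defined earlier in this file; only without lockdep does this file define those entry points directly.

```c
/*
 * Rough call flow (summary, not literal code):
 *
 *   CONFIG_PROVE_LOCKING:   local_irq_disable()
 *                             -> lockdep's trace_hardirqs_off()
 *                               -> time_hardirqs_off()        (this file, above)
 *                                 -> start_critical_timing()
 *
 *   !CONFIG_PROVE_LOCKING:  local_irq_disable()
 *                             -> trace_hardirqs_off()         (this file, below)
 *                               -> tracer_hardirqs_off()
 *                                 -> start_critical_timing()
 */
```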
782 | #if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING) | |
d5915816 JF |
783 | /* Per-cpu variable to prevent redundant calls when IRQs already off */ |
784 | static DEFINE_PER_CPU(int, tracing_irq_cpu); | |
785 | ||
aaecaa0b JF |
786 | void trace_hardirqs_on(void) |
787 | { | |
d5915816 JF |
788 | if (!this_cpu_read(tracing_irq_cpu)) |
789 | return; | |
790 | ||
791 | trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1); | |
aaecaa0b | 792 | tracer_hardirqs_on(); |
d5915816 JF |
793 | |
794 | this_cpu_write(tracing_irq_cpu, 0); | |
aaecaa0b JF |
795 | } |
796 | EXPORT_SYMBOL(trace_hardirqs_on); | |
797 | ||
798 | void trace_hardirqs_off(void) | |
799 | { | |
d5915816 JF |
800 | if (this_cpu_read(tracing_irq_cpu)) |
801 | return; | |
802 | ||
803 | this_cpu_write(tracing_irq_cpu, 1); | |
804 | ||
805 | trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1); | |
aaecaa0b JF |
806 | tracer_hardirqs_off(); |
807 | } | |
808 | EXPORT_SYMBOL(trace_hardirqs_off); | |
809 | ||
810 | __visible void trace_hardirqs_on_caller(unsigned long caller_addr) | |
811 | { | |
d5915816 JF |
812 | if (!this_cpu_read(tracing_irq_cpu)) |
813 | return; | |
814 | ||
815 | trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr); | |
aaecaa0b | 816 | tracer_hardirqs_on_caller(caller_addr); |
d5915816 JF |
817 | |
818 | this_cpu_write(tracing_irq_cpu, 0); | |
aaecaa0b JF |
819 | } |
820 | EXPORT_SYMBOL(trace_hardirqs_on_caller); | |
821 | ||
822 | __visible void trace_hardirqs_off_caller(unsigned long caller_addr) | |
823 | { | |
d5915816 JF |
824 | if (this_cpu_read(tracing_irq_cpu)) |
825 | return; | |
826 | ||
827 | this_cpu_write(tracing_irq_cpu, 1); | |
828 | ||
829 | trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr); | |
aaecaa0b JF |
830 | tracer_hardirqs_off_caller(caller_addr); |
831 | } | |
832 | EXPORT_SYMBOL(trace_hardirqs_off_caller); | |
833 | ||
834 | /* | |
835 | * Stubs: | |
836 | */ | |
837 | ||
838 | void trace_softirqs_on(unsigned long ip) | |
839 | { | |
840 | } | |
841 | ||
842 | void trace_softirqs_off(unsigned long ip) | |
843 | { | |
844 | } | |
845 | ||
846 | inline void print_irqtrace_events(struct task_struct *curr) | |
847 | { | |
848 | } | |
849 | #endif | |
850 | ||
d5915816 JF |
851 | #if defined(CONFIG_PREEMPT_TRACER) || \ |
852 | (defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS)) | |
aaecaa0b JF |
853 | void trace_preempt_on(unsigned long a0, unsigned long a1) |
854 | { | |
d5915816 | 855 | trace_preempt_enable_rcuidle(a0, a1); |
aaecaa0b JF |
856 | tracer_preempt_on(a0, a1); |
857 | } | |
858 | ||
859 | void trace_preempt_off(unsigned long a0, unsigned long a1) | |
860 | { | |
d5915816 | 861 | trace_preempt_disable_rcuidle(a0, a1); |
aaecaa0b JF |
862 | tracer_preempt_off(a0, a1); |
863 | } | |
864 | #endif |