ftrace: don't use raw_local_irq_save/restore
kernel/trace/trace_irqsoff.c
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

enum {
	TRACER_IRQS_OFF = (1 << 1),
	TRACER_PREEMPT_OFF = (1 << 2),
};

static int trace_type __read_mostly;

#ifdef CONFIG_PREEMPT_TRACER
static inline int notrace
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int notrace
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FTRACE
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void notrace
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		ftrace(tr, data, ip, parent_ip, flags);

	atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = irqsoff_tracer_call,
};
#endif /* CONFIG_FTRACE */

/*
 * Should this new latency be reported/recorded?
 */
static int notrace report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}

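/*
 * Check whether the critical section that just ended is the new
 * worst case; if it qualifies per report_latency(), save it via
 * update_max_tr_single() and print a summary to the console:
 */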
static void notrace
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	unsigned long latency, t0, t1;
	cycle_t T0, T1, T2, delta;
	unsigned long flags;

	/*
	 * usecs conversion is slow so we try to delay the conversion
	 * as long as possible:
	 */
	T0 = data->preempt_timestamp;
	T1 = now(cpu);
	delta = T1-T0;

	local_save_flags(flags);

	if (!report_latency(delta))
		goto out;

	ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
	/*
	 * Update the timestamp, because the trace entry above
	 * might change it (it can only get larger so the latency
	 * is fair to be reported):
	 */
	T2 = now(cpu);

	delta = T2-T0;

	latency = nsecs_to_usecs(delta);

	if (data->critical_sequence != max_sequence)
		goto out;

	tracing_max_latency = delta;
	t0 = nsecs_to_usecs(T0);
	t1 = nsecs_to_usecs(T1);

	data->critical_end = parent_ip;

	update_max_tr_single(tr, current, cpu);

	if (tracing_thresh)
		printk(KERN_INFO "(%16s-%-5d|#%d): %lu us critical section "
		       "violates %lu us threshold.\n"
		       " => started at timestamp %lu: ",
			current->comm, current->pid,
			raw_smp_processor_id(),
			latency, nsecs_to_usecs(tracing_thresh), t0);
	else
		printk(KERN_INFO "(%16s-%-5d|#%d):"
		       " new %lu us maximum-latency "
		       "critical section.\n => started at timestamp %lu: ",
			current->comm, current->pid,
			raw_smp_processor_id(),
			latency, t0);

	print_symbol(KERN_CONT "<%s>\n", data->critical_start);
	printk(KERN_CONT " => ended at timestamp %lu: ", t1);
	print_symbol(KERN_CONT "<%s>\n", data->critical_end);
	dump_stack();
	t1 = nsecs_to_usecs(now(cpu));
	printk(KERN_CONT " => dump-end timestamp %lu\n\n", t1);

	max_sequence++;

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = now(cpu);
	tracing_reset(data);
	ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
}

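/*
 * Mark the start of an irqs/preempt-off critical section on this
 * CPU: remember the timestamp and start address, log one trace
 * entry, and flag the CPU as inside a traced section:
 */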
static inline void notrace
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	if (__get_cpu_var(tracing_cpu))
		return;

	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	if (unlikely(!data) || unlikely(!data->trace) ||
	    atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = now(cpu);
	data->critical_start = parent_ip ? : ip;
	tracing_reset(data);

	local_save_flags(flags);

	ftrace(tr, data, ip, parent_ip, flags);

	__get_cpu_var(tracing_cpu) = 1;

	atomic_dec(&data->disabled);
}

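/*
 * Mark the end of the critical section: clear the per-CPU flag,
 * log the closing trace entry and let check_critical_timing()
 * decide whether this section is a new maximum:
 */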
static inline void notrace
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(__get_cpu_var(tracing_cpu)))
		__get_cpu_var(tracing_cpu) = 0;
	else
		return;

	if (!tracer_enabled)
		return;

	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	if (unlikely(!data) || unlikely(!data->trace) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);
	local_save_flags(flags);
	ftrace(tr, data, ip, parent_ip, flags);
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to exclude sections (like idle) from the measurement */
void notrace start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}

void notrace stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void notrace time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void notrace time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void early_boot_irqs_off(void)
{
}

void early_boot_irqs_on(void)
{
}

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void notrace trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void notrace trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void notrace trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void notrace trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

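/*
 * The preempt-off hooks: a preempt-disabled region is timed exactly
 * like an irqs-off critical section:
 */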
#ifdef CONFIG_PREEMPT_TRACER
void notrace trace_preempt_on(unsigned long a0, unsigned long a1)
{
	stop_critical_timing(a0, a1);
}

void notrace trace_preempt_off(unsigned long a0, unsigned long a1)
{
	start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

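/*
 * Tracer control: enabling hooks irqsoff_tracer_call() into the
 * function tracer via trace_ops, disabling unhooks it:
 */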
static void start_irqsoff_tracer(struct trace_array *tr)
{
	tracer_enabled = 1;
	register_ftrace_function(&trace_ops);
}

static void stop_irqsoff_tracer(struct trace_array *tr)
{
	unregister_ftrace_function(&trace_ops);
	tracer_enabled = 0;
}

static void __irqsoff_tracer_init(struct trace_array *tr)
{
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	if (tr->ctrl)
		start_irqsoff_tracer(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_irqsoff_tracer(tr);
}

static void irqsoff_tracer_ctrl_update(struct trace_array *tr)
{
	if (tr->ctrl)
		start_irqsoff_tracer(tr);
	else
		stop_irqsoff_tracer(tr);
}

static void notrace irqsoff_tracer_open(struct trace_iterator *iter)
{
	/* stop the trace while dumping */
	if (iter->tr->ctrl)
		stop_irqsoff_tracer(iter->tr);
}

static void notrace irqsoff_tracer_close(struct trace_iterator *iter)
{
	if (iter->tr->ctrl)
		start_irqsoff_tracer(iter->tr);
}

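/*
 * One struct tracer per configured mode (irqsoff, preemptoff,
 * preemptirqsoff), plus no-op register macros for the modes that
 * are compiled out:
 */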
#ifdef CONFIG_IRQSOFF_TRACER
static void irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	__irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name = "irqsoff",
	.init = irqsoff_tracer_init,
	.reset = irqsoff_tracer_reset,
	.open = irqsoff_tracer_open,
	.close = irqsoff_tracer_close,
	.ctrl_update = irqsoff_tracer_ctrl_update,
	.print_max = 1,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static void preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name = "preemptoff",
	.init = preemptoff_tracer_init,
	.reset = irqsoff_tracer_reset,
	.open = irqsoff_tracer_open,
	.close = irqsoff_tracer_close,
	.ctrl_update = irqsoff_tracer_ctrl_update,
	.print_max = 1,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static void preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name = "preemptirqsoff",
	.init = preemptirqsoff_tracer_init,
	.reset = irqsoff_tracer_reset,
	.open = irqsoff_tracer_open,
	.close = irqsoff_tracer_close,
	.ctrl_update = irqsoff_tracer_ctrl_update,
	.print_max = 1,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
device_initcall(init_irqsoff_tracer);