ftrace: add logic to record overruns
[linux-2.6-block.git] kernel/trace/trace.h
#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_SPECIAL,

	__TRACE_LAST_TYPE
};

/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	unsigned long ip;
	unsigned long parent_ip;
};

/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	unsigned int prev_pid;
	unsigned char prev_prio;
	unsigned char prev_state;
	unsigned int next_pid;
	unsigned char next_prio;
	unsigned char next_state;
};

/*
 * Special (free-form) trace entry:
 */
struct special_entry {
	unsigned long arg1;
	unsigned long arg2;
	unsigned long arg3;
};

/*
 * Stack-trace entry:
 */

#define FTRACE_STACK_ENTRIES 5

struct stack_entry {
	unsigned long caller[FTRACE_STACK_ENTRIES];
};

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	char type;
	char cpu;
	char flags;
	char preempt_count;
	int pid;
	cycle_t t;
	union {
		struct ftrace_entry fn;
		struct ctx_switch_entry ctx;
		struct special_entry special;
		struct stack_entry stack;
	};
};

#define TRACE_ENTRY_SIZE sizeof(struct trace_entry)
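
/*
 * Illustrative sketch (not taken from trace.c): how a producer might
 * fill in one trace_entry for a TRACE_FN record.  The caller is assumed
 * to have already reserved "entry" in the per-CPU buffer and to supply
 * the cpu number plus a timestamp from ftrace_now(); the helper name is
 * hypothetical.
 */
static inline void
example_fill_fn_entry(struct trace_entry *entry, int cpu, cycle_t now,
		      unsigned long ip, unsigned long parent_ip,
		      unsigned long flags)
{
	entry->type = TRACE_FN;
	entry->cpu = cpu;
	entry->flags = flags;
	entry->preempt_count = preempt_count() & 0xff;
	entry->pid = current->pid;
	entry->t = now;
	entry->fn.ip = ip;		/* anonymous union member */
	entry->fn.parent_ip = parent_ip;
}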

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	struct list_head trace_pages;
	atomic_t disabled;
	raw_spinlock_t lock;
	struct lock_class_key lock_key;

	/* these fields get copied into max-trace: */
	unsigned trace_head_idx;
	unsigned trace_tail_idx;
	void *trace_head;	/* producer */
	void *trace_tail;	/* consumer */
	unsigned long trace_idx;
	unsigned long overrun;
	unsigned long saved_latency;
	unsigned long critical_start;
	unsigned long critical_end;
	unsigned long critical_sequence;
	unsigned long nice;
	unsigned long policy;
	unsigned long rt_priority;
	cycle_t preempt_timestamp;
	pid_t pid;
	uid_t uid;
	char comm[TASK_COMM_LEN];
};
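
/*
 * Sketch of the overrun accounting the "overrun" field above supports
 * (the real logic lives in trace.c): when the producer head wraps
 * around onto the consumer tail, the oldest entry is discarded and
 * overrun is bumped so readers can report how many entries were lost.
 * This helper is purely illustrative.
 */
static inline void example_note_overwrite(struct trace_array_cpu *data)
{
	if (data->trace_head == data->trace_tail &&
	    data->trace_head_idx == data->trace_tail_idx)
		data->overrun++;	/* one more entry silently dropped */
	/* trace.c then advances trace_tail past the clobbered entry */
}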

struct trace_iterator;

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	unsigned long entries;
	long ctrl;
	int cpu;
	cycle_t time_start;
	struct task_struct *waiter;
	struct trace_array_cpu *data[NR_CPUS];
};

/*
 * A specific tracer, represented by methods that operate on a trace array:
 */
struct tracer {
	const char *name;
	void (*init)(struct trace_array *tr);
	void (*reset)(struct trace_array *tr);
	void (*open)(struct trace_iterator *iter);
	void (*close)(struct trace_iterator *iter);
	void (*start)(struct trace_iterator *iter);
	void (*stop)(struct trace_iterator *iter);
	void (*ctrl_update)(struct trace_array *tr);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int (*selftest)(struct tracer *trace,
			struct trace_array *tr);
#endif
	int (*print_line)(struct trace_iterator *iter);
	struct tracer *next;
	int print_max;
};
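
/*
 * Sketch of a minimal tracer plugin as it might appear in its own
 * source file that includes this header (compare the plugins under
 * kernel/trace/).  All names here are hypothetical; only the hooks a
 * tracer needs have to be filled in, the rest may stay NULL.
 */
static void example_trace_init(struct trace_array *tr)
{
	int cpu;

	for_each_possible_cpu(cpu)
		tracing_reset(tr->data[cpu]);
}

static void example_trace_reset(struct trace_array *tr)
{
}

static void example_trace_ctrl_update(struct trace_array *tr)
{
	/* tr->ctrl flips when tracing is switched on or off from user space */
	if (tr->ctrl)
		example_trace_init(tr);
}

static struct tracer example_tracer = {
	.name = "example",
	.init = example_trace_init,
	.reset = example_trace_reset,
	.ctrl_update = example_trace_ctrl_update,
};

/* made known to the core with register_tracer(&example_tracer) at init time */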

struct trace_seq {
	unsigned char buffer[PAGE_SIZE];
	unsigned int len;
};

/*
 * Trace iterator - used by printout routines that present trace
 * results to users; such routines might sleep, etc.:
 */
struct trace_iterator {
	struct trace_array *tr;
	struct tracer *trace;
	long last_overrun[NR_CPUS];
	long overrun[NR_CPUS];

	/* The below is zeroed out in pipe_read */
	struct trace_seq seq;
	struct trace_entry *ent;
	int cpu;

	struct trace_entry *prev_ent;
	int prev_cpu;

	unsigned long iter_flags;
	loff_t pos;
	unsigned long next_idx[NR_CPUS];
	struct list_head *next_page[NR_CPUS];
	unsigned next_page_idx[NR_CPUS];
	long idx;
};
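
/*
 * Sketch of a print_line handler (hypothetical, modelled on the output
 * code in trace.c): it formats the entry the iterator is positioned on
 * into iter->seq with trace_seq_printf() and returns its result;
 * entries of other types simply return 0 here.
 */
static int example_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;

	if (entry->type != TRACE_FN)
		return 0;	/* not a type this sketch handles */

	return trace_seq_printf(&iter->seq, "%5d: %08lx <- %08lx\n",
				entry->pid, entry->fn.ip,
				entry->fn.parent_ip);
}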

void tracing_reset(struct trace_array_cpu *data);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags);
void tracing_record_cmdline(struct task_struct *tsk);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3);
void trace_function(struct trace_array *tr,
		    struct trace_array_cpu *data,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags);
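
/*
 * Sketch of how a function-tracing hook typically drives
 * trace_function() (compare trace_functions.c).  The per-CPU
 * "disabled" counter keeps a CPU from recursing into its own buffer;
 * example_trace_array is a hypothetical global set up at init time.
 */
static struct trace_array *example_trace_array;

static void example_function_hook(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = example_trace_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, data, ip, parent_ip, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}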

void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

extern atomic_t trace_record_cmdline_enabled;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

extern cycle_t ftrace_now(int cpu);

#ifdef CONFIG_SCHED_TRACER
extern void
wakeup_sched_switch(struct task_struct *prev, struct task_struct *next);
extern void
wakeup_sched_wakeup(struct task_struct *wakee, struct task_struct *curr);
#else
static inline void
wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
{
}
static inline void
wakeup_sched_wakeup(struct task_struct *wakee, struct task_struct *curr)
{
}
#endif

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t func;
	void *private;
	struct tracer_switch_ops *next;
};

extern int register_tracer_switch(struct tracer_switch_ops *ops);
extern int unregister_tracer_switch(struct tracer_switch_ops *ops);

#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
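
/*
 * Sketch of hooking the context-switch path through tracer_switch_ops
 * (all names hypothetical).  Each registered ops is chained via ->next
 * and its func receives ->private plus the outgoing and incoming tasks.
 */
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
static unsigned long example_switch_count;

static void example_switch_func(void *private,
				struct task_struct *prev,
				struct task_struct *next)
{
	(*(unsigned long *)private)++;	/* count context switches */
}

static struct tracer_switch_ops example_switch_ops = {
	.func = example_switch_func,
	.private = &example_switch_count,
};

/*
 * Enabled with register_tracer_switch(&example_switch_ops) and torn
 * down again with unregister_tracer_switch(&example_switch_ops).
 */
#endif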

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif

#ifdef CONFIG_FTRACE_STARTUP_TEST
#ifdef CONFIG_FTRACE
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
#endif
#ifdef CONFIG_IRQSOFF_TRACER
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
#endif
#ifdef CONFIG_PREEMPT_TRACER
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
#endif
#ifdef CONFIG_SCHED_TRACER
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
#endif
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
#endif
#endif /* CONFIG_FTRACE_STARTUP_TEST */
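
/*
 * Sketch of what a startup selftest with the signature above might do
 * (the real ones live in trace_selftest.c, and this version assumes
 * linux/delay.h for msleep()): switch the tracer on through its own
 * hooks, let it run briefly, then make sure something was recorded
 * before resetting the buffers.
 */
#ifdef CONFIG_FTRACE_STARTUP_TEST
static int example_selftest(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count = 0;
	int cpu;

	tr->ctrl = 1;
	trace->init(tr);
	msleep(100);
	tr->ctrl = 0;
	trace->ctrl_update(tr);

	for_each_possible_cpu(cpu)
		count += tr->data[cpu]->trace_idx;

	trace->reset(tr);
	return count ? 0 : -1;
}
#endif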

extern void *head_page(struct trace_array_cpu *data);
extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern long ns2usecs(cycle_t nsec);

extern unsigned long trace_flags;

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT = 0x01,
	TRACE_ITER_SYM_OFFSET = 0x02,
	TRACE_ITER_SYM_ADDR = 0x04,
	TRACE_ITER_VERBOSE = 0x08,
	TRACE_ITER_RAW = 0x10,
	TRACE_ITER_HEX = 0x20,
	TRACE_ITER_BIN = 0x40,
	TRACE_ITER_BLOCK = 0x80,
	TRACE_ITER_STACKTRACE = 0x100,
	TRACE_ITER_SCHED_TREE = 0x200,
};
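
/*
 * Sketch of how output code consults these bits (compare the
 * formatting helpers in trace.c); trace_flags is the live bitmask
 * exposed to user space through the trace_options[] strings in
 * trace.c.  The helper name is hypothetical.
 */
static inline void
example_maybe_print_addr(struct trace_seq *s, unsigned long addr)
{
	if (trace_flags & TRACE_ITER_SYM_ADDR)
		trace_seq_printf(s, " <%08lx>", addr);
}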

#endif /* _LINUX_KERNEL_TRACE_H */