ftrace: modulize the number of CPU buffers
[linux-2.6-block.git] kernel/trace/trace.h
#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_SPECIAL,

	__TRACE_LAST_TYPE
};

/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	unsigned long ip;
	unsigned long parent_ip;
};

/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	unsigned int prev_pid;
	unsigned char prev_prio;
	unsigned char prev_state;
	unsigned int next_pid;
	unsigned char next_prio;
	unsigned char next_state;
};

/*
 * Special (free-form) trace entry:
 */
struct special_entry {
	unsigned long arg1;
	unsigned long arg2;
	unsigned long arg3;
};

/*
 * Stack-trace entry:
 */

#define FTRACE_STACK_ENTRIES	5

struct stack_entry {
	unsigned long caller[FTRACE_STACK_ENTRIES];
};

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	char type;
	char cpu;
	char flags;
	char preempt_count;
	int pid;
	cycle_t t;
	union {
		struct ftrace_entry fn;
		struct ctx_switch_entry ctx;
		struct special_entry special;
		struct stack_entry stack;
	};
};

#define TRACE_ENTRY_SIZE	sizeof(struct trace_entry)

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	struct list_head trace_pages;
	atomic_t disabled;
	raw_spinlock_t lock;
	struct lock_class_key lock_key;

	/* these fields get copied into max-trace: */
	unsigned trace_head_idx;
	unsigned trace_tail_idx;
	void *trace_head; /* producer */
	void *trace_tail; /* consumer */
	unsigned long trace_idx;
	unsigned long saved_latency;
	unsigned long critical_start;
	unsigned long critical_end;
	unsigned long critical_sequence;
	unsigned long nice;
	unsigned long policy;
	unsigned long rt_priority;
	cycle_t preempt_timestamp;
	pid_t pid;
	uid_t uid;
	char comm[TASK_COMM_LEN];
};

struct trace_iterator;

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	unsigned long entries;
	long ctrl;
	int cpu;
	cycle_t time_start;
	struct task_struct *waiter;
	struct trace_array_cpu *data[NR_CPUS];
};

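/*
 * Illustrative sketch (not part of this header): tracers typically reach
 * their per-CPU buffer through tr->data[cpu] and bump the atomic "disabled"
 * counter to guard against recursion while recording, along these lines:
 *
 *	struct trace_array_cpu *data;
 *	unsigned long flags;
 *	long disabled;
 *	int cpu;
 *
 *	local_irq_save(flags);
 *	cpu = raw_smp_processor_id();
 *	data = tr->data[cpu];
 *	disabled = atomic_inc_return(&data->disabled);
 *	if (likely(disabled == 1))
 *		trace_function(tr, data, ip, parent_ip, flags);
 *	atomic_dec(&data->disabled);
 *	local_irq_restore(flags);
 *
 * trace_function() is declared further down in this header; the surrounding
 * variable names are hypothetical.
 */
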
/*
 * A specific tracer, represented by methods that operate on a trace array:
 */
struct tracer {
	const char *name;
	void (*init)(struct trace_array *tr);
	void (*reset)(struct trace_array *tr);
	void (*open)(struct trace_iterator *iter);
	void (*close)(struct trace_iterator *iter);
	void (*start)(struct trace_iterator *iter);
	void (*stop)(struct trace_iterator *iter);
	void (*ctrl_update)(struct trace_array *tr);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int (*selftest)(struct tracer *trace,
			struct trace_array *tr);
#endif
	int (*print_line)(struct trace_iterator *iter);
	struct tracer *next;
	int print_max;
};

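/*
 * Illustrative sketch (not part of this header): a tracer plugin fills in a
 * struct tracer and hands it to register_tracer() (declared below).  The
 * names example_tracer_init/example_tracer_reset and "example" are
 * hypothetical:
 *
 *	static void example_tracer_init(struct trace_array *tr)
 *	{
 *		int cpu;
 *
 *		for_each_online_cpu(cpu)
 *			tracing_reset(tr->data[cpu]);
 *	}
 *
 *	static void example_tracer_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_tracer_init,
 *		.reset	= example_tracer_reset,
 *	};
 *
 *	static __init int init_example_tracer(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 *	device_initcall(init_example_tracer);
 */
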
struct trace_seq {
	unsigned char buffer[PAGE_SIZE];
	unsigned int len;
};

/*
 * Trace iterator - used by the printout routines that present trace
 * results to users; these routines may sleep, etc:
 */
struct trace_iterator {
	struct trace_seq seq;
	struct trace_array *tr;
	struct tracer *trace;

	struct trace_entry *ent;
	int cpu;

	struct trace_entry *prev_ent;
	int prev_cpu;

	unsigned long iter_flags;
	loff_t pos;
	unsigned long next_idx[NR_CPUS];
	struct list_head *next_page[NR_CPUS];
	unsigned next_page_idx[NR_CPUS];
	long idx;
};

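/*
 * Illustrative sketch (not part of this header): a tracer's print_line
 * callback formats the current entry into iter->seq via trace_seq_printf()
 * (declared below).  The helper name and the return convention shown here
 * are assumptions, not taken from this header:
 *
 *	static int example_print_line(struct trace_iterator *iter)
 *	{
 *		struct trace_entry *entry = iter->ent;
 *
 *		if (entry->type != TRACE_FN)
 *			return 0;
 *
 *		return trace_seq_printf(&iter->seq, "%d: %lx <-- %lx\n",
 *					entry->pid, entry->fn.ip,
 *					entry->fn.parent_ip);
 *	}
 */
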
void tracing_reset(struct trace_array_cpu *data);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags);
void tracing_record_cmdline(struct task_struct *tsk);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3);
void trace_function(struct trace_array *tr,
		    struct trace_array_cpu *data,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags);

void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

extern cycle_t ftrace_now(int cpu);

#ifdef CONFIG_SCHED_TRACER
extern void
wakeup_sched_switch(struct task_struct *prev, struct task_struct *next);
extern void
wakeup_sched_wakeup(struct task_struct *wakee, struct task_struct *curr);
#else
static inline void
wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
{
}
static inline void
wakeup_sched_wakeup(struct task_struct *wakee, struct task_struct *curr)
{
}
#endif

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t func;
	void *private;
	struct tracer_switch_ops *next;
};

extern int register_tracer_switch(struct tracer_switch_ops *ops);
extern int unregister_tracer_switch(struct tracer_switch_ops *ops);

#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

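/*
 * Illustrative sketch (not part of this header): a context-switch hook wraps
 * its callback and a private cookie in a tracer_switch_ops and registers it.
 * The names example_switch_func/example_switch_ops and the counter are
 * hypothetical:
 *
 *	static unsigned long example_switch_count;
 *
 *	static void example_switch_func(void *private,
 *					struct task_struct *prev,
 *					struct task_struct *next)
 *	{
 *		(*(unsigned long *)private)++;
 *	}
 *
 *	static struct tracer_switch_ops example_switch_ops = {
 *		.func		= example_switch_func,
 *		.private	= &example_switch_count,
 *	};
 *
 *	register_tracer_switch(&example_switch_ops);
 *	...
 *	unregister_tracer_switch(&example_switch_ops);
 */
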
#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif

#ifdef CONFIG_FTRACE_STARTUP_TEST
#ifdef CONFIG_FTRACE
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
#endif
#ifdef CONFIG_IRQSOFF_TRACER
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
#endif
#ifdef CONFIG_PREEMPT_TRACER
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
#endif
#ifdef CONFIG_SCHED_TRACER
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
#endif
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
#endif
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern long ns2usecs(cycle_t nsec);

extern unsigned long trace_flags;

enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT = 0x01,
	TRACE_ITER_SYM_OFFSET = 0x02,
	TRACE_ITER_SYM_ADDR = 0x04,
	TRACE_ITER_VERBOSE = 0x08,
	TRACE_ITER_RAW = 0x10,
	TRACE_ITER_HEX = 0x20,
	TRACE_ITER_BIN = 0x40,
	TRACE_ITER_BLOCK = 0x80,
	TRACE_ITER_STACKTRACE = 0x100,
	TRACE_ITER_SCHED_TREE = 0x200,
};

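/*
 * Illustrative sketch (not part of this header): trace_flags is a bitmask of
 * the TRACE_ITER_* values above, so output code checks individual bits, e.g.:
 *
 *	if (trace_flags & TRACE_ITER_VERBOSE)
 *		trace_seq_printf(&iter->seq, "%8.8s-%-5d %3d",
 *				 comm, entry->pid, entry->cpu);
 *
 * (comm, iter and entry stand in for locals that the real output code would
 * have in scope.)
 */
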
#endif /* _LINUX_KERNEL_TRACE_H */