#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <trace/boot.h>
#include <linux/kmemtrace.h>
#include <linux/hw_breakpoint.h>

#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_SPECIAL,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_BOOT_CALL,
	TRACE_BOOT_RET,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_HW_BRANCHES,
	TRACE_KMEM_ALLOC,
	TRACE_KMEM_FREE,
	TRACE_BLK,
	TRACE_KSYM,

	__TRACE_LAST_TYPE,
};

enum kmemtrace_type_id {
	KMEMTRACE_TYPE_KMALLOC = 0,	/* kmalloc() or kfree(). */
	KMEMTRACE_TYPE_CACHE,		/* kmem_cache_*(). */
	KMEMTRACE_TYPE_PAGES,		/* __get_free_pages() and friends. */
};

extern struct tracer boot_tracer;

#undef __field
#define __field(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)	\
	struct struct_name {					\
		struct trace_entry	ent;			\
		tstruct						\
	}

#undef TP_ARGS
#define TP_ARGS(args...)	args

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)

#include "trace_entries.h"

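/*
 * Illustrative sketch (not part of this header): with the FTRACE_ENTRY
 * definition above, an entry in trace_entries.h along the lines of
 *
 *	FTRACE_ENTRY(function, ftrace_entry, TRACE_FN,
 *		F_STRUCT(
 *			__field(unsigned long, ip)
 *			__field(unsigned long, parent_ip)
 *		),
 *		...
 *	);
 *
 * expands here to the plain structure
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 *
 * Other files re-define these macros before including trace_entries.h
 * to generate different code from the same entry descriptions.
 */
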
/*
 * Syscalls are special and need special handling; this is why
 * they are not included in trace_entries.h.
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	unsigned long		ret;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF	   - interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED   - reschedule is requested
 *  HARDIRQ	   - inside an interrupt handler
 *  SOFTIRQ	   - inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
};

#define TRACE_BUF_SIZE		1024

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace).
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	uid_t			uid;
	char			comm[TASK_COMM_LEN];
};

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well.
 */
struct trace_array {
	struct ring_buffer	*buffer;
	unsigned long		entries;
	int			cpu;
	cycle_t			time_start;
	struct task_struct	*waiter;
	struct trace_array_cpu	*data[NR_CPUS];
};

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * where "type" is the trace type that includes the trace_entry
 * as its "ent" item, and "id" is the trace identifier used in
 * the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
		IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry,	\
			  TRACE_KMEM_ALLOC);				\
		IF_ASSIGN(var, ent, struct kmemtrace_free_entry,	\
			  TRACE_KMEM_FREE);				\
		IF_ASSIGN(var, ent, struct ksym_trace_entry, TRACE_KSYM);\
		__ftrace_bad_type();					\
	} while (0)

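/*
 * Illustrative sketch (not part of this header): a print_line callback
 * typically narrows the generic trace_entry with trace_assign_type():
 *
 *	struct ftrace_entry *field;
 *
 *	if (iter->ent->type == TRACE_FN) {
 *		trace_assign_type(field, iter->ent);
 *		trace_seq_printf(&iter->seq, "%lx <- %lx\n",
 *				 field->ip, field->parent_ip);
 *	}
 *
 * A mismatch between the variable's type and the id triggers the
 * WARN_ON() in IF_ASSIGN(); an unknown type fails to link via
 * __ftrace_bad_type().
 */
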
/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value in the
 * flags value of struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b

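/*
 * Illustrative sketch (not part of this header): a tracer that exposes
 * one private option in the trace_options file would typically do:
 *
 *	#define TRACE_FOO_VERBOSE	0x1	// hypothetical flag bit
 *
 *	static struct tracer_opt foo_opts[] = {
 *		{ TRACER_OPT(foo-verbose, TRACE_FOO_VERBOSE) },
 *		{ }	// terminating empty entry
 *	};
 *
 *	static struct tracer_flags foo_flags = {
 *		.val  = 0,		// all options start off
 *		.opts = foo_opts,
 *	};
 *
 * and point its struct tracer ->flags at foo_flags; ->set_flag() is
 * then called whenever the user toggles "foo-verbose".
 */
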
/**
 * struct tracer - a specific tracer and its callbacks to interact with debugfs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @wait_pipe: override how the user waits for traces on trace_pipe
 * @close: called when the trace file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals that one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*wait_pipe)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(u32 old_flags, u32 bit, int set);
	struct tracer		*next;
	int			print_max;
	struct tracer_flags	*flags;
};

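/*
 * Illustrative sketch (not part of this header): a minimal tracer only
 * needs a name and an init callback; unused callbacks may be left NULL.
 * Registration is done once at boot:
 *
 *	static int foo_trace_init(struct trace_array *tr)	// hypothetical
 *	{
 *		tracing_reset_online_cpus(tr);
 *		return 0;
 *	}
 *
 *	static struct tracer foo_tracer __read_mostly = {
 *		.name	= "foo",
 *		.init	= foo_trace_init,
 *	};
 *
 *	static __init int init_foo_tracer(void)
 *	{
 *		return register_tracer(&foo_tracer);
 *	}
 *	device_initcall(init_foo_tracer);
 *
 * See the nop tracer (nop_trace below) for the simplest real example.
 */
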

#define TRACE_PIPE_ALL_CPU	-1

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
void tracing_reset_online_cpus(struct trace_array *tr);
void tracing_reset_current(int cpu);
void tracing_reset_current_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *trace_create_file(const char *name,
				 mode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc);

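/*
 * Illustrative sketch (not part of this header): a tracer records an
 * event by reserving space on the ring buffer, filling the entry in
 * place, and committing it:
 *
 *	struct ring_buffer_event *event;
 *	struct ftrace_entry *entry;
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_FN,
 *					  sizeof(*entry), flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->ip	 = ip;
 *	entry->parent_ip = parent_ip;
 *	trace_buffer_unlock_commit(buffer, event, flags, pc);
 *
 * On reservation failure the event is simply dropped.
 */
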
struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void default_wait_pipe(struct trace_iterator *iter);
void poll_wait_pipe(struct trace_iterator *iter);

void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);
int is_tracing_stopped(void);

#define KSYM_SELFTEST_ENTRY "ksym_selftest_dummy"
extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

#ifdef CONFIG_TRACER_MAX_TRACE
extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef CONFIG_STACKTRACE
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc);

void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void ftrace_trace_stack(struct ring_buffer *buffer,
				      unsigned long flags, int skip, int pc)
{
}

static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */

extern cycle_t ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif

extern int ring_buffer_expanded;
extern bool tracing_selftest_disabled;
DECLARE_PER_CPU(local_t, ftrace_cpu_disabled);

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_hw_branches(struct tracer *trace,
					      struct trace_array *tr);
extern int trace_selftest_startup_ksym(struct tracer *trace,
				       struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(cycle_t nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);

extern unsigned long trace_flags;

extern int trace_clock_id;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern enum print_line_t print_graph_function(struct trace_iterator *iter);
extern enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);

#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_count || test_tsk_trace_graph(current))
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i])
			return 1;
	}

	return 0;
}
#else
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
static inline int ftrace_trace_task(struct task_struct *task)
{
	/* Trace every task if no pid filter is set. */
	if (list_empty(&ftrace_pids))
		return 1;

	return test_tsk_trace_trace(task);
}
#else
static inline int ftrace_trace_task(struct task_struct *task)
{
	return 1;
}
#endif

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
			  size_t cnt, loff_t *ppos);

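/*
 * Illustrative sketch (not part of this header): a debugfs write handler
 * typically drives the parser like this (the buffer size is an assumption):
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, 128))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser)) {
 *		// parser.buffer now holds one NUL-terminated word
 *		handle_word(parser.buffer);	// hypothetical helper
 *		trace_parser_clear(&parser);
 *	}
 *
 *	trace_parser_put(&parser);
 *	return read;
 */
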
/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_SCHED_TREE		= 0x200,
	TRACE_ITER_PRINTK		= 0x400,
	TRACE_ITER_PREEMPTONLY		= 0x800,
	TRACE_ITER_BRANCH		= 0x1000,
	TRACE_ITER_ANNOTATE		= 0x2000,
	TRACE_ITER_USERSTACKTRACE	= 0x4000,
	TRACE_ITER_SYM_USEROBJ		= 0x8000,
	TRACE_ITER_PRINTK_MSGONLY	= 0x10000,
	TRACE_ITER_CONTEXT_INFO	= 0x20000, /* Print pid/cpu/time */
	TRACE_ITER_LATENCY_FMT		= 0x40000,
	TRACE_ITER_SLEEP_TIME		= 0x80000,
	TRACE_ITER_GRAPH_TIME		= 0x100000,
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;

/**
 * ftrace_preempt_disable - disable preemption, scheduler safe
 *
 * When tracing can happen inside the scheduler, there exist
 * cases where the tracing might happen before the need_resched
 * flag is checked. If this happens and the tracer calls
 * preempt_enable (after a disable), a schedule might take place,
 * causing an infinite recursion.
 *
 * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag; if it is set, then we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}

/**
 * ftrace_preempt_enable - enable preemption, scheduler safe
 * @resched: the return value from ftrace_preempt_disable
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. The preceding disable saved the state of
 * preemption. If resched is set, then we were either inside an atomic
 * section or inside the scheduler (we would have already scheduled
 * otherwise). In this case, we do not want to call the normal
 * preempt_enable, but preempt_enable_no_resched instead.
 */
static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

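/*
 * Illustrative sketch (not part of this header): the two helpers above
 * are always used as a pair around tracing work that must not trigger
 * a recursive schedule:
 *
 *	int resched;
 *
 *	resched = ftrace_preempt_disable();
 *	// ... record the trace entry ...
 *	ftrace_preempt_enable(resched);
 *
 * Passing anything other than the value returned by
 * ftrace_preempt_disable() defeats the need_resched bookkeeping.
 */
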
#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set the ring buffers to their default size if not already done */
int tracing_update_buffers(void);

/* trace event type bit fields, not numeric */
enum {
	TRACE_EVENT_TYPE_PRINTF		= 1,
	TRACE_EVENT_TYPE_RAW		= 2,
};

struct ftrace_event_field {
	struct list_head	link;
	char			*name;
	char			*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct event_filter {
	int			n_preds;
	struct filter_pred	**preds;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct dentry		*entry;
	struct event_filter	*filter;
	int			nr_events;
};

struct filter_pred;
struct regex;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event,
				 int val1, int val2);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

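/*
 * Illustrative sketch (not part of this header): filter_parse_regex()
 * below classifies a glob-style pattern into one of the match types,
 * e.g. for a string filter on "sched":
 *
 *	"sched"    -> MATCH_FULL        (string must equal "sched")
 *	"sched*"   -> MATCH_FRONT_ONLY  (string must start with "sched")
 *	"*sched*"  -> MATCH_MIDDLE_ONLY (string must contain "sched")
 *	"*sched"   -> MATCH_END_ONLY    (string must end with "sched")
 *
 * The chosen type selects the regex_match_func stored in struct regex.
 */
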
struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;
	int			field_len;
	regex_match_func	match;
};

struct filter_pred {
	filter_pred_fn_t	fn;
	u64			val;
	struct regex		regex;
	char			*field_name;
	int			offset;
	int			not;
	int			op;
	int			pop_n;
};

extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct ftrace_event_call *call,
			       struct trace_seq *s);
extern int apply_event_filter(struct ftrace_event_call *call,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct event_subsystem *system,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);

static inline int
filter_check_discard(struct ftrace_event_call *call, void *rec,
		     struct ring_buffer *buffer,
		     struct ring_buffer_event *event)
{
	if (unlikely(call->filter_active) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

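/*
 * Illustrative sketch (not part of this header): event tracers call
 * filter_check_discard() between reserving and committing, so entries
 * rejected by the active filter never reach the ring buffer:
 *
 *	entry = ring_buffer_event_data(event);
 *	// ... fill in entry ...
 *	if (!filter_check_discard(call, entry, buffer, event))
 *		trace_buffer_unlock_commit(buffer, event, flags, pc);
 *
 * A return value of 1 means the event was already discarded.
 */
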
extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print)	\
	extern struct ftrace_event_call event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
#include "trace_entries.h"

#endif /* _LINUX_KERNEL_TRACE_H */