tracing/branch-tracer: adapt to the stat tracing API
[linux-2.6-block.git] / kernel / trace / trace.h
#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/ftrace.h>
#include <trace/boot.h>

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_SPECIAL,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_BOOT_CALL,
	TRACE_BOOT_RET,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_HW_BRANCHES,
	TRACE_POWER,

	__TRACE_LAST_TYPE,
};

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned char		type;
	unsigned char		cpu;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
	int			tgid;
};

/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	unsigned long		parent_ip;
};

/* Function call entry */
struct ftrace_graph_ent_entry {
	struct trace_entry	ent;
	struct ftrace_graph_ent	graph_ent;
};

/* Function return entry */
struct ftrace_graph_ret_entry {
	struct trace_entry	ent;
	struct ftrace_graph_ret	ret;
};
extern struct tracer boot_tracer;

/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	struct trace_entry	ent;
	unsigned int		prev_pid;
	unsigned char		prev_prio;
	unsigned char		prev_state;
	unsigned int		next_pid;
	unsigned char		next_prio;
	unsigned char		next_state;
	unsigned int		next_cpu;
};

/*
 * Special (free-form) trace entry:
 */
struct special_entry {
	struct trace_entry	ent;
	unsigned long		arg1;
	unsigned long		arg2;
	unsigned long		arg3;
};

/*
 * Stack-trace entry:
 */

#define FTRACE_STACK_ENTRIES	8

struct stack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};

struct userstack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};

/*
 * ftrace_printk entry:
 */
struct print_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	int			depth;
	char			buf[];
};

#define TRACE_OLD_SIZE		88

struct trace_field_cont {
	unsigned char		type;
	/* Temporary till we get rid of this completely */
	char			buf[TRACE_OLD_SIZE - 1];
};

struct trace_mmiotrace_rw {
	struct trace_entry	ent;
	struct mmiotrace_rw	rw;
};

struct trace_mmiotrace_map {
	struct trace_entry	ent;
	struct mmiotrace_map	map;
};

struct trace_boot_call {
	struct trace_entry	ent;
	struct boot_trace_call	boot_call;
};

struct trace_boot_ret {
	struct trace_entry	ent;
	struct boot_trace_ret	boot_ret;
};

#define TRACE_FUNC_SIZE 30
#define TRACE_FILE_SIZE 20
struct trace_branch {
	struct trace_entry	ent;
	unsigned		line;
	char			func[TRACE_FUNC_SIZE+1];
	char			file[TRACE_FILE_SIZE+1];
	char			correct;
};

struct hw_branch_entry {
	struct trace_entry	ent;
	u64			from;
	u64			to;
};

struct trace_power {
	struct trace_entry	ent;
	struct power_trace	state_data;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF	  - interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED  - reschedule is requested
 *  HARDIRQ	  - inside an interrupt handler
 *  SOFTIRQ	  - inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
};

#define TRACE_BUF_SIZE		1024

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	atomic_t		disabled;

	/* these fields get copied into max-trace: */
	unsigned long		trace_idx;
	unsigned long		overrun;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	uid_t			uid;
	char			comm[TASK_COMM_LEN];
};

struct trace_iterator;

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct ring_buffer	*buffer;
	unsigned long		entries;
	int			cpu;
	cycle_t			time_start;
	struct task_struct	*waiter;
	struct trace_array_cpu	*data[NR_CPUS];
};

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
		IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER);	\
		__ftrace_bad_type();					\
	} while (0)

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2	/* Relay to other output functions */
};

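/*
 * Illustrative sketch (not part of this header): a print_line callback
 * typically pairs the print_line_t return values above with
 * trace_assign_type(). The tracer and function names below are
 * hypothetical.
 */
#if 0
static enum print_line_t example_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct ftrace_entry *field;

	if (entry->type != TRACE_FN)
		return TRACE_TYPE_UNHANDLED;	/* let other printers try */

	/* Verifies that entry->type matches TRACE_FN and casts for us. */
	trace_assign_type(field, entry);

	/* ... format field->ip and field->parent_ip into iter->seq ... */

	return TRACE_TYPE_HANDLED;
}
#endif
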
/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b

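/*
 * Illustrative sketch (not part of this header): defining one boolean
 * option with TRACER_OPT(). All example_* names are hypothetical.
 */
#if 0
#define TRACE_EXAMPLE_VERBOSE	0x1

static struct tracer_opt example_opts[] = {
	/* Expands to .name = "verbose", .bit = TRACE_EXAMPLE_VERBOSE */
	{ TRACER_OPT(verbose, TRACE_EXAMPLE_VERBOSE) },
	{ }	/* zero-terminated */
};

static struct tracer_flags example_flags = {
	.val	= 0,		/* initial value: option off */
	.opts	= example_opts,
};
#endif
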
/*
 * A specific tracer, represented by methods that operate on a trace array:
 */
struct tracer {
	const char		*name;
	/* Your tracer should raise a warning if init fails */
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(u32 old_flags, u32 bit, int set);
	struct tracer		*next;
	int			print_max;
	struct tracer_flags	*flags;

	/*
	 * If you change one of the following at runtime, call
	 * init_tracer_stat() again.
	 */

	/* Iteration over statistic entries */
	void			*(*stat_start)(void);
	void			*(*stat_next)(void *prev, int idx);
	/* Compare two entries for sorting (optional) for stats */
	int			(*stat_cmp)(void *p1, void *p2);
	/* Print a stat entry */
	int			(*stat_show)(struct seq_file *s, void *p);
	/* Print the headers of your stat entries */
	int			(*stat_headers)(struct seq_file *s);
};

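/*
 * Illustrative sketch (not part of this header): the skeleton of a tracer
 * wiring up the stat callbacks above, registered from an __init function
 * with register_tracer(). All example_* names are hypothetical.
 */
#if 0
static void *example_stat_start(void)
{
	return NULL;	/* first statistic entry, or NULL when empty */
}

static void *example_stat_next(void *prev, int idx)
{
	return NULL;	/* entry following prev, or NULL at the end */
}

static int example_stat_headers(struct seq_file *s)
{
	return 0;	/* print column headers into s */
}

static int example_stat_show(struct seq_file *s, void *p)
{
	return 0;	/* print one stat entry p into s */
}

static int example_init(struct trace_array *tr)
{
	return 0;	/* raise a warning here if init fails */
}

static struct tracer example_tracer = {
	.name		= "example",
	.init		= example_init,
	.stat_start	= example_stat_start,
	.stat_next	= example_stat_next,
	.stat_headers	= example_stat_headers,
	.stat_show	= example_stat_show,
};

static __init int init_example_tracer(void)
{
	return register_tracer(&example_tracer);
}
device_initcall(init_example_tracer);
#endif
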
struct trace_seq {
	unsigned char		buffer[PAGE_SIZE];
	unsigned int		len;
	unsigned int		readpos;
};

/*
 * Trace iterator - used by printout routines who present trace
 * results to users and which routines might sleep, etc:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	void			*private;
	struct ring_buffer_iter	*buffer_iter[NR_CPUS];

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	int			cpu;
	u64			ts;

	unsigned long		iter_flags;
	loff_t			pos;
	long			idx;

	cpumask_t		started;
};

int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
void tracing_reset_online_cpus(struct trace_array *tr);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);
void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);

void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);
void tracing_record_cmdline(struct task_struct *tsk);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
		    struct trace_array_cpu *data,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void trace_hw_branch(struct trace_array *tr, u64 from, u64 to);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);

void init_tracer_stat(struct tracer *trace);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

extern cycle_t ftrace_now(int cpu);

#ifdef CONFIG_FUNCTION_TRACER
void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
#else
# define tracing_start_function_trace()		do { } while (0)
# define tracing_stop_function_trace()		do { } while (0)
#endif

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
			void *__rq,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t		func;
	void				*private;
	struct tracer_switch_ops	*next;
};

char *trace_find_cmdline(int pid);
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern long ns2usecs(cycle_t nsec);
extern int
trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args);

extern unsigned long trace_flags;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern enum print_line_t print_graph_function(struct trace_iterator *iter);

#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_count || test_tsk_trace_graph(current))
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i])
			return 1;
	}

	return 0;
}
#else
static inline int ftrace_trace_addr(unsigned long addr)
{
	return 1;
}
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct pid *ftrace_pid_trace;

static inline int ftrace_trace_task(struct task_struct *task)
{
	if (!ftrace_pid_trace)
		return 1;

	return test_tsk_trace_trace(task);
}

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_SCHED_TREE		= 0x200,
	TRACE_ITER_PRINTK		= 0x400,
	TRACE_ITER_PREEMPTONLY		= 0x800,
	TRACE_ITER_BRANCH		= 0x1000,
	TRACE_ITER_ANNOTATE		= 0x2000,
	TRACE_ITER_USERSTACKTRACE	= 0x4000,
	TRACE_ITER_SYM_USEROBJ		= 0x8000,
	TRACE_ITER_PRINTK_MSGONLY	= 0x10000
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

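/*
 * Illustrative sketch (not part of this header): output code tests these
 * bits against the global trace_flags, e.g. when deciding how a kernel
 * symbol should be formatted. The function name is hypothetical.
 */
#if 0
static int example_want_sym_offset(void)
{
	/* None of the symbol-formatting bits set: raw addresses only */
	if (!(trace_flags & TRACE_ITER_SYM_MASK))
		return 0;

	return !!(trace_flags & TRACE_ITER_SYM_OFFSET);
}
#endif
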
extern struct tracer nop_trace;

/**
 * ftrace_preempt_disable - disable preemption scheduler safe
 *
 * When tracing can happen inside the scheduler, there exist
 * cases where the tracing might happen before the need_resched
 * flag is checked. If this happens and the tracer calls
 * preempt_enable (after a disable), a schedule might take place
 * causing an infinite recursion.
 *
 * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag, if it is set, then we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}

/**
 * ftrace_preempt_enable - enable preemption scheduler safe
 * @resched: the return value from ftrace_preempt_disable
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. The disable saved the state of preemption.
 * If resched is set, then we were either inside an atomic or
 * are inside the scheduler (we would have already scheduled
 * otherwise). In this case, we do not want to call normal
 * preempt_enable, but preempt_enable_no_resched instead.
 */
static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

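/*
 * Illustrative sketch (not part of this header): the usual pattern in a
 * tracing callback that may fire from within the scheduler. The function
 * name is hypothetical.
 */
#if 0
static void example_trace_call(unsigned long ip, unsigned long parent_ip)
{
	int resched;

	resched = ftrace_preempt_disable();

	/* ... record the event; preemption is safely disabled here ... */

	ftrace_preempt_enable(resched);
}
#endif
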
#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

#endif /* _LINUX_KERNEL_TRACE_H */