Commit | Line | Data |
---|---|---|
bc0c38d1 SR |
1 | #ifndef _LINUX_KERNEL_TRACE_H |
2 | #define _LINUX_KERNEL_TRACE_H | |
3 | ||
4 | #include <linux/fs.h> | |
5 | #include <asm/atomic.h> | |
6 | #include <linux/sched.h> | |
7 | #include <linux/clocksource.h> | |
3928a8a2 | 8 | #include <linux/ring_buffer.h> |
bd8ac686 | 9 | #include <linux/mmiotrace.h> |
d13744cd | 10 | #include <linux/ftrace.h> |
3f5ec136 | 11 | #include <trace/boot.h> |
02af61bb | 12 | #include <linux/kmemtrace.h> |
12922110 | 13 | #include <trace/power.h> |
bc0c38d1 | 14 | |
9504504c | 15 | #include <linux/trace_seq.h> |
97f20251 | 16 | #include <linux/ftrace_event.h> |
9504504c | 17 | |
72829bc3 TG |
18 | enum trace_type { |
19 | __TRACE_FIRST_TYPE = 0, | |
20 | ||
21 | TRACE_FN, | |
22 | TRACE_CTX, | |
23 | TRACE_WAKE, | |
24 | TRACE_STACK, | |
dd0e545f | 25 | TRACE_PRINT, |
48ead020 | 26 | TRACE_BPRINT, |
72829bc3 | 27 | TRACE_SPECIAL, |
bd8ac686 PP |
28 | TRACE_MMIO_RW, |
29 | TRACE_MMIO_MAP, | |
9f029e83 | 30 | TRACE_BRANCH, |
74239072 FW |
31 | TRACE_BOOT_CALL, |
32 | TRACE_BOOT_RET, | |
287b6e68 FW |
33 | TRACE_GRAPH_RET, |
34 | TRACE_GRAPH_ENT, | |
02b67518 | 35 | TRACE_USER_STACK, |
a93751ca | 36 | TRACE_HW_BRANCHES, |
ee08c6ec FW |
37 | TRACE_SYSCALL_ENTER, |
38 | TRACE_SYSCALL_EXIT, | |
36994e58 FW |
39 | TRACE_KMEM_ALLOC, |
40 | TRACE_KMEM_FREE, | |
f3f47a67 | 41 | TRACE_POWER, |
c71a8961 | 42 | TRACE_BLK, |
72829bc3 | 43 | |
f0868d1e | 44 | __TRACE_LAST_TYPE, |
72829bc3 TG |
45 | }; |
46 | ||
bc0c38d1 SR |
47 | /* |
48 | * Function trace entry - function address and parent function address: | |
49 | */ | |
50 | struct ftrace_entry { | |
777e208d | 51 | struct trace_entry ent; |
bc0c38d1 SR |
52 | unsigned long ip; |
53 | unsigned long parent_ip; | |
54 | }; | |
15e6cb36 | 55 | |
287b6e68 FW |
56 | /* Function call entry */ |
57 | struct ftrace_graph_ent_entry { | |
b91facc3 | 58 | struct trace_entry ent; |
287b6e68 FW |
59 | struct ftrace_graph_ent graph_ent; |
60 | }; | |
61 | ||
15e6cb36 | 62 | /* Function return entry */ |
287b6e68 | 63 | struct ftrace_graph_ret_entry { |
b91facc3 | 64 | struct trace_entry ent; |
287b6e68 | 65 | struct ftrace_graph_ret ret; |
15e6cb36 | 66 | }; |
d13744cd | 67 | extern struct tracer boot_tracer; |
bc0c38d1 SR |
68 | |
69 | /* | |
70 | * Context switch trace entry - which task (and prio) we switched from/to: | |
71 | */ | |
72 | struct ctx_switch_entry { | |
777e208d | 73 | struct trace_entry ent; |
bc0c38d1 SR |
74 | unsigned int prev_pid; |
75 | unsigned char prev_prio; | |
76 | unsigned char prev_state; | |
77 | unsigned int next_pid; | |
78 | unsigned char next_prio; | |
bac524d3 | 79 | unsigned char next_state; |
80b5e940 | 80 | unsigned int next_cpu; |
bc0c38d1 SR |
81 | }; |
82 | ||
f0a920d5 IM |
83 | /* |
84 | * Special (free-form) trace entry: | |
85 | */ | |
86 | struct special_entry { | |
777e208d | 87 | struct trace_entry ent; |
f0a920d5 IM |
88 | unsigned long arg1; |
89 | unsigned long arg2; | |
90 | unsigned long arg3; | |
91 | }; | |
92 | ||
86387f7e IM |
93 | /* |
94 | * Stack-trace entry: | |
95 | */ | |
96 | ||
74f4e369 | 97 | #define FTRACE_STACK_ENTRIES 8 |
86387f7e IM |
98 | |
99 | struct stack_entry { | |
777e208d | 100 | struct trace_entry ent; |
86387f7e IM |
101 | unsigned long caller[FTRACE_STACK_ENTRIES]; |
102 | }; | |
103 | ||
02b67518 TE |
104 | struct userstack_entry { |
105 | struct trace_entry ent; | |
106 | unsigned long caller[FTRACE_STACK_ENTRIES]; | |
107 | }; | |
108 | ||
dd0e545f | 109 | /* |
5e1607a0 | 110 | * trace_printk entry: |
dd0e545f | 111 | */ |
48ead020 | 112 | struct bprint_entry { |
777e208d | 113 | struct trace_entry ent; |
9de36825 | 114 | unsigned long ip; |
769b0441 | 115 | const char *fmt; |
9de36825 | 116 | u32 buf[]; |
1427cdf0 | 117 | }; |
1427cdf0 | 118 | |
48ead020 FW |
119 | struct print_entry { |
120 | struct trace_entry ent; | |
121 | unsigned long ip; | |
48ead020 FW |
122 | char buf[]; |
123 | }; | |
124 | ||
777e208d SR |
125 | #define TRACE_OLD_SIZE 88 |
126 | ||
127 | struct trace_field_cont { | |
128 | unsigned char type; | |
129 | /* Temporary till we get rid of this completely */ | |
130 | char buf[TRACE_OLD_SIZE - 1]; | |
131 | }; | |
132 | ||
133 | struct trace_mmiotrace_rw { | |
134 | struct trace_entry ent; | |
135 | struct mmiotrace_rw rw; | |
136 | }; | |
137 | ||
138 | struct trace_mmiotrace_map { | |
139 | struct trace_entry ent; | |
140 | struct mmiotrace_map map; | |
141 | }; | |
142 | ||
74239072 | 143 | struct trace_boot_call { |
777e208d | 144 | struct trace_entry ent; |
74239072 FW |
145 | struct boot_trace_call boot_call; |
146 | }; | |
147 | ||
148 | struct trace_boot_ret { | |
149 | struct trace_entry ent; | |
150 | struct boot_trace_ret boot_ret; | |
777e208d SR |
151 | }; |
152 | ||
52f232cb SR |
153 | #define TRACE_FUNC_SIZE 30 |
154 | #define TRACE_FILE_SIZE 20 | |
9f029e83 | 155 | struct trace_branch { |
52f232cb SR |
156 | struct trace_entry ent; |
157 | unsigned line; | |
158 | char func[TRACE_FUNC_SIZE+1]; | |
159 | char file[TRACE_FILE_SIZE+1]; | |
160 | char correct; | |
161 | }; | |
162 | ||
a93751ca | 163 | struct hw_branch_entry { |
1e9b51c2 | 164 | struct trace_entry ent; |
a93751ca MM |
165 | u64 from; |
166 | u64 to; | |
1e9b51c2 MM |
167 | }; |
168 | ||
f3f47a67 AV |
169 | struct trace_power { |
170 | struct trace_entry ent; | |
171 | struct power_trace state_data; | |
172 | }; | |
173 | ||
ca2b84cb EGM |
174 | enum kmemtrace_type_id { |
175 | KMEMTRACE_TYPE_KMALLOC = 0, /* kmalloc() or kfree(). */ | |
176 | KMEMTRACE_TYPE_CACHE, /* kmem_cache_*(). */ | |
177 | KMEMTRACE_TYPE_PAGES, /* __get_free_pages() and friends. */ | |
178 | }; | |
179 | ||
36994e58 FW |
180 | struct kmemtrace_alloc_entry { |
181 | struct trace_entry ent; | |
182 | enum kmemtrace_type_id type_id; | |
183 | unsigned long call_site; | |
184 | const void *ptr; | |
185 | size_t bytes_req; | |
186 | size_t bytes_alloc; | |
187 | gfp_t gfp_flags; | |
188 | int node; | |
189 | }; | |
190 | ||
191 | struct kmemtrace_free_entry { | |
192 | struct trace_entry ent; | |
193 | enum kmemtrace_type_id type_id; | |
194 | unsigned long call_site; | |
195 | const void *ptr; | |
196 | }; | |
197 | ||
bed1ffca FW |
198 | struct syscall_trace_enter { |
199 | struct trace_entry ent; | |
200 | int nr; | |
201 | unsigned long args[]; | |
202 | }; | |
203 | ||
204 | struct syscall_trace_exit { | |
205 | struct trace_entry ent; | |
206 | int nr; | |
207 | unsigned long ret; | |
208 | }; | |
209 | ||
210 | ||
fc5e27ae PP |
211 | /* |
212 | * trace_flag_type is an enumeration that holds different | |
213 | * states when a trace occurs. These are: | |
9244489a | 214 | * IRQS_OFF - interrupts were disabled |
9de36825 | 215 | * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags |
9244489a SR |
216 | * NEED_RESCHED - reschedule is requested | |
217 | * HARDIRQ - inside an interrupt handler | |
218 | * SOFTIRQ - inside a softirq handler | |
fc5e27ae PP |
219 | */ |
220 | enum trace_flag_type { | |
221 | TRACE_FLAG_IRQS_OFF = 0x01, | |
9244489a SR |
222 | TRACE_FLAG_IRQS_NOSUPPORT = 0x02, |
223 | TRACE_FLAG_NEED_RESCHED = 0x04, | |
224 | TRACE_FLAG_HARDIRQ = 0x08, | |
225 | TRACE_FLAG_SOFTIRQ = 0x10, | |
fc5e27ae PP |
226 | }; |
227 | ||
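As a quick illustration of how these bits get set, here is a condensed sketch of the logic tracing_generic_entry_update() (trace.c) applies when recording an entry. The helper name is hypothetical, and the first term assumes CONFIG_TRACE_IRQFLAGS_SUPPORT (without it, TRACE_FLAG_IRQS_NOSUPPORT is used instead):

```c
#include <linux/hardirq.h>	/* HARDIRQ_MASK, SOFTIRQ_MASK */

/* Hypothetical helper; condensed from tracing_generic_entry_update(). */
static void my_entry_update(struct trace_entry *entry,
			    unsigned long flags, int pc)
{
	entry->flags =
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
```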
5bf9a1ee | 228 | #define TRACE_BUF_SIZE 1024 |
bc0c38d1 SR |
229 | |
230 | /* | |
231 | * The CPU trace array - it consists of thousands of trace entries | |
232 | * plus some other descriptor data (for example, which task | |
233 | * started the trace). | |
234 | */ | |
235 | struct trace_array_cpu { | |
bc0c38d1 | 236 | atomic_t disabled; |
2cadf913 | 237 | void *buffer_page; /* ring buffer spare */ |
4e3c3333 | 238 | |
c7aafc54 | 239 | /* these fields get copied into max-trace: */ |
c7aafc54 | 240 | unsigned long trace_idx; |
53d0aa77 | 241 | unsigned long overrun; |
bc0c38d1 SR |
242 | unsigned long saved_latency; |
243 | unsigned long critical_start; | |
244 | unsigned long critical_end; | |
245 | unsigned long critical_sequence; | |
246 | unsigned long nice; | |
247 | unsigned long policy; | |
248 | unsigned long rt_priority; | |
249 | cycle_t preempt_timestamp; | |
250 | pid_t pid; | |
251 | uid_t uid; | |
252 | char comm[TASK_COMM_LEN]; | |
253 | }; | |
254 | ||
bc0c38d1 SR |
255 | /* |
256 | * The trace array - an array of per-CPU trace arrays. This is the | |
257 | * highest level data structure that individual tracers deal with. | |
258 | * They have on/off state as well: | |
259 | */ | |
260 | struct trace_array { | |
3928a8a2 | 261 | struct ring_buffer *buffer; |
bc0c38d1 | 262 | unsigned long entries; |
bc0c38d1 SR |
263 | int cpu; |
264 | cycle_t time_start; | |
b3806b43 | 265 | struct task_struct *waiter; |
bc0c38d1 SR |
266 | struct trace_array_cpu *data[NR_CPUS]; |
267 | }; | |
268 | ||
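A sketch of the per-CPU access pattern tracers build on top of these two structures, mirroring what the function tracer does; the callback name is hypothetical:

```c
/* Hypothetical callback showing the usual per-CPU discipline: bump the
 * disabled counter to guard against recursion, trace only if we are
 * the first one in, then drop the counter again. */
static void my_trace_call(struct trace_array *tr, unsigned long ip,
			  unsigned long parent_ip)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, preempt_count());
	}

	atomic_dec(&data->disabled);
}
```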
7104f300 SR |
269 | #define FTRACE_CMP_TYPE(var, type) \ |
270 | __builtin_types_compatible_p(typeof(var), type *) | |
271 | ||
272 | #undef IF_ASSIGN | |
273 | #define IF_ASSIGN(var, entry, etype, id) \ | |
274 | if (FTRACE_CMP_TYPE(var, etype)) { \ | |
275 | var = (typeof(var))(entry); \ | |
276 | WARN_ON(id && (entry)->type != id); \ | |
277 | break; \ | |
278 | } | |
279 | ||
280 | /* Will cause compile errors if type is not found. */ | |
281 | extern void __ftrace_bad_type(void); | |
282 | ||
283 | /* | |
284 | * The trace_assign_type is a verifier that the entry type is | |
285 | * the same as the type being assigned. To add new types simply | |
286 | * add a line with the following format: | |
287 | * | |
288 | * IF_ASSIGN(var, ent, type, id); | |
289 | * | |
290 | * Where "type" is the trace type that includes the trace_entry | |
291 | * as the "ent" item, and "id" is the trace identifier used | |
292 | * in the trace_type enum. | |
293 | * | |
294 | * If the type can have more than one id, then use zero. | |
295 | */ | |
296 | #define trace_assign_type(var, ent) \ | |
297 | do { \ | |
298 | IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \ | |
299 | IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \ | |
7104f300 | 300 | IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \ |
02b67518 | 301 | IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\ |
7104f300 | 302 | IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \ |
48ead020 | 303 | IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \ |
7104f300 SR |
304 | IF_ASSIGN(var, ent, struct special_entry, 0); \ |
305 | IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \ | |
306 | TRACE_MMIO_RW); \ | |
307 | IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \ | |
308 | TRACE_MMIO_MAP); \ | |
74239072 FW |
309 | IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\ |
310 | IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\ | |
9f029e83 | 311 | IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \ |
287b6e68 FW |
312 | IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \ |
313 | TRACE_GRAPH_ENT); \ | |
314 | IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \ | |
315 | TRACE_GRAPH_RET); \ | |
a93751ca | 316 | IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\ |
9de36825 | 317 | IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \ |
36994e58 FW |
318 | IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry, \ |
319 | TRACE_KMEM_ALLOC); \ | |
320 | IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \ | |
321 | TRACE_KMEM_FREE); \ | |
bed1ffca FW |
322 | IF_ASSIGN(var, ent, struct syscall_trace_enter, \ |
323 | TRACE_SYSCALL_ENTER); \ | |
324 | IF_ASSIGN(var, ent, struct syscall_trace_exit, \ | |
325 | TRACE_SYSCALL_EXIT); \ | |
7104f300 SR |
326 | __ftrace_bad_type(); \ |
327 | } while (0) | |
2c4f035f | 328 | |
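A sketch of trace_assign_type() in use from a print callback; the callback is hypothetical, and the ent/seq fields come from struct trace_iterator (linux/ftrace_event.h):

```c
/* Hypothetical print callback: the macro both casts and verifies. */
static enum print_line_t my_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct ftrace_entry *field;

	if (entry->type != TRACE_FN)
		return TRACE_TYPE_UNHANDLED;

	trace_assign_type(field, entry);	/* WARN_ON fires on mismatch */
	trace_seq_printf(&iter->seq, "%lx <-- %lx\n",
			 field->ip, field->parent_ip);
	return TRACE_TYPE_HANDLED;
}
```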
adf9f195 FW |
329 | /* |
330 | * An option specific to a tracer. This is a boolean value. | |
331 | * The bit is the bit mask that sets or clears its value in the | |
332 | * val field of struct tracer_flags. | |
333 | */ | |
334 | struct tracer_opt { | |
9de36825 IM |
335 | const char *name; /* Will appear on the trace_options file */ |
336 | u32 bit; /* Mask assigned in val field in tracer_flags */ | |
adf9f195 FW |
337 | }; |
338 | ||
339 | /* | |
340 | * The set of specific options for a tracer. Your tracer | |
341 | * has to set the initial value of the flags val. | |
342 | */ | |
343 | struct tracer_flags { | |
344 | u32 val; | |
9de36825 | 345 | struct tracer_opt *opts; |
adf9f195 FW |
346 | }; |
347 | ||
348 | /* Makes it easier to define a tracer opt */ | |
349 | #define TRACER_OPT(s, b) .name = #s, .bit = b | |
350 | ||
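A sketch of how a tracer would declare its private options with these structures and the TRACER_OPT helper; every name below is hypothetical:

```c
#define MY_OPT_VERBOSE	0x1	/* bit mask stored in tracer_flags.val */

static struct tracer_opt my_tracer_opts[] = {
	/* "verbose" becomes an entry in the trace_options file */
	{ TRACER_OPT(verbose, MY_OPT_VERBOSE) },
	{ }	/* terminator */
};

static struct tracer_flags my_tracer_flags = {
	.val	= 0,			/* all options start off */
	.opts	= my_tracer_opts,
};
```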
034939b6 | 351 | |
6eaaa5d5 FW |
352 | /** |
353 | * struct tracer - a specific tracer and its callbacks to interact with debugfs | |
354 | * @name: the name chosen to select it on the available_tracers file | |
355 | * @init: called when one switches to this tracer (echo name > current_tracer) | |
356 | * @reset: called when one switches to another tracer | |
357 | * @start: called when tracing is unpaused (echo 1 > tracing_enabled) | |
358 | * @stop: called when tracing is paused (echo 0 > tracing_enabled) | |
359 | * @open: called when the trace file is opened | |
360 | * @pipe_open: called when the trace_pipe file is opened | |
361 | * @wait_pipe: override how the user waits for traces on trace_pipe | |
362 | * @close: called when the trace file is released | |
363 | * @read: override the default read callback on trace_pipe | |
364 | * @splice_read: override the default splice_read callback on trace_pipe | |
365 | * @selftest: selftest to run on boot (see trace_selftest.c) | |
366 | * @print_header: override the first lines that describe your columns | |
367 | * @print_line: callback that prints a trace | |
368 | * @set_flag: signals one of your private flags changed (trace_options file) | |
369 | * @flags: your private flags | |
bc0c38d1 SR |
370 | */ |
371 | struct tracer { | |
372 | const char *name; | |
1c80025a | 373 | int (*init)(struct trace_array *tr); |
bc0c38d1 | 374 | void (*reset)(struct trace_array *tr); |
9036990d SR |
375 | void (*start)(struct trace_array *tr); |
376 | void (*stop)(struct trace_array *tr); | |
bc0c38d1 | 377 | void (*open)(struct trace_iterator *iter); |
107bad8b | 378 | void (*pipe_open)(struct trace_iterator *iter); |
6eaaa5d5 | 379 | void (*wait_pipe)(struct trace_iterator *iter); |
bc0c38d1 | 380 | void (*close)(struct trace_iterator *iter); |
107bad8b SR |
381 | ssize_t (*read)(struct trace_iterator *iter, |
382 | struct file *filp, char __user *ubuf, | |
383 | size_t cnt, loff_t *ppos); | |
3c56819b EGM |
384 | ssize_t (*splice_read)(struct trace_iterator *iter, |
385 | struct file *filp, | |
386 | loff_t *ppos, | |
387 | struct pipe_inode_info *pipe, | |
388 | size_t len, | |
389 | unsigned int flags); | |
60a11774 SR |
390 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
391 | int (*selftest)(struct tracer *trace, | |
392 | struct trace_array *tr); | |
393 | #endif | |
8bba1bf5 | 394 | void (*print_header)(struct seq_file *m); |
2c4f035f | 395 | enum print_line_t (*print_line)(struct trace_iterator *iter); |
adf9f195 FW |
396 | /* If you handled the flag setting, return 0 */ |
397 | int (*set_flag)(u32 old_flags, u32 bit, int set); | |
bc0c38d1 SR |
398 | struct tracer *next; |
399 | int print_max; | |
9de36825 | 400 | struct tracer_flags *flags; |
034939b6 | 401 | struct tracer_stat *stats; |
bc0c38d1 SR |
402 | }; |
403 | ||
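And a minimal, hypothetical tracer wired into the callbacks above; once register_tracer() succeeds it can be selected with `echo mytracer > current_tracer` (my_tracer_flags is the sketch from the previous example):

```c
static int my_tracer_init(struct trace_array *tr)
{
	return 0;	/* a nonzero return refuses activation */
}

static void my_tracer_reset(struct trace_array *tr)
{
}

static struct tracer my_tracer __read_mostly = {
	.name	= "mytracer",	/* listed in available_tracers */
	.init	= my_tracer_init,
	.reset	= my_tracer_reset,
	.flags	= &my_tracer_flags,
};

static __init int init_my_tracer(void)
{
	return register_tracer(&my_tracer);
}
device_initcall(init_my_tracer);
```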
f9520750 | 404 | |
b04cc6b1 FW |
405 | #define TRACE_PIPE_ALL_CPU -1 |
406 | ||
b6f11df2 | 407 | int tracer_init(struct tracer *t, struct trace_array *tr); |
9036990d | 408 | int tracing_is_enabled(void); |
45dcd8b8 | 409 | void trace_wake_up(void); |
3928a8a2 | 410 | void tracing_reset(struct trace_array *tr, int cpu); |
213cc060 | 411 | void tracing_reset_online_cpus(struct trace_array *tr); |
9456f0fa SR |
412 | void tracing_reset_current(int cpu); |
413 | void tracing_reset_current_online_cpus(void); | |
bc0c38d1 | 414 | int tracing_open_generic(struct inode *inode, struct file *filp); |
5452af66 FW |
415 | struct dentry *trace_create_file(const char *name, |
416 | mode_t mode, | |
417 | struct dentry *parent, | |
418 | void *data, | |
419 | const struct file_operations *fops); | |
420 | ||
bc0c38d1 | 421 | struct dentry *tracing_init_dentry(void); |
d618b3e6 IM |
422 | void init_tracer_sysprof_debugfs(struct dentry *d_tracer); |
423 | ||
51a763dd ACM |
424 | struct ring_buffer_event; |
425 | ||
426 | struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr, | |
7a4f453b | 427 | int type, |
51a763dd ACM |
428 | unsigned long len, |
429 | unsigned long flags, | |
430 | int pc); | |
431 | void trace_buffer_unlock_commit(struct trace_array *tr, | |
432 | struct ring_buffer_event *event, | |
433 | unsigned long flags, int pc); | |
434 | ||
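These two declarations bracket the usual reserve/fill/commit sequence; a sketch modeled on trace_function() in trace.c, with a hypothetical wrapper name:

```c
static void my_record_fn(struct trace_array *tr, unsigned long ip,
			 unsigned long parent_ip, unsigned long flags,
			 int pc)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;		/* buffer full or tracing disabled */

	entry = ring_buffer_event_data(event);
	entry->ip	 = ip;
	entry->parent_ip = parent_ip;

	trace_buffer_unlock_commit(tr, event, flags, pc);
}
```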
45dcd8b8 PP |
435 | struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, |
436 | struct trace_array_cpu *data); | |
c4a8e8be FW |
437 | |
438 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, | |
439 | int *ent_cpu, u64 *ent_ts); | |
440 | ||
45dcd8b8 | 441 | void tracing_generic_entry_update(struct trace_entry *entry, |
38697053 SR |
442 | unsigned long flags, |
443 | int pc); | |
45dcd8b8 | 444 | |
6eaaa5d5 FW |
445 | void default_wait_pipe(struct trace_iterator *iter); |
446 | void poll_wait_pipe(struct trace_iterator *iter); | |
447 | ||
bc0c38d1 SR |
448 | void ftrace(struct trace_array *tr, |
449 | struct trace_array_cpu *data, | |
450 | unsigned long ip, | |
451 | unsigned long parent_ip, | |
38697053 | 452 | unsigned long flags, int pc); |
bc0c38d1 | 453 | void tracing_sched_switch_trace(struct trace_array *tr, |
bc0c38d1 SR |
454 | struct task_struct *prev, |
455 | struct task_struct *next, | |
38697053 | 456 | unsigned long flags, int pc); |
57422797 IM |
457 | |
458 | void tracing_sched_wakeup_trace(struct trace_array *tr, | |
57422797 IM |
459 | struct task_struct *wakee, |
460 | struct task_struct *cur, | |
38697053 | 461 | unsigned long flags, int pc); |
f0a920d5 IM |
462 | void trace_special(struct trace_array *tr, |
463 | struct trace_array_cpu *data, | |
464 | unsigned long arg1, | |
465 | unsigned long arg2, | |
38697053 | 466 | unsigned long arg3, int pc); |
6fb44b71 | 467 | void trace_function(struct trace_array *tr, |
6fb44b71 SR |
468 | unsigned long ip, |
469 | unsigned long parent_ip, | |
38697053 | 470 | unsigned long flags, int pc); |
bc0c38d1 | 471 | |
287b6e68 | 472 | void trace_graph_return(struct ftrace_graph_ret *trace); |
e49dc19c | 473 | int trace_graph_entry(struct ftrace_graph_ent *trace); |
1e9b51c2 | 474 | |
41bc8144 SR |
475 | void tracing_start_cmdline_record(void); |
476 | void tracing_stop_cmdline_record(void); | |
e168e051 SR |
477 | void tracing_sched_switch_assign_trace(struct trace_array *tr); |
478 | void tracing_stop_sched_switch_record(void); | |
479 | void tracing_start_sched_switch_record(void); | |
bc0c38d1 SR |
480 | int register_tracer(struct tracer *type); |
481 | void unregister_tracer(struct tracer *type); | |
482 | ||
483 | extern unsigned long nsecs_to_usecs(unsigned long nsecs); | |
484 | ||
485 | extern unsigned long tracing_max_latency; | |
486 | extern unsigned long tracing_thresh; | |
487 | ||
488 | void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu); | |
489 | void update_max_tr_single(struct trace_array *tr, | |
490 | struct task_struct *tsk, int cpu); | |
491 | ||
c0a0d0d3 FW |
492 | #ifdef CONFIG_STACKTRACE |
493 | void ftrace_trace_stack(struct trace_array *tr, unsigned long flags, | |
494 | int skip, int pc); | |
495 | ||
496 | void ftrace_trace_userstack(struct trace_array *tr, unsigned long flags, | |
497 | int pc); | |
498 | ||
499 | void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, | |
500 | int pc); | |
501 | #else | |
502 | static inline void ftrace_trace_stack(struct trace_array *tr, | |
503 | unsigned long flags, int skip, int pc) | |
504 | { | |
505 | } | |
506 | ||
507 | static inline void ftrace_trace_userstack(struct trace_array *tr, | |
508 | unsigned long flags, int pc) | |
509 | { | |
510 | } | |
511 | ||
512 | static inline void __trace_stack(struct trace_array *tr, unsigned long flags, | |
513 | int skip, int pc) | |
514 | { | |
515 | } | |
516 | #endif /* CONFIG_STACKTRACE */ | |
53614991 | 517 | |
e309b41d | 518 | extern cycle_t ftrace_now(int cpu); |
bc0c38d1 | 519 | |
bc0c38d1 SR |
520 | #ifdef CONFIG_CONTEXT_SWITCH_TRACER |
521 | typedef void | |
522 | (*tracer_switch_func_t)(void *private, | |
5b82a1b0 | 523 | void *__rq, |
bc0c38d1 SR |
524 | struct task_struct *prev, |
525 | struct task_struct *next); | |
526 | ||
527 | struct tracer_switch_ops { | |
528 | tracer_switch_func_t func; | |
529 | void *private; | |
530 | struct tracer_switch_ops *next; | |
531 | }; | |
bc0c38d1 SR |
532 | #endif /* CONFIG_CONTEXT_SWITCH_TRACER */ |
533 | ||
4ca53085 | 534 | extern void trace_find_cmdline(int pid, char comm[]); |
f7d48cbd | 535 | |
bc0c38d1 SR |
536 | #ifdef CONFIG_DYNAMIC_FTRACE |
537 | extern unsigned long ftrace_update_tot_cnt; | |
d05cdb25 SR |
538 | #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func |
539 | extern int DYN_FTRACE_TEST_NAME(void); | |
bc0c38d1 SR |
540 | #endif |
541 | ||
020e5f85 LZ |
542 | extern int ring_buffer_expanded; |
543 | extern bool tracing_selftest_disabled; | |
5e5bf483 | 544 | DECLARE_PER_CPU(local_t, ftrace_cpu_disabled); |
020e5f85 | 545 | |
60a11774 | 546 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
60a11774 SR |
547 | extern int trace_selftest_startup_function(struct tracer *trace, |
548 | struct trace_array *tr); | |
7447dce9 FW |
549 | extern int trace_selftest_startup_function_graph(struct tracer *trace, |
550 | struct trace_array *tr); | |
60a11774 SR |
551 | extern int trace_selftest_startup_irqsoff(struct tracer *trace, |
552 | struct trace_array *tr); | |
60a11774 SR |
553 | extern int trace_selftest_startup_preemptoff(struct tracer *trace, |
554 | struct trace_array *tr); | |
60a11774 SR |
555 | extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace, |
556 | struct trace_array *tr); | |
60a11774 SR |
557 | extern int trace_selftest_startup_wakeup(struct tracer *trace, |
558 | struct trace_array *tr); | |
fb1b6d8b SN |
559 | extern int trace_selftest_startup_nop(struct tracer *trace, |
560 | struct trace_array *tr); | |
60a11774 SR |
561 | extern int trace_selftest_startup_sched_switch(struct tracer *trace, |
562 | struct trace_array *tr); | |
a6dd24f8 IM |
563 | extern int trace_selftest_startup_sysprof(struct tracer *trace, |
564 | struct trace_array *tr); | |
80e5ea45 SR |
565 | extern int trace_selftest_startup_branch(struct tracer *trace, |
566 | struct trace_array *tr); | |
321bb5e1 MM |
567 | extern int trace_selftest_startup_hw_branches(struct tracer *trace, |
568 | struct trace_array *tr); | |
60a11774 SR |
569 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ |
570 | ||
c7aafc54 | 571 | extern void *head_page(struct trace_array_cpu *data); |
cf8e3474 | 572 | extern unsigned long long ns2usecs(cycle_t nsec); |
1fd8f2a3 | 573 | extern int |
40ce74f1 | 574 | trace_vbprintk(unsigned long ip, const char *fmt, va_list args); |
48ead020 | 575 | extern int |
40ce74f1 | 576 | trace_vprintk(unsigned long ip, const char *fmt, va_list args); |
c7aafc54 | 577 | |
4e655519 IM |
578 | extern unsigned long trace_flags; |
579 | ||
15e6cb36 | 580 | /* Standard output formatting function used for function return traces */ |
fb52607a FW |
581 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
582 | extern enum print_line_t print_graph_function(struct trace_iterator *iter); | |
0706f1c4 SR |
583 | extern enum print_line_t |
584 | trace_print_graph_duration(unsigned long long duration, struct trace_seq *s); | |
ea4e2bc4 SR |
585 | |
586 | #ifdef CONFIG_DYNAMIC_FTRACE | |
587 | /* TODO: make this variable */ | |
588 | #define FTRACE_GRAPH_MAX_FUNCS 32 | |
589 | extern int ftrace_graph_count; | |
590 | extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS]; | |
591 | ||
592 | static inline int ftrace_graph_addr(unsigned long addr) | |
593 | { | |
594 | int i; | |
595 | ||
596 | if (!ftrace_graph_count || test_tsk_trace_graph(current)) | |
597 | return 1; | |
598 | ||
599 | for (i = 0; i < ftrace_graph_count; i++) { | |
600 | if (addr == ftrace_graph_funcs[i]) | |
601 | return 1; | |
602 | } | |
603 | ||
604 | return 0; | |
605 | } | |
15e6cb36 | 606 | #else |
ea4e2bc4 SR |
607 | static inline int ftrace_trace_addr(unsigned long addr) |
608 | { | |
6b253930 IM |
609 | return 1; |
610 | } | |
611 | static inline int ftrace_graph_addr(unsigned long addr) | |
612 | { | |
613 | return 1; | |
ea4e2bc4 SR |
614 | } |
615 | #endif /* CONFIG_DYNAMIC_FTRACE */ | |
ea4e2bc4 | 616 | #else /* CONFIG_FUNCTION_GRAPH_TRACER */ |
15e6cb36 | 617 | static inline enum print_line_t |
fb52607a | 618 | print_graph_function(struct trace_iterator *iter) |
15e6cb36 FW |
619 | { |
620 | return TRACE_TYPE_UNHANDLED; | |
621 | } | |
ea4e2bc4 | 622 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
15e6cb36 | 623 | |
978f3a45 | 624 | extern struct pid *ftrace_pid_trace; |
804a6851 | 625 | |
1155de47 | 626 | #ifdef CONFIG_FUNCTION_TRACER |
804a6851 SR |
627 | static inline int ftrace_trace_task(struct task_struct *task) |
628 | { | |
77d683f3 | 629 | if (!ftrace_pid_trace) |
804a6851 SR |
630 | return 1; |
631 | ||
632 | return test_tsk_trace_trace(task); | |
633 | } | |
1155de47 PM |
634 | #else |
635 | static inline int ftrace_trace_task(struct task_struct *task) | |
636 | { | |
637 | return 1; | |
638 | } | |
639 | #endif | |
804a6851 | 640 | |
4fcdae83 SR |
641 | /* |
642 | * trace_iterator_flags is an enumeration that defines bit | |
643 | * positions into trace_flags that control the output. | |
644 | * | |
645 | * NOTE: These bits must match the trace_options array in | |
646 | * trace.c. | |
647 | */ | |
4e655519 IM |
648 | enum trace_iterator_flags { |
649 | TRACE_ITER_PRINT_PARENT = 0x01, | |
650 | TRACE_ITER_SYM_OFFSET = 0x02, | |
651 | TRACE_ITER_SYM_ADDR = 0x04, | |
652 | TRACE_ITER_VERBOSE = 0x08, | |
653 | TRACE_ITER_RAW = 0x10, | |
654 | TRACE_ITER_HEX = 0x20, | |
655 | TRACE_ITER_BIN = 0x40, | |
656 | TRACE_ITER_BLOCK = 0x80, | |
657 | TRACE_ITER_STACKTRACE = 0x100, | |
4ac3ba41 | 658 | TRACE_ITER_SCHED_TREE = 0x200, |
f09ce573 | 659 | TRACE_ITER_PRINTK = 0x400, |
b2a866f9 | 660 | TRACE_ITER_PREEMPTONLY = 0x800, |
9f029e83 | 661 | TRACE_ITER_BRANCH = 0x1000, |
12ef7d44 | 662 | TRACE_ITER_ANNOTATE = 0x2000, |
b54d3de9 | 663 | TRACE_ITER_USERSTACKTRACE = 0x4000, |
66896a85 | 664 | TRACE_ITER_SYM_USEROBJ = 0x8000, |
c4a8e8be | 665 | TRACE_ITER_PRINTK_MSGONLY = 0x10000, |
c032ef64 SR |
666 | TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */ |
667 | TRACE_ITER_LATENCY_FMT = 0x40000, | |
af4617bd | 668 | TRACE_ITER_GLOBAL_CLK = 0x80000, |
be6f164a | 669 | TRACE_ITER_SLEEP_TIME = 0x100000, |
a2a16d6a | 670 | TRACE_ITER_GRAPH_TIME = 0x200000, |
4e655519 IM |
671 | }; |
672 | ||
15e6cb36 FW |
673 | /* |
674 | * TRACE_ITER_SYM_MASK masks the options in trace_flags that | |
675 | * control the output of kernel symbols. | |
676 | */ | |
677 | #define TRACE_ITER_SYM_MASK \ | |
678 | (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR) | |
679 | ||
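A rough sketch of how output code narrows trace_flags with this mask (compare seq_print_ip_sym() in trace_output.c); the helper below is hypothetical and the branch bodies are elided:

```c
static void my_print_ip(struct trace_seq *s, unsigned long ip)
{
	unsigned long sym_flags = trace_flags & TRACE_ITER_SYM_MASK;

	if (sym_flags & TRACE_ITER_SYM_OFFSET) {
		/* resolve and print the symbol as name+0x<offset>/<size> */
	} else {
		/* resolve and print just the symbol name */
	}

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		trace_seq_printf(s, " <%lx>", ip);	/* raw address too */
}
```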
43a15386 FW |
680 | extern struct tracer nop_trace; |
681 | ||
8f0a056f SR |
682 | /** |
683 | * ftrace_preempt_disable - disable preemption scheduler safe | |
684 | * | |
685 | * When tracing can happen inside the scheduler, there exist | |
686 | * cases where the tracing might happen before the need_resched | |
687 | * flag is checked. If this happens and the tracer calls | |
688 | * preempt_enable (after a disable), a schedule might take place | |
689 | * causing an infinite recursion. | |
690 | * | |
57794a9d | 691 | * To prevent this, we read the need_resched flag before |
8f0a056f SR |
692 | * disabling preemption. When we want to enable preemption we |
693 | * check the flag, if it is set, then we call preempt_enable_no_resched. | |
694 | * Otherwise, we call preempt_enable. | |
695 | * | |
57794a9d | 696 | * The rationale for doing the above is that if need_resched is set
8f0a056f SR |
697 | * and we have yet to reschedule, we are either in an atomic location |
698 | * (where we do not need to check for scheduling) or we are inside | |
699 | * the scheduler and do not want to resched. | |
700 | */ | |
701 | static inline int ftrace_preempt_disable(void) | |
702 | { | |
703 | int resched; | |
704 | ||
705 | resched = need_resched(); | |
706 | preempt_disable_notrace(); | |
707 | ||
708 | return resched; | |
709 | } | |
710 | ||
711 | /** | |
712 | * ftrace_preempt_enable - enable preemption scheduler safe | |
713 | * @resched: the return value from ftrace_preempt_disable | |
714 | * | |
715 | * This is a scheduler safe way to enable preemption and not miss | |
716 | * any preemption checks. The disable call saved the state of preemption. | |
57794a9d | 717 | * If resched is set, then we are either inside an atomic or |
8f0a056f SR |
718 | * are inside the scheduler (we would have already scheduled |
719 | * otherwise). In this case, we do not want to call normal | |
720 | * preempt_enable, but preempt_enable_no_resched instead. | |
721 | */ | |
722 | static inline void ftrace_preempt_enable(int resched) | |
723 | { | |
724 | if (resched) | |
725 | preempt_enable_no_resched_notrace(); | |
726 | else | |
727 | preempt_enable_notrace(); | |
728 | } | |
729 | ||
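The intended pairing of the two helpers above, as a sketch (hook name hypothetical):

```c
static void my_trace_hook(void)
{
	int resched;

	resched = ftrace_preempt_disable();

	/* ... record the event; preemption is safely disabled here ... */

	ftrace_preempt_enable(resched);
}
```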
2ed84eeb | 730 | #ifdef CONFIG_BRANCH_TRACER |
9f029e83 SR |
731 | extern int enable_branch_tracing(struct trace_array *tr); |
732 | extern void disable_branch_tracing(void); | |
733 | static inline int trace_branch_enable(struct trace_array *tr) | |
52f232cb | 734 | { |
9f029e83 SR |
735 | if (trace_flags & TRACE_ITER_BRANCH) |
736 | return enable_branch_tracing(tr); | |
52f232cb SR |
737 | return 0; |
738 | } | |
9f029e83 | 739 | static inline void trace_branch_disable(void) |
52f232cb SR |
740 | { |
741 | /* due to races, always disable */ | |
9f029e83 | 742 | disable_branch_tracing(); |
52f232cb SR |
743 | } |
744 | #else | |
9f029e83 | 745 | static inline int trace_branch_enable(struct trace_array *tr) |
52f232cb SR |
746 | { |
747 | return 0; | |
748 | } | |
9f029e83 | 749 | static inline void trace_branch_disable(void) |
52f232cb SR |
750 | { |
751 | } | |
2ed84eeb | 752 | #endif /* CONFIG_BRANCH_TRACER */ |
52f232cb | 753 | |
1852fcce SR |
754 | /* set ring buffers to default size if not already done */ | |
755 | int tracing_update_buffers(void); | |
756 | ||
fd994989 SR |
757 | /* trace event type bit fields, not numeric */ |
758 | enum { | |
759 | TRACE_EVENT_TYPE_PRINTF = 1, | |
760 | TRACE_EVENT_TYPE_RAW = 2, | |
761 | }; | |
762 | ||
cf027f64 TZ |
763 | struct ftrace_event_field { |
764 | struct list_head link; | |
765 | char *name; | |
766 | char *type; | |
767 | int offset; | |
768 | int size; | |
a118e4d1 | 769 | int is_signed; |
cf027f64 TZ |
770 | }; |
771 | ||
30e673b2 TZ |
772 | struct event_filter { |
773 | int n_preds; | |
774 | struct filter_pred **preds; | |
8b372562 | 775 | char *filter_string; |
1f9963cb | 776 | bool no_reset; |
30e673b2 TZ |
777 | }; |
778 | ||
cfb180f3 TZ |
779 | struct event_subsystem { |
780 | struct list_head list; | |
781 | const char *name; | |
782 | struct dentry *entry; | |
1f9963cb | 783 | struct event_filter *filter; |
dc82ec98 | 784 | int nr_events; |
cfb180f3 TZ |
785 | }; |
786 | ||
7ce7e424 TZ |
787 | struct filter_pred; |
788 | ||
8b372562 TZ |
789 | typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event, |
790 | int val1, int val2); | |
7ce7e424 TZ |
791 | |
792 | struct filter_pred { | |
793 | filter_pred_fn_t fn; | |
794 | u64 val; | |
0a19e53c | 795 | char str_val[MAX_FILTER_STR_VAL]; |
7ce7e424 TZ |
796 | int str_len; |
797 | char *field_name; | |
798 | int offset; | |
799 | int not; | |
8b372562 TZ |
800 | int op; |
801 | int pop_n; | |
7ce7e424 TZ |
802 | }; |
803 | ||
8b372562 | 804 | extern void print_event_filter(struct ftrace_event_call *call, |
4bda2d51 | 805 | struct trace_seq *s); |
8b372562 TZ |
806 | extern int apply_event_filter(struct ftrace_event_call *call, |
807 | char *filter_string); | |
808 | extern int apply_subsystem_event_filter(struct event_subsystem *system, | |
809 | char *filter_string); | |
810 | extern void print_subsystem_event_filter(struct event_subsystem *system, | |
ac1adc55 | 811 | struct trace_seq *s); |
7ce7e424 | 812 | |
eb02ce01 | 813 | static inline int |
e1112b4d | 814 | filter_check_discard(struct ftrace_event_call *call, void *rec, |
eb02ce01 | 815 | struct ring_buffer *buffer, |
e1112b4d TZ |
816 | struct ring_buffer_event *event) |
817 | { | |
30e673b2 | 818 | if (unlikely(call->filter_active) && !filter_match_preds(call, rec)) { |
eb02ce01 TZ |
819 | ring_buffer_discard_commit(buffer, event); |
820 | return 1; | |
821 | } | |
822 | ||
823 | return 0; | |
e1112b4d TZ |
824 | } |
825 | ||
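A sketch of the commit path this helper is meant for, patterned after the code that generated trace events emit; the wrapper and its parameters are illustrative:

```c
static void my_event_commit(struct ftrace_event_call *call, void *entry,
			    struct trace_array *tr,
			    struct ring_buffer_event *event,
			    unsigned long irq_flags, int pc)
{
	/* Discard the just-filled record if the event's filter rejects
	 * it; otherwise commit it to the ring buffer. */
	if (!filter_check_discard(call, entry, tr->buffer, event))
		trace_buffer_unlock_commit(tr, event, irq_flags, pc);
}
```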
8b372562 TZ |
826 | #define DEFINE_COMPARISON_PRED(type) \ |
827 | static int filter_pred_##type(struct filter_pred *pred, void *event, \ | |
828 | int val1, int val2) \ | |
829 | { \ | |
830 | type *addr = (type *)(event + pred->offset); \ | |
831 | type val = (type)pred->val; \ | |
832 | int match = 0; \ | |
833 | \ | |
834 | switch (pred->op) { \ | |
835 | case OP_LT: \ | |
836 | match = (*addr < val); \ | |
837 | break; \ | |
838 | case OP_LE: \ | |
839 | match = (*addr <= val); \ | |
840 | break; \ | |
841 | case OP_GT: \ | |
842 | match = (*addr > val); \ | |
843 | break; \ | |
844 | case OP_GE: \ | |
845 | match = (*addr >= val); \ | |
846 | break; \ | |
847 | default: \ | |
848 | break; \ | |
849 | } \ | |
850 | \ | |
851 | return match; \ | |
852 | } | |
853 | ||
854 | #define DEFINE_EQUALITY_PRED(size) \ | |
855 | static int filter_pred_##size(struct filter_pred *pred, void *event, \ | |
856 | int val1, int val2) \ | |
857 | { \ | |
858 | u##size *addr = (u##size *)(event + pred->offset); \ | |
859 | u##size val = (u##size)pred->val; \ | |
860 | int match; \ | |
861 | \ | |
862 | match = (val == *addr) ^ pred->not; \ | |
863 | \ | |
864 | return match; \ | |
865 | } | |
866 | ||
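These two macros are instantiated once per field width in kernel/trace/trace_events_filter.c (which also defines the OP_* comparison enum the macro body references); a sketch of those instantiations:

```c
DEFINE_COMPARISON_PRED(s64);
DEFINE_COMPARISON_PRED(u64);
DEFINE_COMPARISON_PRED(s32);
DEFINE_COMPARISON_PRED(u32);
DEFINE_COMPARISON_PRED(s16);
DEFINE_COMPARISON_PRED(u16);
DEFINE_COMPARISON_PRED(s8);
DEFINE_COMPARISON_PRED(u8);

DEFINE_EQUALITY_PRED(64);
DEFINE_EQUALITY_PRED(32);
DEFINE_EQUALITY_PRED(16);
DEFINE_EQUALITY_PRED(8);
```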
20c8928a | 867 | extern struct mutex event_mutex; |
a59fd602 | 868 | extern struct list_head ftrace_events; |
ac199db0 | 869 | |
e9fb2b6d SR |
870 | extern const char *__start___trace_bprintk_fmt[]; |
871 | extern const char *__stop___trace_bprintk_fmt[]; | |
872 | ||
e1112b4d TZ |
873 | #undef TRACE_EVENT_FORMAT |
874 | #define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \ | |
875 | extern struct ftrace_event_call event_##call; | |
e45f2e2b TZ |
876 | #undef TRACE_EVENT_FORMAT_NOFILTER |
877 | #define TRACE_EVENT_FORMAT_NOFILTER(call, proto, args, fmt, tstruct, tpfmt) | |
e1112b4d TZ |
878 | #include "trace_event_types.h" |
879 | ||
bc0c38d1 | 880 | #endif /* _LINUX_KERNEL_TRACE_H */ |