// SPDX-License-Identifier: GPL-2.0

#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/trace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/trace_events.h>
#include <linux/compiler.h>
#include <linux/glob.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>
#include <linux/ctype.h>
#include <linux/once_lite.h>

#include "pid_list.h"

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_syscalls */
#include <asm/syscall.h>	/* some archs define it here */
#endif

#define TRACE_MODE_WRITE	0640
#define TRACE_MODE_READ		0440

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,
	TRACE_HWLAT,
	TRACE_OSNOISE,
	TRACE_TIMERLAT,
	TRACE_RAW_DATA,
	TRACE_FUNC_REPEATS,

	__TRACE_LAST_TYPE,
};

#undef __field
#define __field(type, item)		type	item;

#undef __field_fn
#define __field_fn(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __field_packed
#define __field_packed(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

/*
 * For backward compatibility, older user space expects to see the
 * kernel_stack event with a fixed size caller field. But today the fixed
 * size is ignored by the kernel, and the real structure is dynamic.
 * Expose to user space: "unsigned long caller[8];" but the real structure
 * will be "unsigned long caller[] __counted_by(size)".
 */
#undef __stack_array
#define __stack_array(type, item, size, field)	type item[] __counted_by(field);

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef __rel_dynamic_array
#define __rel_dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)		\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn)	\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))

#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed

#include "trace_entries.h"

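/*
 * Example (illustrative, not part of the original header): with the
 * definitions above, an FTRACE_ENTRY() use in trace_entries.h such as the
 * function entry ends up defining roughly:
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 *
 * i.e. only the structure layout is kept here; the id and print()
 * arguments are consumed by other expansions of the same macros.
 */
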
/* Use this for memory failure errors */
#define MEM_FAIL(condition, fmt, ...)					\
	DO_ONCE_LITE_IF(condition, pr_err, "ERROR: " fmt, ##__VA_ARGS__)

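/*
 * Example (illustrative, not part of the original header):
 *
 *	if (MEM_FAIL(!buf, "failed to allocate trace buffer\n"))
 *		return -ENOMEM;
 *
 * The pr_err() fires only once (via DO_ONCE_LITE_IF), while the macro
 * still evaluates to the condition so it can sit directly in an if ().
 */
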
#define FAULT_STRING "(fault)"

#define HIST_STACKTRACE_DEPTH	16
#define HIST_STACKTRACE_SIZE	(HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
#define HIST_STACKTRACE_SKIP	5

/*
 * syscalls are special, and need special handling, this is why
 * they are not included in trace_entries.h
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct eprobe_trace_entry_head {
	struct trace_entry	ent;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};

struct fentry_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct fexit_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};

#define TRACE_BUF_SIZE		1024

struct trace_array;

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	u64			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];

#ifdef CONFIG_FUNCTION_TRACER
	int			ftrace_ignore_pid;
#endif
	bool			ignore_pid;
};

struct tracer;
struct trace_option_dentry;

struct array_buffer {
	struct trace_array		*tr;
	struct trace_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
	u64				time_start;
	int				cpu;
};

#define TRACE_FLAGS_MAX_SIZE		32

struct trace_options {
	struct tracer			*tracer;
	struct trace_option_dentry	*topts;
};

struct trace_pid_list *trace_pid_list_alloc(void);
void trace_pid_list_free(struct trace_pid_list *pid_list);
bool trace_pid_list_is_set(struct trace_pid_list *pid_list, unsigned int pid);
int trace_pid_list_set(struct trace_pid_list *pid_list, unsigned int pid);
int trace_pid_list_clear(struct trace_pid_list *pid_list, unsigned int pid);
int trace_pid_list_first(struct trace_pid_list *pid_list, unsigned int *pid);
int trace_pid_list_next(struct trace_pid_list *pid_list, unsigned int pid,
			unsigned int *next);

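/*
 * Example (illustrative sketch, not part of the original header): walking
 * every pid stored in a trace_pid_list with the iterators above, assuming
 * the "first"/"next" helpers return a negative value once no further pid
 * is found (this is how the seq_file iterator in trace.c treats them).
 *
 *	unsigned int pid;
 *
 *	if (trace_pid_list_first(pid_list, &pid) >= 0) {
 *		do {
 *			pr_info("traced pid: %u\n", pid);
 *		} while (trace_pid_list_next(pid_list, pid + 1, &pid) >= 0);
 *	}
 */
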
enum {
	TRACE_PIDS		= BIT(0),
	TRACE_NO_PIDS		= BIT(1),
};

static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list,
				    struct trace_pid_list *no_pid_list)
{
	/* Return true if the pid list in type has pids */
	return ((type & TRACE_PIDS) && pid_list) ||
		((type & TRACE_NO_PIDS) && no_pid_list);
}

static inline bool still_need_pid_events(int type, struct trace_pid_list *pid_list,
					 struct trace_pid_list *no_pid_list)
{
	/*
	 * Turning off what is in @type, return true if the "other"
	 * pid list still has pids in it.
	 */
	return (!(type & TRACE_PIDS) && pid_list) ||
		(!(type & TRACE_NO_PIDS) && no_pid_list);
}

typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);

/**
 * struct cond_snapshot - conditional snapshot data and callback
 *
 * The cond_snapshot structure encapsulates a callback function and
 * data associated with the snapshot for a given tracing instance.
 *
 * When a snapshot is taken conditionally, by invoking
 * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
 * passed in turn to the cond_snapshot.update() function.  That data
 * can be compared by the update() implementation with the cond_data
 * contained within the struct cond_snapshot instance associated with
 * the trace_array.  Because the tr->max_lock is held throughout the
 * update() call, the update() function can directly retrieve the
 * cond_snapshot and cond_data associated with the per-instance
 * snapshot associated with the trace_array.
 *
 * The cond_snapshot.update() implementation can save data to be
 * associated with the snapshot if it decides to, and returns 'true'
 * in that case, or it returns 'false' if the conditional snapshot
 * shouldn't be taken.
 *
 * The cond_snapshot instance is created and associated with the
 * user-defined cond_data by tracing_cond_snapshot_enable().
 * Likewise, the cond_snapshot instance is destroyed and is no longer
 * associated with the trace instance by
 * tracing_cond_snapshot_disable().
 *
 * The method below is required.
 *
 * @update: When a conditional snapshot is invoked, the update()
 *	callback function is invoked with the tr->max_lock held.  The
 *	update() implementation signals whether or not to actually
 *	take the snapshot, by returning 'true' if so, 'false' if no
 *	snapshot should be taken.  Because the max_lock is held for
 *	the duration of update(), the implementation is safe to
 *	directly retrieve and save any implementation data it needs
 *	to in association with the snapshot.
 */
struct cond_snapshot {
	void				*cond_data;
	cond_update_fn_t		update;
};

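/*
 * Example (illustrative sketch, not part of the original header): a
 * condition callback that only lets the snapshot happen once a value
 * handed to tracing_snapshot_cond() crosses a threshold stashed as the
 * cond_data at enable time.  The callback and threshold names are made
 * up for the example.
 *
 *	static bool my_cond_update(struct trace_array *tr, void *cond_data)
 *	{
 *		unsigned long *threshold = tr->cond_snapshot->cond_data;
 *
 *		return (unsigned long)cond_data > *threshold;
 *	}
 *
 *	// setup:  tracing_cond_snapshot_enable(tr, &my_threshold, my_cond_update);
 *	// event:  tracing_snapshot_cond(tr, (void *)current_value);
 */
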
/*
 * struct trace_func_repeats - used to keep track of the consecutive
 * (on the same CPU) calls of a single function.
 */
struct trace_func_repeats {
	unsigned long	ip;
	unsigned long	parent_ip;
	unsigned long	count;
	u64		ts_last_call;
};

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct list_head	list;
	char			*name;
	struct array_buffer	array_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the array_buffer.
	 * When a snapshot is taken, the buffer of the max_buffer is swapped
	 * with the buffer of the array_buffer and the buffers are reset for
	 * the array_buffer so the tracing can continue.
	 */
	struct array_buffer	max_buffer;
	bool			allocated_snapshot;
	spinlock_t		snapshot_trigger_lock;
	unsigned int		snapshot;
	unsigned long		max_latency;
#ifdef CONFIG_FSNOTIFY
	struct dentry		*d_max_latency;
	struct work_struct	fsnotify_work;
	struct irq_work		fsnotify_irqwork;
#endif
#endif
	/* The below is for memory mapped ring buffer */
	unsigned int		mapped;
	unsigned long		range_addr_start;
	unsigned long		range_addr_size;
	long			text_delta;
	long			data_delta;

	struct trace_pid_list	__rcu *filtered_pids;
	struct trace_pid_list	__rcu *filtered_no_pids;
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot. The buffers themselves are
	 * protected by per_cpu spinlocks. But the action of the swap
	 * needs its own lock.
	 *
	 * This is defined as an arch_spinlock_t in order to help
	 * with performance when lockdep debugging is enabled.
	 *
	 * It is also used in other places outside the update_max_tr
	 * so it needs to be defined outside of the
	 * CONFIG_TRACER_MAX_TRACE.
	 */
	arch_spinlock_t		max_lock;
	int			buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	int			stop_count;
	int			clock_id;
	int			nr_topts;
	bool			clear_trace;
	int			buffer_percent;
	unsigned int		n_err_log_entries;
	struct tracer		*current_trace;
	unsigned int		trace_flags;
	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	const char		*system_names;
	struct list_head	err_log;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct eventfs_inode	*event_dir;
	struct trace_options	*topts;
	struct list_head	systems;
	struct list_head	events;
	struct trace_event_file *trace_marker_file;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	/* one per_cpu trace_pipe can be opened by only one user */
	cpumask_var_t		pipe_cpumask;
	int			ref;
	int			trace_ref;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	struct trace_pid_list	__rcu *function_pids;
	struct trace_pid_list	__rcu *function_no_pids;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	struct fgraph_ops	*gops;
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
	/* All of these are protected by the ftrace_lock */
	struct list_head	func_probes;
	struct list_head	mod_trace;
	struct list_head	mod_notrace;
#endif
	/* function tracing enabled */
	int			function_enabled;
#endif
	int			no_filter_buffering_ref;
	struct list_head	hist_vars;
#ifdef CONFIG_TRACER_SNAPSHOT
	struct cond_snapshot	*cond_snapshot;
#endif
	struct trace_func_repeats	__percpu *last_func_repeats;
	/*
	 * On boot up, the ring buffer is set to the minimum size, so that
	 * we do not waste memory on systems that are not using tracing.
	 */
	bool ring_buffer_expanded;
};

enum {
	TRACE_ARRAY_FL_GLOBAL	= BIT(0),
	TRACE_ARRAY_FL_BOOT	= BIT(1),
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern int tracing_check_open_get_tr(struct trace_array *tr);
extern struct trace_array *trace_array_find(const char *instance);
extern struct trace_array *trace_array_find_get(const char *instance);

extern u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe);
extern int tracing_set_filter_buffering(struct trace_array *tr, bool set);
extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);

extern bool trace_clock_in_ns(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)			\
	if (FTRACE_CMP_TYPE(var, etype)) {			\
		var = (typeof(var))(entry);			\
		WARN_ON(id != 0 && (entry)->type != id);	\
		break;						\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT);	\
		IF_ASSIGN(var, ent, struct osnoise_entry, TRACE_OSNOISE);\
		IF_ASSIGN(var, ent, struct timerlat_entry, TRACE_TIMERLAT);\
		IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH);	\
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		IF_ASSIGN(var, ent, struct func_repeats_entry,		\
			  TRACE_FUNC_REPEATS);				\
		__ftrace_bad_type();					\
	} while (0)

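/*
 * Example (illustrative, not part of the original header): the usual
 * pattern in the output code, e.g. inside a print handler for function
 * entries.  trace_assign_type() casts the generic entry to the struct
 * matching its type id and WARNs if they do not agree:
 *
 *	struct ftrace_entry *field;
 *	struct trace_entry *entry = iter->ent;
 *
 *	trace_assign_type(field, entry);
 *	trace_seq_printf(&iter->seq, "%ps <- %ps\n",
 *			 (void *)field->ip, (void *)field->parent_ip);
 */
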
/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
	struct tracer		*trace;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b

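/*
 * Example (illustrative sketch, not part of the original header): how a
 * tracer typically wires these together.  The option name and bit below
 * are invented; real tracers (e.g. function_graph) follow the same
 * pattern with their own flags.
 *
 *	#define MY_OPT_VERBOSE	0x1
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(my-verbose, MY_OPT_VERBOSE) },
 *		{ }	// terminator
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = 0,		// initial value of the option bits
 *		.opts = my_opts,
 *	};
 *
 *	// Then point .flags = &my_flags in the struct tracer definition;
 *	// set_flag() is called when "echo my-verbose > trace_options".
 */
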

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

/**
 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	int			(*update_thresh)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	int			enabled;
	bool			print_max;
	bool			allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
	/* True if tracer cannot be enabled in kernel param */
	bool			noboot;
};

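/*
 * Example (illustrative sketch, not part of the original header): a minimal
 * tracer in the spirit of the nop tracer.  All names are invented; a real
 * tracer registers itself from an __init function.
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		tracing_reset_online_cpus(&tr->array_buffer);
 *		return 0;
 *	}
 *
 *	static void my_tracer_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer my_tracer __tracer_data = {
 *		.name		 = "my_tracer",
 *		.init		 = my_tracer_init,
 *		.reset		 = my_tracer_reset,
 *		.allow_instances = true,
 *	};
 *
 *	// in an __init function:  register_tracer(&my_tracer);
 */
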
static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset_online_cpus(struct array_buffer *buf);
void tracing_reset_all_online_cpus(void);
void tracing_reset_all_online_cpus_unlocked(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
int tracing_open_generic_tr(struct inode *inode, struct file *filp);
int tracing_release_generic_tr(struct inode *inode, struct file *file);
int tracing_open_file_tr(struct inode *inode, struct file *filp);
int tracing_release_file_tr(struct inode *inode, struct file *filp);
int tracing_single_release_file_tr(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
bool tracer_tracing_is_on(struct trace_array *tr);
void tracer_tracing_on(struct trace_array *tr);
void tracer_tracing_off(struct trace_array *tr);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

int tracing_init_dentry(void);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct trace_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned int trace_ctx);

int ring_buffer_meta_seq_init(struct file *file, struct trace_buffer *buffer, int cpu);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
					struct ring_buffer_event *event);

bool trace_is_tracepoint_string(const char *str);
const char *trace_event_format(struct trace_iterator *iter, const char *fmt);
void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
			 va_list ap) __printf(2, 0);
char *trace_iter_expand_format(struct trace_iterator *iter);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
unsigned long trace_total_entries(struct trace_array *tr);

void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned int trace_ctx);
void trace_graph_function(struct trace_array *tr,
			  unsigned long ip,
			  unsigned long parent_ip,
			  unsigned int trace_ctx);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace, struct fgraph_ops *gops);
int trace_graph_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_start_tgid_record(void);
void tracing_stop_tgid_record(void);

int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

loff_t tracing_lseek(struct file *file, loff_t offset, int whence);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

/* PID filtering */

extern int pid_max;

bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
			     pid_t search_pid);
bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
			    struct trace_pid_list *filtered_no_pids,
			    struct task_struct *task);
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task);
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
int trace_pid_show(struct seq_file *m, void *v);
int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt);

#ifdef CONFIG_TRACER_MAX_TRACE
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
		   void *cond_data);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

#ifdef CONFIG_FSNOTIFY
#define LATENCY_FS_NOTIFY
#endif
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef LATENCY_FS_NOTIFY
void latency_fsnotify(struct trace_array *tr);
#else
static inline void latency_fsnotify(struct trace_array *tr) { }
#endif

#ifdef CONFIG_STACKTRACE
void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip);
#else
static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
				 int skip)
{
}
#endif /* CONFIG_STACKTRACE */

void trace_last_func_repeats(struct trace_array *tr,
			     struct trace_func_repeats *last_info,
			     unsigned int trace_ctx);

extern u64 ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);
extern int trace_find_tgid(int pid);
extern void trace_event_follow_fork(struct trace_array *tr, bool enable);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
extern unsigned long ftrace_number_of_pages;
extern unsigned long ftrace_number_of_groups;
void ftrace_init_trace_array(struct trace_array *tr);
#else
static inline void ftrace_init_trace_array(struct trace_array *tr) { }
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern void trace_set_ring_buffer_expanded(struct trace_array *tr);
extern bool tracing_selftest_disabled;

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern void __init disable_tracing_selftest(const char *reason);

extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up. These can be __init functions. Thus, when selftests
 * are enabled, then the tracers need to reference __init functions.
 */
#define __tracer_data		__refdata
#else
static inline void __init disable_tracing_selftest(const char *reason)
{
}
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data		__read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(u64 nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk_buf(struct trace_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern char trace_find_mark(unsigned long long duration);

struct ftrace_hash;

struct ftrace_mod_load {
	struct list_head	list;
	char			*func;
	char			*module;
	int			enable;
};

enum {
	FTRACE_HASH_FL_MOD	= (1 << 0),
};

struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	unsigned long		flags;
	struct rcu_head		rcu;
};

struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);

static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
}

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_REL_TIME	0x40
#define TRACE_GRAPH_PRINT_IRQS		0x80
#define TRACE_GRAPH_PRINT_TAIL		0x100
#define TRACE_GRAPH_SLEEP_TIME		0x200
#define TRACE_GRAPH_GRAPH_TIME		0x400
#define TRACE_GRAPH_PRINT_RETVAL	0x800
#define TRACE_GRAPH_PRINT_RETVAL_HEX	0x1000
#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)

extern void ftrace_graph_sleep_time_control(bool enable);

#ifdef CONFIG_FUNCTION_PROFILER
extern void ftrace_graph_graph_time_control(bool enable);
#else
static inline void ftrace_graph_graph_time_control(bool enable) { }
#endif

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned int trace_ctx);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned int trace_ctx);
extern void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops);
extern int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops);
extern void free_fgraph_ops(struct trace_array *tr);

enum {
	TRACE_GRAPH_FL		= 1,

	/*
	 * In the very unlikely case that an interrupt came in
	 * at a start of graph tracing, and we want to trace
	 * the function in that interrupt, the depth can be greater
	 * than zero, because of the preempted start of a previous
	 * trace. In an even more unlikely case, depth could be 2
	 * if a softirq interrupted the start of graph tracing,
	 * followed by an interrupt preempting a start of graph
	 * tracing in the softirq, and depth can even be 3
	 * if an NMI came in at the start of an interrupt function
	 * that preempted a softirq start of a function that
	 * preempted normal context!!!! Luckily, it can't be
	 * greater than 3, so the next two bits are a mask
	 * of what the depth is when we set TRACE_GRAPH_FL
	 */

	TRACE_GRAPH_DEPTH_START_BIT,
	TRACE_GRAPH_DEPTH_END_BIT,

	/*
	 * To implement set_graph_notrace, if this bit is set, we ignore
	 * function graph tracing of called functions, until the return
	 * function is called to clear it.
	 */
	TRACE_GRAPH_NOTRACE_BIT,
};

#define TRACE_GRAPH_NOTRACE	(1 << TRACE_GRAPH_NOTRACE_BIT)

static inline unsigned long ftrace_graph_depth(unsigned long *task_var)
{
	return (*task_var >> TRACE_GRAPH_DEPTH_START_BIT) & 3;
}

static inline void ftrace_graph_set_depth(unsigned long *task_var, int depth)
{
	*task_var &= ~(3 << TRACE_GRAPH_DEPTH_START_BIT);
	*task_var |= (depth & 3) << TRACE_GRAPH_DEPTH_START_BIT;
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern struct ftrace_hash __rcu *ftrace_graph_hash;
extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;

static inline int
ftrace_graph_addr(unsigned long *task_var, struct ftrace_graph_ent *trace)
{
	unsigned long addr = trace->func;
	int ret = 0;
	struct ftrace_hash *hash;

	preempt_disable_notrace();

	/*
	 * Have to open code "rcu_dereference_sched()" because the
	 * function graph tracer can be called when RCU is not
	 * "watching".
	 * Protected with schedule_on_each_cpu(ftrace_sync)
	 */
	hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());

	if (ftrace_hash_empty(hash)) {
		ret = 1;
		goto out;
	}

	if (ftrace_lookup_ip(hash, addr)) {
		/*
		 * This needs to be cleared on the return functions
		 * when the depth is zero.
		 */
		*task_var |= TRACE_GRAPH_FL;
		ftrace_graph_set_depth(task_var, trace->depth);

		/*
		 * If no irqs are to be traced, but a set_graph_function
		 * is set, and called by an interrupt handler, we still
		 * want to trace it.
		 */
		if (in_hardirq())
			trace_recursion_set(TRACE_IRQ_BIT);
		else
			trace_recursion_clear(TRACE_IRQ_BIT);
		ret = 1;
	}

out:
	preempt_enable_notrace();
	return ret;
}

static inline void
ftrace_graph_addr_finish(struct fgraph_ops *gops, struct ftrace_graph_ret *trace)
{
	unsigned long *task_var = fgraph_get_task_var(gops);

	if ((*task_var & TRACE_GRAPH_FL) &&
	    trace->depth == ftrace_graph_depth(task_var))
		*task_var &= ~TRACE_GRAPH_FL;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	int ret = 0;
	struct ftrace_hash *notrace_hash;

	preempt_disable_notrace();

	/*
	 * Have to open code "rcu_dereference_sched()" because the
	 * function graph tracer can be called when RCU is not
	 * "watching".
	 * Protected with schedule_on_each_cpu(ftrace_sync)
	 */
	notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
						 !preemptible());

	if (ftrace_lookup_ip(notrace_hash, addr))
		ret = 1;

	preempt_enable_notrace();
	return ret;
}
#else
static inline int ftrace_graph_addr(unsigned long *task_var, struct ftrace_graph_ent *trace)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
static inline void ftrace_graph_addr_finish(struct fgraph_ops *gops, struct ftrace_graph_ret *trace)
{ }
#endif /* CONFIG_DYNAMIC_FTRACE */

extern unsigned int fgraph_max_depth;

static inline bool
ftrace_graph_ignore_func(struct fgraph_ops *gops, struct ftrace_graph_ent *trace)
{
	unsigned long *task_var = fgraph_get_task_var(gops);

	/* trace it when it is-nested-in or is a function enabled. */
	return !((*task_var & TRACE_GRAPH_FL) ||
		 ftrace_graph_addr(task_var, trace)) ||
		(trace->depth < 0) ||
		(fgraph_max_depth && trace->depth >= fgraph_max_depth);
}

void fgraph_init_ops(struct ftrace_ops *dst_ops,
		     struct ftrace_ops *src_ops);

#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
static inline void free_fgraph_ops(struct trace_array *tr) { }
/* ftrace_ops may not be defined */
#define init_array_fgraph_ops(tr, ops) do { } while (0)
#define allocate_fgraph_ops(tr, ops) ({ 0; })
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER

#define FTRACE_PID_IGNORE	-1
#define FTRACE_PID_TRACE	-2

struct ftrace_func_command {
	struct list_head	list;
	char			*name;
	int			(*func)(struct trace_array *tr,
					struct ftrace_hash *hash,
					char *func, char *cmd,
					char *params, int enable);
};
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid) !=
		FTRACE_PID_IGNORE;
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent);
void ftrace_destroy_function_files(struct trace_array *tr);
int ftrace_allocate_ftrace_ops(struct trace_array *tr);
void ftrace_free_ftrace_ops(struct trace_array *tr);
void ftrace_init_global_array_ops(struct trace_array *tr);
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
void ftrace_reset_array_ops(struct trace_array *tr);
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
void ftrace_init_tracefs_toplevel(struct trace_array *tr,
				  struct dentry *d_tracer);
void ftrace_clear_pids(struct trace_array *tr);
int init_function_trace(void);
void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
#else
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	return 0;
}
static inline void ftrace_free_ftrace_ops(struct trace_array *tr) { }
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_clear_pids(struct trace_array *tr) { }
static inline int init_function_trace(void) { return 0; }
static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
/* ftrace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)

struct ftrace_probe_ops {
	void			(*func)(unsigned long ip,
					unsigned long parent_ip,
					struct trace_array *tr,
					struct ftrace_probe_ops *ops,
					void *data);
	int			(*init)(struct ftrace_probe_ops *ops,
					struct trace_array *tr,
					unsigned long ip, void *init_data,
					void **data);
	void			(*free)(struct ftrace_probe_ops *ops,
					struct trace_array *tr,
					unsigned long ip, void *data);
	int			(*print)(struct seq_file *m,
					 unsigned long ip,
					 struct ftrace_probe_ops *ops,
					 void *data);
};

struct ftrace_func_mapper;
typedef int (*ftrace_mapper_func)(void *data);

struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
				  unsigned long ip);
int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
			      unsigned long ip, void *data);
void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
				   unsigned long ip);
void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
			     ftrace_mapper_func free_func);

extern int
register_ftrace_function_probe(char *glob, struct trace_array *tr,
			       struct ftrace_probe_ops *ops, void *data);
extern int
unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
				      struct ftrace_probe_ops *ops);
extern void clear_ftrace_function_probes(struct trace_array *tr);

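/*
 * Example (illustrative sketch, not part of the original header): the shape
 * of a function probe, similar in spirit to the traceon/traceoff probes in
 * trace_functions.c.  Names are invented; the probe fires for every function
 * matching the glob passed to register_ftrace_function_probe().
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  struct trace_array *tr,
 *				  struct ftrace_probe_ops *ops, void *data)
 *	{
 *		tracing_off();	// e.g. stop tracing when the function hits
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe_func,
 *	};
 *
 *	// register_ftrace_function_probe("vfs_read*", tr, &my_probe_ops, NULL);
 */
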
int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent);
void ftrace_destroy_filter_files(struct ftrace_ops *ops);

extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
			     int len, int reset);
extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
			      int len, int reset);
#else
struct ftrace_func_command;

static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
	return -EINVAL;
}
static inline void clear_ftrace_function_probes(struct trace_array *tr)
{
}

/*
 * The ops parameter passed in is usually undefined.
 * This must be a macro.
 */
#define ftrace_create_filter_files(ops, parent) do { } while (0)
#define ftrace_destroy_filter_files(ops) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */

bool ftrace_event_is_function(struct trace_event_call *call);

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
			  size_t cnt, loff_t *ppos);

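/*
 * Example (illustrative sketch, not part of the original header): the usual
 * pattern for consuming space-separated words written to a tracefs file,
 * roughly what ftrace_regex_write()-style handlers do.
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser)) {
 *		// parser.buffer now holds one NUL-terminated word
 *		handle_one_word(parser.buffer);	// hypothetical helper
 *		trace_parser_clear(&parser);
 *	}
 *
 *	trace_parser_put(&parser);
 */
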
/*
 * Only create function graph options if function graph is configured.
 */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define FGRAPH_FLAGS						\
		C(DISPLAY_GRAPH,	"display-graph"),
#else
# define FGRAPH_FLAGS
#endif

#ifdef CONFIG_BRANCH_TRACER
# define BRANCH_FLAGS					\
		C(BRANCH,		"branch"),
#else
# define BRANCH_FLAGS
#endif

#ifdef CONFIG_FUNCTION_TRACER
# define FUNCTION_FLAGS						\
		C(FUNCTION,		"function-trace"),	\
		C(FUNC_FORK,		"function-fork"),
# define FUNCTION_DEFAULT_FLAGS		TRACE_ITER_FUNCTION
#else
# define FUNCTION_FLAGS
# define FUNCTION_DEFAULT_FLAGS		0UL
# define TRACE_ITER_FUNC_FORK		0UL
#endif

#ifdef CONFIG_STACKTRACE
# define STACK_FLAGS				\
		C(STACKTRACE,		"stacktrace"),
#else
# define STACK_FLAGS
#endif

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c (this macro guarantees it).
 */
a3418a36 SRRH |
1300 | #define TRACE_FLAGS \ |
1301 | C(PRINT_PARENT, "print-parent"), \ | |
1302 | C(SYM_OFFSET, "sym-offset"), \ | |
1303 | C(SYM_ADDR, "sym-addr"), \ | |
1304 | C(VERBOSE, "verbose"), \ | |
1305 | C(RAW, "raw"), \ | |
1306 | C(HEX, "hex"), \ | |
1307 | C(BIN, "bin"), \ | |
1308 | C(BLOCK, "block"), \ | |
80a76994 | 1309 | C(FIELDS, "fields"), \ |
a3418a36 | 1310 | C(PRINTK, "trace_printk"), \ |
a3418a36 SRRH |
1311 | C(ANNOTATE, "annotate"), \ |
1312 | C(USERSTACKTRACE, "userstacktrace"), \ | |
1313 | C(SYM_USEROBJ, "sym-userobj"), \ | |
1314 | C(PRINTK_MSGONLY, "printk-msg-only"), \ | |
1315 | C(CONTEXT_INFO, "context-info"), /* Print pid/cpu/time */ \ | |
1316 | C(LATENCY_FMT, "latency-format"), \ | |
a3418a36 | 1317 | C(RECORD_CMD, "record-cmd"), \ |
d914ba37 | 1318 | C(RECORD_TGID, "record-tgid"), \ |
a3418a36 SRRH |
1319 | C(OVERWRITE, "overwrite"), \ |
1320 | C(STOP_ON_FREE, "disable_on_free"), \ | |
1321 | C(IRQ_INFO, "irq-info"), \ | |
1322 | C(MARKERS, "markers"), \ | |
c37775d5 | 1323 | C(EVENT_FORK, "event-fork"), \ |
ef2bd81d | 1324 | C(TRACE_PRINTK, "trace_printk_dest"), \ |
06e0a548 | 1325 | C(PAUSE_ON_TRACE, "pause-on-trace"), \ |
a345a671 | 1326 | C(HASH_PTR, "hash-ptr"), /* Print hashed pointer */ \ |
8179e8a1 | 1327 | FUNCTION_FLAGS \ |
4ee4301c | 1328 | FGRAPH_FLAGS \ |
73dddbb5 | 1329 | STACK_FLAGS \ |
4ee4301c | 1330 | BRANCH_FLAGS |
ce3fed62 | 1331 | |
a3418a36 SRRH |
1332 | /* |
1333 | * By defining C, we can make TRACE_FLAGS a list of bit names | |
1334 | * that will define the bits for the flag masks. | |
1335 | */ | |
1336 | #undef C | |
1337 | #define C(a, b) TRACE_ITER_##a##_BIT | |
1338 | ||
b5e87c05 SRRH |
1339 | enum trace_iterator_bits { |
1340 | TRACE_FLAGS | |
1341 | /* Make sure we don't go more than we have bits for */ | |
1342 | TRACE_ITER_LAST_BIT | |
1343 | }; | |
a3418a36 SRRH |
1344 | |
1345 | /* | |
1346 | * By redefining C, we can make TRACE_FLAGS a list of masks that | |
1347 | * use the bits as defined above. | |
1348 | */ | |
1349 | #undef C | |
1350 | #define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT) | |
1351 | ||
1352 | enum trace_iterator_flags { TRACE_FLAGS }; | |
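/*
 * Example of the expansion above, taking just the first entry: with
 * C(a, b) defined as TRACE_ITER_##a##_BIT, C(PRINT_PARENT, "print-parent")
 * becomes the enumerator TRACE_ITER_PRINT_PARENT_BIT (== 0); after C() is
 * redefined to the mask form it becomes
 * TRACE_ITER_PRINT_PARENT = (1 << TRACE_ITER_PRINT_PARENT_BIT) (== 1).
 * The string "print-parent" is reused by trace.c to build the
 * trace_options array, which is what keeps bit order and option names
 * in sync.
 */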
4e655519 | 1353 | |
15e6cb36 FW |
1354 | /* |
1355 | * TRACE_ITER_SYM_MASK masks the options in trace_flags that | |
1356 | * control the output of kernel symbols. | |
1357 | */ | |
1358 | #define TRACE_ITER_SYM_MASK \ | |
1359 | (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR) | |
1360 | ||
43a15386 FW |
1361 | extern struct tracer nop_trace; |
1362 | ||
2ed84eeb | 1363 | #ifdef CONFIG_BRANCH_TRACER |
9f029e83 SR |
1364 | extern int enable_branch_tracing(struct trace_array *tr); |
1365 | extern void disable_branch_tracing(void); | |
1366 | static inline int trace_branch_enable(struct trace_array *tr) | |
52f232cb | 1367 | { |
983f938a | 1368 | if (tr->trace_flags & TRACE_ITER_BRANCH) |
9f029e83 | 1369 | return enable_branch_tracing(tr); |
52f232cb SR |
1370 | return 0; |
1371 | } | |
9f029e83 | 1372 | static inline void trace_branch_disable(void) |
52f232cb SR |
1373 | { |
1374 | /* due to races, always disable */ | |
9f029e83 | 1375 | disable_branch_tracing(); |
52f232cb SR |
1376 | } |
1377 | #else | |
9f029e83 | 1378 | static inline int trace_branch_enable(struct trace_array *tr) |
52f232cb SR |
1379 | { |
1380 | return 0; | |
1381 | } | |
9f029e83 | 1382 | static inline void trace_branch_disable(void) |
52f232cb SR |
1383 | { |
1384 | } | |
2ed84eeb | 1385 | #endif /* CONFIG_BRANCH_TRACER */ |
52f232cb | 1386 | |
1852fcce | 1387 | /* set ring buffers to default size if not already done so */ |
a1f157c7 | 1388 | int tracing_update_buffers(struct trace_array *tr); |
1852fcce | 1389 | |
ddeea494 SS |
1390 | union trace_synth_field { |
1391 | u8 as_u8; | |
1392 | u16 as_u16; | |
1393 | u32 as_u32; | |
1394 | u64 as_u64; | |
1395 | struct trace_dynamic_info as_dynamic; | |
1396 | }; | |
1397 | ||
cf027f64 TZ |
1398 | struct ftrace_event_field { |
1399 | struct list_head link; | |
92edca07 SR |
1400 | const char *name; |
1401 | const char *type; | |
aa38e9fc | 1402 | int filter_type; |
cf027f64 TZ |
1403 | int offset; |
1404 | int size; | |
a118e4d1 | 1405 | int is_signed; |
b6c7abd1 | 1406 | int len; |
cf027f64 TZ |
1407 | }; |
1408 | ||
80765597 SRV |
1409 | struct prog_entry; |
1410 | ||
30e673b2 | 1411 | struct event_filter { |
80765597 SRV |
1412 | struct prog_entry __rcu *prog; |
1413 | char *filter_string; | |
30e673b2 TZ |
1414 | }; |
1415 | ||
cfb180f3 TZ |
1416 | struct event_subsystem { |
1417 | struct list_head list; | |
1418 | const char *name; | |
1f9963cb | 1419 | struct event_filter *filter; |
e9dbfae5 | 1420 | int ref_count; |
cfb180f3 TZ |
1421 | }; |
1422 | ||
7967b3e0 | 1423 | struct trace_subsystem_dir { |
ae63b31e SR |
1424 | struct list_head list; |
1425 | struct event_subsystem *subsystem; | |
1426 | struct trace_array *tr; | |
5790b1fb | 1427 | struct eventfs_inode *ei; |
ae63b31e SR |
1428 | int ref_count; |
1429 | int nr_events; | |
1430 | }; | |
1431 | ||
65da9a0a | 1432 | extern int call_filter_check_discard(struct trace_event_call *call, void *rec, |
13292494 | 1433 | struct trace_buffer *buffer, |
65da9a0a | 1434 | struct ring_buffer_event *event); |
fa66ddb8 | 1435 | |
fa66ddb8 | 1436 | void trace_buffer_unlock_commit_regs(struct trace_array *tr, |
13292494 | 1437 | struct trace_buffer *buffer, |
fa66ddb8 | 1438 | struct ring_buffer_event *event, |
36590c50 | 1439 | unsigned int trace_ctx, | |
fa66ddb8 | 1440 | struct pt_regs *regs); |
33fddff2 SRRH |
1441 | |
1442 | static inline void trace_buffer_unlock_commit(struct trace_array *tr, | |
13292494 | 1443 | struct trace_buffer *buffer, |
33fddff2 | 1444 | struct ring_buffer_event *event, |
36590c50 | 1445 | unsigned int trace_ctx) |
33fddff2 | 1446 | { |
36590c50 | 1447 | trace_buffer_unlock_commit_regs(tr, buffer, event, trace_ctx, NULL); |
33fddff2 SRRH |
1448 | } |
1449 | ||
2cc621fd SRG |
1450 | DECLARE_PER_CPU(bool, trace_taskinfo_save); |
1451 | int trace_save_cmdline(struct task_struct *tsk); | |
1452 | int trace_create_savedcmd(void); | |
1453 | int trace_alloc_tgid_map(void); | |
1454 | void trace_free_saved_cmdlines_buffer(void); | |
1455 | ||
1456 | extern const struct file_operations tracing_saved_cmdlines_fops; | |
1457 | extern const struct file_operations tracing_saved_tgids_fops; | |
1458 | extern const struct file_operations tracing_saved_cmdlines_size_fops; | |
1459 | ||
0fc1b09f SRRH |
1460 | DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event); |
1461 | DECLARE_PER_CPU(int, trace_buffered_event_cnt); | |
1462 | void trace_buffered_event_disable(void); | |
1463 | void trace_buffered_event_enable(void); | |
1464 | ||
c4846480 SRG |
1465 | void early_enable_events(struct trace_array *tr, char *buf, bool disable_first); |
1466 | ||
0fc1b09f | 1467 | static inline void |
13292494 | 1468 | __trace_event_discard_commit(struct trace_buffer *buffer, |
0fc1b09f SRRH |
1469 | struct ring_buffer_event *event) |
1470 | { | |
1471 | if (this_cpu_read(trace_buffered_event) == event) { | |
6c536d76 | 1472 | /* Simply release the temp buffer and enable preemption */ |
0fc1b09f | 1473 | this_cpu_dec(trace_buffered_event_cnt); |
6c536d76 | 1474 | preempt_enable_notrace(); |
0fc1b09f SRRH |
1475 | return; |
1476 | } | |
6c536d76 | 1477 | /* ring_buffer_discard_commit() enables preemption */ |
0fc1b09f SRRH |
1478 | ring_buffer_discard_commit(buffer, event); |
1479 | } | |
1480 | ||
dad56ee7 SRRH |
1481 | /* |
1482 | * Helper function for event_trigger_unlock_commit{_regs}(). | |
1483 | * If there are event triggers attached to this event that require | |
499f7bb0 | 1484 | * filtering against its fields, then they will be called, as the | |
dad56ee7 SRRH |
1485 | * entry already holds the field information of the current event. |
1486 | * | |
1487 | * It also checks if the event should be discarded or not. | |
1488 | * It is to be discarded if the event is soft disabled and the | |
1489 | * event was only recorded to process triggers, or if the event | |
1490 | * filter is active and this event did not match the filters. | |
1491 | * | |
1492 | * Returns true if the event is discarded, false otherwise. | |
1493 | */ | |
1494 | static inline bool | |
1495 | __event_trigger_test_discard(struct trace_event_file *file, | |
13292494 | 1496 | struct trace_buffer *buffer, |
dad56ee7 SRRH |
1497 | struct ring_buffer_event *event, |
1498 | void *entry, | |
1499 | enum event_trigger_type *tt) | |
1500 | { | |
1501 | unsigned long eflags = file->flags; | |
1502 | ||
1503 | if (eflags & EVENT_FILE_FL_TRIGGER_COND) | |
b47e3302 | 1504 | *tt = event_triggers_call(file, buffer, entry, event); |
dad56ee7 | 1505 | |
a55f224f SRV |
1506 | if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED | |
1507 | EVENT_FILE_FL_FILTERED | | |
1508 | EVENT_FILE_FL_PID_FILTER)))) | |
1509 | return false; | |
1510 | ||
1511 | if (file->flags & EVENT_FILE_FL_SOFT_DISABLED) | |
1512 | goto discard; | |
1513 | ||
1514 | if (file->flags & EVENT_FILE_FL_FILTERED && | |
1515 | !filter_match_preds(file->filter, entry)) | |
1516 | goto discard; | |
1517 | ||
1518 | if ((file->flags & EVENT_FILE_FL_PID_FILTER) && | |
1519 | trace_event_ignore_this_pid(file)) | |
1520 | goto discard; | |
dad56ee7 | 1521 | |
9cbb1506 | 1522 | return false; |
a55f224f SRV |
1523 | discard: |
1524 | __trace_event_discard_commit(buffer, event); | |
1525 | return true; | |
dad56ee7 SRRH |
1526 | } |
1527 | ||
1528 | /** | |
1529 | * event_trigger_unlock_commit - handle triggers and finish event commit | |
f2cc020d | 1530 | * @file: The file pointer associated with the event |
dad56ee7 SRRH |
1531 | * @buffer: The ring buffer that the event is being written to |
1532 | * @event: The event meta data in the ring buffer | |
1533 | * @entry: The event itself | |
36590c50 | 1534 | * @trace_ctx: The tracing context flags. |
dad56ee7 SRRH |
1535 | * |
1536 | * This is a helper function to handle triggers that require data | |
1537 | * from the event itself. It also tests the event against filters and | |
1538 | * checks whether the event is soft disabled and should be discarded. | |
1539 | */ | |
1540 | static inline void | |
1541 | event_trigger_unlock_commit(struct trace_event_file *file, | |
13292494 | 1542 | struct trace_buffer *buffer, |
dad56ee7 | 1543 | struct ring_buffer_event *event, |
36590c50 | 1544 | void *entry, unsigned int trace_ctx) |
dad56ee7 SRRH |
1545 | { |
1546 | enum event_trigger_type tt = ETT_NONE; | |
1547 | ||
1548 | if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) | |
36590c50 | 1549 | trace_buffer_unlock_commit(file->tr, buffer, event, trace_ctx); |
dad56ee7 SRRH |
1550 | |
1551 | if (tt) | |
c94e45bc | 1552 | event_triggers_post_call(file, tt); |
dad56ee7 SRRH |
1553 | } |
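/*
 * Example (a sketch of the commit path this helper is meant for, roughly
 * following the probe-style event code). my_entry, my_file and my_call
 * are hypothetical stand-ins for a real event's entry layout,
 * trace_event_file and trace_event_call.
 *
 *	struct ring_buffer_event *event;
 *	struct trace_buffer *buffer;
 *	struct my_entry *entry;
 *	unsigned int trace_ctx = tracing_gen_ctx();
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, my_file,
 *						my_call->event.type,
 *						sizeof(*entry), trace_ctx);
 *	if (!event)
 *		return;
 *
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *
 *	event_trigger_unlock_commit(my_file, buffer, event, entry, trace_ctx);
 */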
1554 | ||
61e9dea2 SR |
1555 | #define FILTER_PRED_INVALID ((unsigned short)-1) |
1556 | #define FILTER_PRED_IS_RIGHT (1 << 15) | |
43cd4145 | 1557 | #define FILTER_PRED_FOLD (1 << 15) |
61e9dea2 | 1558 | |
bf93f9ed SR |
1559 | /* |
1560 | * The max number of preds is limited by the size of an unsigned short | |
1561 | * with two flag bits at the MSBs. One bit is used for both the IS_RIGHT | |
1562 | * and FOLD flags. The other is reserved. | |
1563 | * | |
1564 | * 2^14 preds is way more than enough. | |
1565 | */ | |
1566 | #define MAX_FILTER_PRED 16384 | |
4a3d27e9 | 1567 | |
7ce7e424 | 1568 | struct filter_pred; |
1889d209 | 1569 | struct regex; |
7ce7e424 | 1570 | |
1889d209 FW |
1571 | typedef int (*regex_match_func)(char *str, struct regex *r, int len); |
1572 | ||
3f6fe06d | 1573 | enum regex_type { |
b0f1a59a | 1574 | MATCH_FULL = 0, |
3f6fe06d FW |
1575 | MATCH_FRONT_ONLY, |
1576 | MATCH_MIDDLE_ONLY, | |
1577 | MATCH_END_ONLY, | |
60f1d5e3 | 1578 | MATCH_GLOB, |
f79b3f33 | 1579 | MATCH_INDEX, |
3f6fe06d FW |
1580 | }; |
1581 | ||
1889d209 FW |
1582 | struct regex { |
1583 | char pattern[MAX_FILTER_STR_VAL]; | |
1584 | int len; | |
1585 | int field_len; | |
1586 | regex_match_func match; | |
1587 | }; | |
1588 | ||
4ef56902 TZ |
1589 | static inline bool is_string_field(struct ftrace_event_field *field) |
1590 | { | |
1591 | return field->filter_type == FILTER_DYN_STRING || | |
05770dd0 | 1592 | field->filter_type == FILTER_RDYN_STRING || |
4ef56902 | 1593 | field->filter_type == FILTER_STATIC_STRING || |
4c738413 SRV |
1594 | field->filter_type == FILTER_PTR_STRING || |
1595 | field->filter_type == FILTER_COMM; | |
4ef56902 TZ |
1596 | } |
1597 | ||
1598 | static inline bool is_function_field(struct ftrace_event_field *field) | |
1599 | { | |
1600 | return field->filter_type == FILTER_TRACE_FN; | |
1601 | } | |
1602 | ||
3f6fe06d FW |
1603 | extern enum regex_type |
1604 | filter_parse_regex(char *buff, int len, char **search, int *not); | |
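/*
 * Example (sketch): how callers typically split a glob such as "*sched*"
 * into a search string plus match type. Note that filter_parse_regex()
 * modifies the buffer in place, so it must be writable.
 *
 *	char *search;
 *	int not;
 *	enum regex_type type;
 *
 *	type = filter_parse_regex(glob, strlen(glob), &search, &not);
 *	// For "*sched*": type == MATCH_MIDDLE_ONLY and search == "sched";
 *	// "not" is set when the pattern was prefixed with '!'.
 */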
7f1d2f82 | 1605 | extern void print_event_filter(struct trace_event_file *file, |
4bda2d51 | 1606 | struct trace_seq *s); |
7f1d2f82 | 1607 | extern int apply_event_filter(struct trace_event_file *file, |
8b372562 | 1608 | char *filter_string); |
7967b3e0 | 1609 | extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir, |
8b372562 TZ |
1610 | char *filter_string); |
1611 | extern void print_subsystem_event_filter(struct event_subsystem *system, | |
ac1adc55 | 1612 | struct trace_seq *s); |
aa38e9fc | 1613 | extern int filter_assign_type(const char *type); |
1e144d73 SRV |
1614 | extern int create_event_filter(struct trace_array *tr, |
1615 | struct trace_event_call *call, | |
bac5fb97 TZ |
1616 | char *filter_str, bool set_str, |
1617 | struct event_filter **filterp); | |
1618 | extern void free_event_filter(struct event_filter *filter); | |
7ce7e424 | 1619 | |
b3a8c6fd | 1620 | struct ftrace_event_field * |
2425bcb9 | 1621 | trace_find_event_field(struct trace_event_call *call, char *name); |
2e33af02 | 1622 | |
e870e9a1 | 1623 | extern void trace_event_enable_cmd_record(bool enable); |
d914ba37 JF |
1624 | extern void trace_event_enable_tgid_record(bool enable); |
1625 | ||
58b92547 | 1626 | extern int event_trace_init(void); |
3bb06eb6 | 1627 | extern int init_events(void); |
277ba044 | 1628 | extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr); |
0c8916c3 | 1629 | extern int event_trace_del_tracer(struct trace_array *tr); |
720dee53 | 1630 | extern void __trace_early_add_events(struct trace_array *tr); |
e870e9a1 | 1631 | |
3c96529c SRV |
1632 | extern struct trace_event_file *__find_event_file(struct trace_array *tr, |
1633 | const char *system, | |
1634 | const char *event); | |
7f1d2f82 SRRH |
1635 | extern struct trace_event_file *find_event_file(struct trace_array *tr, |
1636 | const char *system, | |
1637 | const char *event); | |
7862ad18 | 1638 | |
85f2b082 TZ |
1639 | static inline void *event_file_data(struct file *filp) |
1640 | { | |
6aa7de05 | 1641 | return READ_ONCE(file_inode(filp)->i_private); |
85f2b082 TZ |
1642 | } |
1643 | ||
20c8928a | 1644 | extern struct mutex event_mutex; |
a59fd602 | 1645 | extern struct list_head ftrace_events; |
ac199db0 | 1646 | |
b1560408 SR |
1647 | /* |
1648 | * When the trace_event_file is the filp->i_private pointer, | |
1649 | * it must be taken under the event_mutex lock, and then checked | |
1650 | * if the EVENT_FILE_FL_FREED flag is set. If it is, then the | |
1651 | * data pointed to by the trace_event_file can not be trusted. | |
1652 | * | |
1653 | * Use event_file_file() to access the trace_event_file from | |
1654 | * the filp the first time under the event_mutex and check for | |
1655 | * NULL. If it needs to be retrieved again and the event_mutex | |
1656 | * is still held, then event_file_data() can be used and it | |
1657 | * is guaranteed to be valid. | |
1658 | */ | |
1659 | static inline struct trace_event_file *event_file_file(struct file *filp) | |
1660 | { | |
1661 | struct trace_event_file *file; | |
1662 | ||
1663 | lockdep_assert_held(&event_mutex); | |
1664 | file = READ_ONCE(file_inode(filp)->i_private); | |
1665 | if (!file || file->flags & EVENT_FILE_FL_FREED) | |
1666 | return NULL; | |
1667 | return file; | |
1668 | } | |
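/*
 * Example of the locking rule above (do_something_locked() is a
 * hypothetical helper):
 *
 *	struct trace_event_file *file;
 *	int ret;
 *
 *	mutex_lock(&event_mutex);
 *	file = event_file_file(filp);
 *	if (!file) {
 *		mutex_unlock(&event_mutex);
 *		return -ENODEV;
 *	}
 *	ret = do_something_locked(file);
 *	mutex_unlock(&event_mutex);
 *	return ret;
 */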
1669 | ||
85f2b082 | 1670 | extern const struct file_operations event_trigger_fops; |
7ef224d1 | 1671 | extern const struct file_operations event_hist_fops; |
2d19bd79 | 1672 | extern const struct file_operations event_hist_debug_fops; |
6c3edaf9 | 1673 | extern const struct file_operations event_inject_fops; |
7ef224d1 TZ |
1674 | |
1675 | #ifdef CONFIG_HIST_TRIGGERS | |
1676 | extern int register_trigger_hist_cmd(void); | |
d0bad49b | 1677 | extern int register_trigger_hist_enable_disable_cmds(void); |
7ef224d1 TZ |
1678 | #else |
1679 | static inline int register_trigger_hist_cmd(void) { return 0; } | |
d0bad49b | 1680 | static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; } |
7ef224d1 | 1681 | #endif |
85f2b082 TZ |
1682 | |
1683 | extern int register_trigger_cmds(void); | |
1684 | extern void clear_event_triggers(struct trace_array *tr); | |
1685 | ||
7491e2c4 TSV |
1686 | enum { |
1687 | EVENT_TRIGGER_FL_PROBE = BIT(0), | |
1688 | }; | |
1689 | ||
85f2b082 TZ |
1690 | struct event_trigger_data { |
1691 | unsigned long count; | |
1692 | int ref; | |
7491e2c4 | 1693 | int flags; |
85f2b082 TZ |
1694 | struct event_trigger_ops *ops; |
1695 | struct event_command *cmd_ops; | |
d8a30f20 | 1696 | struct event_filter __rcu *filter; |
85f2b082 TZ |
1697 | char *filter_str; |
1698 | void *private_data; | |
104f2810 | 1699 | bool paused; |
db1388b4 | 1700 | bool paused_tmp; |
85f2b082 | 1701 | struct list_head list; |
db1388b4 TZ |
1702 | char *name; |
1703 | struct list_head named_list; | |
1704 | struct event_trigger_data *named_data; | |
85f2b082 TZ |
1705 | }; |
1706 | ||
d0bad49b TZ |
1707 | /* Avoid typos */ |
1708 | #define ENABLE_EVENT_STR "enable_event" | |
1709 | #define DISABLE_EVENT_STR "disable_event" | |
1710 | #define ENABLE_HIST_STR "enable_hist" | |
1711 | #define DISABLE_HIST_STR "disable_hist" | |
1712 | ||
1713 | struct enable_trigger_data { | |
1714 | struct trace_event_file *file; | |
1715 | bool enable; | |
1716 | bool hist; | |
1717 | }; | |
1718 | ||
1719 | extern int event_enable_trigger_print(struct seq_file *m, | |
d0bad49b | 1720 | struct event_trigger_data *data); |
47670541 | 1721 | extern void event_enable_trigger_free(struct event_trigger_data *data); |
9ec5a7d1 TZ |
1722 | extern int event_enable_trigger_parse(struct event_command *cmd_ops, |
1723 | struct trace_event_file *file, | |
e1f187d0 TZ |
1724 | char *glob, char *cmd, |
1725 | char *param_and_filter); | |
d0bad49b | 1726 | extern int event_enable_register_trigger(char *glob, |
d0bad49b TZ |
1727 | struct event_trigger_data *data, |
1728 | struct trace_event_file *file); | |
1729 | extern void event_enable_unregister_trigger(char *glob, | |
d0bad49b TZ |
1730 | struct event_trigger_data *test, |
1731 | struct trace_event_file *file); | |
ab4bf008 | 1732 | extern void trigger_data_free(struct event_trigger_data *data); |
47670541 | 1733 | extern int event_trigger_init(struct event_trigger_data *data); |
ab4bf008 TZ |
1734 | extern int trace_event_trigger_enable_disable(struct trace_event_file *file, |
1735 | int trigger_enable); | |
1736 | extern void update_cond_flag(struct trace_event_file *file); | |
ab4bf008 TZ |
1737 | extern int set_trigger_filter(char *filter_str, |
1738 | struct event_trigger_data *trigger_data, | |
1739 | struct trace_event_file *file); | |
db1388b4 TZ |
1740 | extern struct event_trigger_data *find_named_trigger(const char *name); |
1741 | extern bool is_named_trigger(struct event_trigger_data *test); | |
1742 | extern int save_named_trigger(const char *name, | |
1743 | struct event_trigger_data *data); | |
1744 | extern void del_named_trigger(struct event_trigger_data *data); | |
1745 | extern void pause_named_trigger(struct event_trigger_data *data); | |
1746 | extern void unpause_named_trigger(struct event_trigger_data *data); | |
1747 | extern void set_named_trigger_data(struct event_trigger_data *data, | |
1748 | struct event_trigger_data *named_data); | |
067fe038 TZ |
1749 | extern struct event_trigger_data * |
1750 | get_named_trigger_data(struct event_trigger_data *data); | |
ab4bf008 | 1751 | extern int register_event_command(struct event_command *cmd); |
d0bad49b TZ |
1752 | extern int unregister_event_command(struct event_command *cmd); |
1753 | extern int register_trigger_hist_enable_disable_cmds(void); | |
86599dbe TZ |
1754 | extern bool event_trigger_check_remove(const char *glob); |
1755 | extern bool event_trigger_empty_param(const char *param); | |
1756 | extern int event_trigger_separate_filter(char *param_and_filter, char **param, | |
1757 | char **filter, bool param_required); | |
1758 | extern struct event_trigger_data * | |
1759 | event_trigger_alloc(struct event_command *cmd_ops, | |
1760 | char *cmd, | |
1761 | char *param, | |
1762 | void *private_data); | |
1763 | extern int event_trigger_parse_num(char *trigger, | |
1764 | struct event_trigger_data *trigger_data); | |
1765 | extern int event_trigger_set_filter(struct event_command *cmd_ops, | |
1766 | struct trace_event_file *file, | |
1767 | char *param, | |
1768 | struct event_trigger_data *trigger_data); | |
1769 | extern void event_trigger_reset_filter(struct event_command *cmd_ops, | |
1770 | struct event_trigger_data *trigger_data); | |
1771 | extern int event_trigger_register(struct event_command *cmd_ops, | |
1772 | struct trace_event_file *file, | |
1773 | char *glob, | |
b8cc44a4 TZ |
1774 | struct event_trigger_data *trigger_data); |
1775 | extern void event_trigger_unregister(struct event_command *cmd_ops, | |
1776 | struct trace_event_file *file, | |
1777 | char *glob, | |
1778 | struct event_trigger_data *trigger_data); | |
ab4bf008 | 1779 | |
bb32500f SRG |
1780 | extern void event_file_get(struct trace_event_file *file); |
1781 | extern void event_file_put(struct trace_event_file *file); | |
1782 | ||
85f2b082 TZ |
1783 | /** |
1784 | * struct event_trigger_ops - callbacks for trace event triggers | |
1785 | * | |
1786 | * The methods in this structure provide per-event trigger hooks for | |
1787 | * various trigger operations. | |
1788 | * | |
fb339e53 TZ |
1789 | * The @init and @free methods are used during trigger setup and |
1790 | * teardown, typically called from an event_command's @parse() | |
1791 | * function implementation. | |
1792 | * | |
1793 | * The @print method is used to print the trigger spec. | |
1794 | * | |
1795 | * The @trigger method is the function that actually implements the | |
1796 | * trigger and is called in the context of the triggering event | |
1797 | * whenever that event occurs. | |
1798 | * | |
85f2b082 TZ |
1799 | * All the methods below, except for @init() and @free(), must be |
1800 | * implemented. | |
1801 | * | |
fb339e53 | 1802 | * @trigger: The trigger 'probe' function called when the triggering |
85f2b082 TZ |
1803 | * event occurs. The data passed into this callback is the data |
1804 | * that was supplied to the event_command @reg() function that | |
c4a59230 TZ |
1805 | * registered the trigger (see struct event_command) along with |
1806 | * the trace record, rec. | |
85f2b082 TZ |
1807 | * |
1808 | * @init: An optional initialization function called for the trigger | |
1809 | * when the trigger is registered (via the event_command reg() | |
1810 | * function). This can be used to perform per-trigger | |
1811 | * initialization such as incrementing a per-trigger reference | |
1812 | * count, for instance. This is usually implemented by the | |
1813 | * generic utility function @event_trigger_init() (see | |
1814 | * trace_event_triggers.c). | |
1815 | * | |
1816 | * @free: An optional de-initialization function called for the | |
1817 | * trigger when the trigger is unregistered (via the | |
1818 | * event_command @reg() function). This can be used to perform | |
1819 | * per-trigger de-initialization such as decrementing a | |
1820 | * per-trigger reference count and freeing corresponding trigger | |
1821 | * data, for instance. This is usually implemented by the | |
1822 | * generic utility function @event_trigger_free() (see | |
1823 | * trace_event_triggers.c). | |
1824 | * | |
1825 | * @print: The callback function invoked to have the trigger print | |
1826 | * itself. This is usually implemented by a wrapper function | |
1827 | * that calls the generic utility function @event_trigger_print() | |
1828 | * (see trace_event_triggers.c). | |
1829 | */ | |
1830 | struct event_trigger_ops { | |
fb339e53 TZ |
1831 | void (*trigger)(struct event_trigger_data *data, |
1832 | struct trace_buffer *buffer, | |
1833 | void *rec, | |
1834 | struct ring_buffer_event *rbe); | |
47670541 TZ |
1835 | int (*init)(struct event_trigger_data *data); |
1836 | void (*free)(struct event_trigger_data *data); | |
85f2b082 | 1837 | int (*print)(struct seq_file *m, |
85f2b082 TZ |
1838 | struct event_trigger_data *data); |
1839 | }; | |
1840 | ||
1841 | /** | |
1842 | * struct event_command - callbacks and data members for event commands | |
1843 | * | |
1844 | * Event commands are invoked by users by writing the command name | |
1845 | * into the 'trigger' file associated with a trace event. The | |
1846 | * parameters associated with a specific invocation of an event | |
1847 | * command are used to create an event trigger instance, which is | |
1848 | * added to the list of trigger instances associated with that trace | |
1849 | * event. When the event is hit, the set of triggers associated with | |
1850 | * that event is invoked. | |
1851 | * | |
1852 | * The data members in this structure provide per-event command data | |
1853 | * for various event commands. | |
1854 | * | |
1855 | * All the data members below, except for @post_trigger, must be set | |
1856 | * for each event command. | |
1857 | * | |
1858 | * @name: The unique name that identifies the event command. This is | |
1859 | * the name used when setting triggers via trigger files. | |
1860 | * | |
1861 | * @trigger_type: A unique id that identifies the event command | |
1862 | * 'type'. This value has two purposes, the first to ensure that | |
1863 | * only one trigger of the same type can be set at a given time | |
1864 | * for a particular event e.g. it doesn't make sense to have both | |
1865 | * a traceon and traceoff trigger attached to a single event at | |
1866 | * the same time, so traceon and traceoff have the same type | |
1867 | * though they have different names. The @trigger_type value is | |
1868 | * also used as a bit value for deferring the actual trigger | |
1869 | * action until after the current event is finished. Some | |
1870 | * commands need to do this if they themselves log to the trace | |
1871 | * buffer (see the @post_trigger() member below). @trigger_type | |
1872 | * values are defined by adding new values to the trigger_type | |
af658dca | 1873 | * enum in include/linux/trace_events.h. |
85f2b082 | 1874 | * |
353206f5 | 1875 | * @flags: See the enum event_command_flags below. |
a5863dae | 1876 | * |
a88e1cfb TZ |
1877 | * All the methods below, except for @set_filter() and @unreg_all(), |
1878 | * must be implemented. | |
85f2b082 | 1879 | * |
9ec5a7d1 | 1880 | * @parse: The callback function responsible for parsing and |
85f2b082 TZ |
1881 | * registering the trigger written to the 'trigger' file by the |
1882 | * user. It allocates the trigger instance and registers it with | |
1883 | * the appropriate trace event. It makes use of the other | |
1884 | * event_command callback functions to orchestrate this, and is | |
1885 | * usually implemented by the generic utility function | |
1886 | * @event_trigger_callback() (see trace_event_triggers.c). | |
1887 | * | |
1888 | * @reg: Adds the trigger to the list of triggers associated with the | |
1889 | * event, and enables the event trigger itself, after | |
1890 | * initializing it (via the event_trigger_ops @init() function). | |
1891 | * This is also where commands can use the @trigger_type value to | |
1892 | * make the decision as to whether or not multiple instances of | |
1893 | * the trigger should be allowed. This is usually implemented by | |
1894 | * the generic utility function @register_trigger() (see | |
1895 | * trace_event_triggers.c). | |
1896 | * | |
1897 | * @unreg: Removes the trigger from the list of triggers associated | |
1898 | * with the event, and disables the event trigger itself, after | |
1899 | * initializing it (via the event_trigger_ops @free() function). | |
1900 | * This is usually implemented by the generic utility function | |
1901 | * @unregister_trigger() (see trace_event_triggers.c). | |
1902 | * | |
a88e1cfb TZ |
1903 | * @unreg_all: An optional function called to remove all the triggers |
1904 | * from the list of triggers associated with the event. Called | |
1905 | * when a trigger file is opened in truncate mode. | |
1906 | * | |
85f2b082 TZ |
1907 | * @set_filter: An optional function called to parse and set a filter |
1908 | * for the trigger. If no @set_filter() method is set for the | |
1909 | * event command, filters set by the user for the command will be | |
1910 | * ignored. This is usually implemented by the generic utility | |
1911 | * function @set_trigger_filter() (see trace_event_triggers.c). | |
1912 | * | |
1913 | * @get_trigger_ops: The callback function invoked to retrieve the | |
1914 | * event_trigger_ops implementation associated with the command. | |
9ec5a7d1 TZ |
1915 | * This callback function allows a single event_command to |
1916 | * support multiple trigger implementations via different sets of | |
1917 | * event_trigger_ops, depending on the value of the @param | |
1918 | * string. | |
85f2b082 TZ |
1919 | */ |
1920 | struct event_command { | |
1921 | struct list_head list; | |
1922 | char *name; | |
1923 | enum event_trigger_type trigger_type; | |
353206f5 | 1924 | int flags; |
9ec5a7d1 TZ |
1925 | int (*parse)(struct event_command *cmd_ops, |
1926 | struct trace_event_file *file, | |
1927 | char *glob, char *cmd, | |
1928 | char *param_and_filter); | |
85f2b082 | 1929 | int (*reg)(char *glob, |
85f2b082 | 1930 | struct event_trigger_data *data, |
7f1d2f82 | 1931 | struct trace_event_file *file); |
85f2b082 | 1932 | void (*unreg)(char *glob, |
85f2b082 | 1933 | struct event_trigger_data *data, |
7f1d2f82 | 1934 | struct trace_event_file *file); |
a88e1cfb | 1935 | void (*unreg_all)(struct trace_event_file *file); |
85f2b082 TZ |
1936 | int (*set_filter)(char *filter_str, |
1937 | struct event_trigger_data *data, | |
7f1d2f82 | 1938 | struct trace_event_file *file); |
85f2b082 TZ |
1939 | struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param); |
1940 | }; | |
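/*
 * Example (a bare skeleton of how the two structures fit together). All
 * my_* identifiers and ETT_MY_CMD are hypothetical; a real command also
 * provides .print and wires .reg/.unreg to the generic register/unregister
 * helpers in trace_events_trigger.c.
 *
 *	static void my_trigger(struct event_trigger_data *data,
 *			       struct trace_buffer *buffer, void *rec,
 *			       struct ring_buffer_event *rbe)
 *	{
 *		// per-event action goes here
 *	}
 *
 *	static struct event_trigger_ops my_trigger_ops = {
 *		.trigger	= my_trigger,
 *		.init		= event_trigger_init,
 *	};
 *
 *	static struct event_trigger_ops *
 *	my_get_trigger_ops(char *cmd, char *param)
 *	{
 *		return &my_trigger_ops;
 *	}
 *
 *	static struct event_command my_cmd = {
 *		.name		 = "my_cmd",
 *		.trigger_type	 = ETT_MY_CMD,		// hypothetical type bit
 *		.parse		 = my_parse,		// hypothetical
 *		.reg		 = my_register_trigger,	// hypothetical
 *		.unreg		 = my_unregister_trigger,	// hypothetical
 *		.set_filter	 = set_trigger_filter,
 *		.get_trigger_ops = my_get_trigger_ops,
 *	};
 *
 *	// typically from an __init function:
 *	ret = register_event_command(&my_cmd);
 */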
1941 | ||
353206f5 SRRH |
1942 | /** |
1943 | * enum event_command_flags - flags for struct event_command | |
1944 | * | |
1945 | * @POST_TRIGGER: A flag that says whether or not this command needs | |
1946 | * to have its action delayed until after the current event has | |
1947 | * been closed. Some triggers need to avoid being invoked while | |
1948 | * an event is currently in the process of being logged, since | |
1949 | * the trigger may itself log data into the trace buffer. Thus | |
1950 | * we make sure the current event is committed before invoking | |
1951 | * those triggers. To do that, the trigger invocation is split | |
1952 | * in two - the first part checks the filter using the current | |
1953 | * trace record; if a command has the @post_trigger flag set, it | |
1954 | * sets a bit for itself in the return value, otherwise it | |
1955 | * directly invokes the trigger. Once all commands have been | |
1956 | * either invoked or set their return flag, the current record is | |
1957 | * either committed or discarded. At that point, if any commands | |
1958 | * have deferred their triggers, those commands are finally | |
1959 | * invoked following the close of the current event. In other | |
1960 | * words, if the event_trigger_ops @trigger() probe implementation | |
1961 | * itself logs to the trace buffer, this flag should be set, | |
1962 | * otherwise it can be left unspecified. | |
1963 | * | |
1964 | * @NEEDS_REC: A flag that says whether or not this command needs | |
1965 | * access to the trace record in order to perform its function, | |
1966 | * regardless of whether or not it has a filter associated with | |
1967 | * it (filters make a trigger require access to the trace record | |
1968 | * but are not always present). | |
1969 | */ | |
1970 | enum event_command_flags { | |
1971 | EVENT_CMD_FL_POST_TRIGGER = 1, | |
1972 | EVENT_CMD_FL_NEEDS_REC = 2, | |
1973 | }; | |
1974 | ||
1975 | static inline bool event_command_post_trigger(struct event_command *cmd_ops) | |
1976 | { | |
1977 | return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER; | |
1978 | } | |
1979 | ||
1980 | static inline bool event_command_needs_rec(struct event_command *cmd_ops) | |
1981 | { | |
1982 | return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC; | |
1983 | } | |
1984 | ||
7f1d2f82 | 1985 | extern int trace_event_enable_disable(struct trace_event_file *file, |
85f2b082 | 1986 | int enable, int soft_disable); |
93e31ffb | 1987 | extern int tracing_alloc_snapshot(void); |
a35873a0 TZ |
1988 | extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data); |
1989 | extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update); | |
1990 | ||
1991 | extern int tracing_snapshot_cond_disable(struct trace_array *tr); | |
1992 | extern void *tracing_cond_snapshot_data(struct trace_array *tr); | |
85f2b082 | 1993 | |
e9fb2b6d SR |
1994 | extern const char *__start___trace_bprintk_fmt[]; |
1995 | extern const char *__stop___trace_bprintk_fmt[]; | |
1996 | ||
102c9323 SRRH |
1997 | extern const char *__start___tracepoint_str[]; |
1998 | extern const char *__stop___tracepoint_str[]; | |
1999 | ||
b9f9108c | 2000 | void trace_printk_control(bool enabled); |
81698831 | 2001 | void trace_printk_start_comm(void); |
613f04a0 | 2002 | int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set); |
2b6080f2 | 2003 | int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled); |
07d777fe | 2004 | |
5c3469cb MH |
2005 | /* Used from boot time tracer */ |
2006 | extern int trace_set_options(struct trace_array *tr, char *option); | |
2007 | extern int tracing_set_tracer(struct trace_array *tr, const char *buf); | |
2008 | extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr, | |
2009 | unsigned long size, int cpu_id); | |
2010 | extern int tracing_set_cpumask(struct trace_array *tr, | |
2011 | cpumask_var_t tracing_cpumask_new); | |
2012 | ||
2013 | ||
7e465baa TZ |
2014 | #define MAX_EVENT_NAME_LEN 64 |
2015 | ||
7e465baa TZ |
2016 | extern ssize_t trace_parse_run_command(struct file *file, |
2017 | const char __user *buffer, size_t count, loff_t *ppos, | |
d262271d | 2018 | int (*createfn)(const char *)); |
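/*
 * Example (sketch): dynamic-event style write handlers pass each written
 * line to a createfn callback this way; my_create_fn() is hypothetical.
 *
 *	static int my_create_fn(const char *raw_command)
 *	{
 *		// parse one command, e.g. "p:myprobe do_sys_open"
 *		return 0;
 *	}
 *
 *	static ssize_t my_write(struct file *file, const char __user *buffer,
 *				size_t count, loff_t *ppos)
 *	{
 *		return trace_parse_run_command(file, buffer, count, ppos,
 *					       my_create_fn);
 *	}
 */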
7e465baa | 2019 | |
8a062902 | 2020 | extern unsigned int err_pos(char *cmd, const char *str); |
2f754e77 SRV |
2021 | extern void tracing_log_err(struct trace_array *tr, |
2022 | const char *loc, const char *cmd, | |
1581a884 | 2023 | const char **errs, u8 type, u16 pos); |
8a062902 | 2024 | |
ca268da6 SRRH |
2025 | /* |
2026 | * Normal trace_printk() and friends allocates special buffers | |
2027 | * to do the manipulation, as well as saves the print formats | |
2028 | * into sections to display. But the trace infrastructure wants | |
2029 | * to use these without the added overhead at the price of being | |
2030 | * a bit slower (used mainly for warnings, where we don't care | |
2031 | * about performance). The internal_trace_puts() is for such | |
2032 | * a purpose. | |
2033 | */ | |
2034 | #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str)) | |
2035 | ||
4e5292ea | 2036 | #undef FTRACE_ENTRY |
04ae87a5 | 2037 | #define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \ |
2425bcb9 | 2038 | extern struct trace_event_call \ |
52f5684c | 2039 | __aligned(4) event_##call; |
4e5292ea | 2040 | #undef FTRACE_ENTRY_DUP |
04ae87a5 PZ |
2041 | #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \ |
2042 | FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print)) | |
a4a551b8 | 2043 | #undef FTRACE_ENTRY_PACKED |
04ae87a5 PZ |
2044 | #define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print) \ |
2045 | FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print)) | |
a4a551b8 | 2046 | |
4e5292ea | 2047 | #include "trace_entries.h" |
e1112b4d | 2048 | |
6e48b550 | 2049 | #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER) |
2425bcb9 | 2050 | int perf_ftrace_event_register(struct trace_event_call *call, |
ced39002 JO |
2051 | enum trace_reg type, void *data); |
2052 | #else | |
2053 | #define perf_ftrace_event_register NULL | |
6e48b550 | 2054 | #endif |
ced39002 | 2055 | |
5f893b26 SRRH |
2056 | #ifdef CONFIG_FTRACE_SYSCALLS |
2057 | void init_ftrace_syscalls(void); | |
dbfeaa7a | 2058 | const char *get_syscall_name(int syscall); |
5f893b26 SRRH |
2059 | #else |
2060 | static inline void init_ftrace_syscalls(void) { } | |
dbfeaa7a TZ |
2061 | static inline const char *get_syscall_name(int syscall) |
2062 | { | |
2063 | return NULL; | |
2064 | } | |
5f893b26 SRRH |
2065 | #endif |
2066 | ||
2067 | #ifdef CONFIG_EVENT_TRACING | |
2068 | void trace_event_init(void); | |
f57a4143 | 2069 | void trace_event_eval_update(struct trace_eval_map **map, int len); |
5c3469cb MH |
2070 | /* Used from boot time tracer */ |
2071 | extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set); | |
2072 | extern int trigger_process_regex(struct trace_event_file *file, char *buff); | |
5f893b26 SRRH |
2073 | #else |
2074 | static inline void __init trace_event_init(void) { } | |
f57a4143 | 2075 | static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { } |
5f893b26 SRRH |
2076 | #endif |
2077 | ||
2824f503 SRV |
2078 | #ifdef CONFIG_TRACER_SNAPSHOT |
2079 | void tracing_snapshot_instance(struct trace_array *tr); | |
2080 | int tracing_alloc_snapshot_instance(struct trace_array *tr); | |
180e4e39 VD |
2081 | int tracing_arm_snapshot(struct trace_array *tr); |
2082 | void tracing_disarm_snapshot(struct trace_array *tr); | |
2824f503 SRV |
2083 | #else |
2084 | static inline void tracing_snapshot_instance(struct trace_array *tr) { } | |
2085 | static inline int tracing_alloc_snapshot_instance(struct trace_array *tr) | |
2086 | { | |
2087 | return 0; | |
2088 | } | |
180e4e39 VD |
2089 | static inline int tracing_arm_snapshot(struct trace_array *tr) { return 0; } |
2090 | static inline void tracing_disarm_snapshot(struct trace_array *tr) { } | |
2824f503 SRV |
2091 | #endif |
2092 | ||
3f1756dc SRV |
2093 | #ifdef CONFIG_PREEMPT_TRACER |
2094 | void tracer_preempt_on(unsigned long a0, unsigned long a1); | |
2095 | void tracer_preempt_off(unsigned long a0, unsigned long a1); | |
2096 | #else | |
2097 | static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { } | |
2098 | static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { } | |
2099 | #endif | |
2100 | #ifdef CONFIG_IRQSOFF_TRACER | |
2101 | void tracer_hardirqs_on(unsigned long a0, unsigned long a1); | |
2102 | void tracer_hardirqs_off(unsigned long a0, unsigned long a1); | |
2103 | #else | |
2104 | static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { } | |
2105 | static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { } | |
2106 | #endif | |
2107 | ||
0c97bf86 MO |
2108 | /* |
2109 | * Reset the state of the trace_iterator so that it can read consumed data. | |
2110 | * Normally, the trace_iterator is used for reading the data when it is not | |
2111 | * consumed, and must retain state. | |
2112 | */ | |
2113 | static __always_inline void trace_iterator_reset(struct trace_iterator *iter) | |
2114 | { | |
dba87967 | 2115 | memset_startat(iter, 0, seq); |
0c97bf86 MO |
2116 | iter->pos = -1; |
2117 | } | |
2118 | ||
42d120e2 | 2119 | /* Check the name is good for event/group/fields */ |
575b76cb | 2120 | static inline bool __is_good_name(const char *name, bool hash_ok) |
42d120e2 | 2121 | { |
575b76cb | 2122 | if (!isalpha(*name) && *name != '_' && (!hash_ok || *name != '-')) |
42d120e2 TZ |
2123 | return false; |
2124 | while (*++name != '\0') { | |
575b76cb SRG |
2125 | if (!isalpha(*name) && !isdigit(*name) && *name != '_' && |
2126 | (!hash_ok || *name != '-')) | |
42d120e2 TZ |
2127 | return false; |
2128 | } | |
2129 | return true; | |
2130 | } | |
2131 | ||
575b76cb SRG |
2132 | /* Check the name is good for event/group/fields */ |
2133 | static inline bool is_good_name(const char *name) | |
2134 | { | |
2135 | return __is_good_name(name, false); | |
2136 | } | |
2137 | ||
2138 | /* Check the name is good for system */ | |
2139 | static inline bool is_good_system_name(const char *name) | |
2140 | { | |
2141 | return __is_good_name(name, true); | |
2142 | } | |
2143 | ||
7491e2c4 TSV |
2144 | /* Convert certain expected symbols into '_' when generating event names */ |
2145 | static inline void sanitize_event_name(char *name) | |
2146 | { | |
2147 | while (*name++ != '\0') | |
2148 | if (*name == ':' || *name == '.') | |
2149 | *name = '_'; | |
2150 | } | |
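/*
 * Example: a symbol-derived name such as "usb:submit_urb.isra.0" becomes
 * "usb_submit_urb_isra_0" after sanitize_event_name(). The loop above
 * only starts checking from the second character onward.
 */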
2151 | ||
bc87cf0a DBO |
2152 | /* |
2153 | * This is a generic way to read and write a u64 value from a file in tracefs. | |
2154 | * | |
2155 | * The value is stored on the variable pointed by *val. The value needs | |
2156 | * to be at least *min and at most *max. The write is protected by an | |
2157 | * existing *lock. | |
2158 | */ | |
2159 | struct trace_min_max_param { | |
2160 | struct mutex *lock; | |
2161 | u64 *val; | |
2162 | u64 *min; | |
2163 | u64 *max; | |
2164 | }; | |
2165 | ||
2166 | #define U64_STR_SIZE 24 /* 20 digits max */ | |
2167 | ||
2168 | extern const struct file_operations trace_min_max_fops; | |
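/*
 * Example (sketch): exposing a u64 knob through tracefs with the generic
 * min/max file operations. my_lock, my_val, my_min, my_max, "my_knob"
 * and parent are hypothetical.
 *
 *	static struct trace_min_max_param my_param = {
 *		.lock	= &my_lock,
 *		.val	= &my_val,
 *		.min	= &my_min,
 *		.max	= &my_max,
 *	};
 *
 *	tracefs_create_file("my_knob", TRACE_MODE_WRITE, parent,
 *			    &my_param, &trace_min_max_fops);
 */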
2169 | ||
102227b9 DBO |
2170 | #ifdef CONFIG_RV |
2171 | extern int rv_init_interface(void); | |
2172 | #else | |
2173 | static inline int rv_init_interface(void) | |
2174 | { | |
2175 | return 0; | |
2176 | } | |
2177 | #endif | |
2178 | ||
bc0c38d1 | 2179 | #endif /* _LINUX_KERNEL_TRACE_H */ |