// SPDX-License-Identifier: GPL-2.0

#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/trace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/trace_events.h>
#include <linux/compiler.h>
#include <linux/glob.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>
#include <linux/ctype.h>
#include <linux/once_lite.h>
#include <linux/ftrace_regs.h>

#include "pid_list.h"

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_syscalls */
#include <asm/syscall.h>	/* some archs define it here */
#endif

#define TRACE_MODE_WRITE	0640
#define TRACE_MODE_READ		0440

enum trace_type {
        __TRACE_FIRST_TYPE = 0,

        TRACE_FN,
        TRACE_CTX,
        TRACE_WAKE,
        TRACE_STACK,
        TRACE_PRINT,
        TRACE_BPRINT,
        TRACE_MMIO_RW,
        TRACE_MMIO_MAP,
        TRACE_BRANCH,
        TRACE_GRAPH_RET,
        TRACE_GRAPH_ENT,
        TRACE_GRAPH_RETADDR_ENT,
        TRACE_USER_STACK,
        TRACE_BLK,
        TRACE_BPUTS,
        TRACE_HWLAT,
        TRACE_OSNOISE,
        TRACE_TIMERLAT,
        TRACE_RAW_DATA,
        TRACE_FUNC_REPEATS,

        __TRACE_LAST_TYPE,
};


#undef __field
#define __field(type, item)		type item;

#undef __field_fn
#define __field_fn(type, item)		type item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __field_packed
#define __field_packed(type, container, item)

#undef __array
#define __array(type, item, size)	type item[size];

/*
 * For backward compatibility, older user space expects to see the
 * kernel_stack event with a fixed size caller field. But today the fixed
 * size is ignored by the kernel, and the real structure is dynamic.
 * Expose to user space: "unsigned long caller[8];" but the real structure
 * will be "unsigned long caller[] __counted_by(size)"
 */
#undef __stack_array
#define __stack_array(type, item, size, field)	type item[] __counted_by(field);

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type item[];

#undef __rel_dynamic_array
#define __rel_dynamic_array(type, item)	type item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)	\
	struct struct_name {					\
		struct trace_entry	ent;			\
		tstruct						\
	}

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn)	\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))

#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed

#include "trace_entries.h"

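/*
 * Schematic example of what the FTRACE_ENTRY() machinery above produces:
 * an invocation in trace_entries.h along the lines of
 *
 *	FTRACE_ENTRY(function, ftrace_entry, TRACE_FN,
 *		F_STRUCT(
 *			__field_fn(unsigned long, ip)
 *			__field_fn(unsigned long, parent_ip)
 *		),
 *		...)
 *
 * expands (via the __field*() definitions above) into roughly:
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 *
 * See trace_entries.h for the actual entry definitions.
 */
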
/* Use this for memory failure errors */
#define MEM_FAIL(condition, fmt, ...)					\
	DO_ONCE_LITE_IF(condition, pr_err, "ERROR: " fmt, ##__VA_ARGS__)
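
/*
 * Note that DO_ONCE_LITE_IF() evaluates to the truth of @condition (the
 * pr_err() only fires once), so allocation failures can be reported and
 * handled in one expression.  Illustrative sketch:
 *
 *	if (MEM_FAIL(!buf, "failed to allocate trace buffer\n"))
 *		return -ENOMEM;
 */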

#define FAULT_STRING "(fault)"

#define HIST_STACKTRACE_DEPTH	16
#define HIST_STACKTRACE_SIZE	(HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
#define HIST_STACKTRACE_SKIP	5

/*
 * Syscalls are special and need special handling; this is why
 * they are not included in trace_entries.h.
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct eprobe_trace_entry_head {
	struct trace_entry	ent;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};

struct fentry_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct fexit_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};

#define TRACE_BUF_SIZE		1024

struct trace_array;

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	local_t			disabled;

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	u64			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];

#ifdef CONFIG_FUNCTION_TRACER
	int			ftrace_ignore_pid;
#endif
	bool			ignore_pid;
};

struct tracer;
struct trace_option_dentry;

struct array_buffer {
	struct trace_array		*tr;
	struct trace_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
	u64				time_start;
	int				cpu;
};

#define TRACE_FLAGS_MAX_SIZE		32

struct trace_options {
	struct tracer			*tracer;
	struct trace_option_dentry	*topts;
};

struct trace_pid_list *trace_pid_list_alloc(void);
void trace_pid_list_free(struct trace_pid_list *pid_list);
bool trace_pid_list_is_set(struct trace_pid_list *pid_list, unsigned int pid);
int trace_pid_list_set(struct trace_pid_list *pid_list, unsigned int pid);
int trace_pid_list_clear(struct trace_pid_list *pid_list, unsigned int pid);
int trace_pid_list_first(struct trace_pid_list *pid_list, unsigned int *pid);
int trace_pid_list_next(struct trace_pid_list *pid_list, unsigned int pid,
			unsigned int *next);

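/*
 * Iteration sketch for the pid list API above: the first/next pair
 * returns 0 when a pid was found and non-zero when the walk is done,
 * so every pid that is set can be visited with ("handle_pid" below is
 * a hypothetical callback):
 *
 *	unsigned int pid;
 *	int ret;
 *
 *	for (ret = trace_pid_list_first(pid_list, &pid); !ret;
 *	     ret = trace_pid_list_next(pid_list, pid + 1, &pid))
 *		handle_pid(pid);
 */
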
enum {
	TRACE_PIDS		= BIT(0),
	TRACE_NO_PIDS		= BIT(1),
};

static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list,
				    struct trace_pid_list *no_pid_list)
{
	/* Return true if the pid list in type has pids */
	return ((type & TRACE_PIDS) && pid_list) ||
		((type & TRACE_NO_PIDS) && no_pid_list);
}

static inline bool still_need_pid_events(int type, struct trace_pid_list *pid_list,
					 struct trace_pid_list *no_pid_list)
{
	/*
	 * When turning off what is in @type, return true if the
	 * "other" pid list still has pids in it.
	 */
	return (!(type & TRACE_PIDS) && pid_list) ||
		(!(type & TRACE_NO_PIDS) && no_pid_list);
}

typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);

/**
 * struct cond_snapshot - conditional snapshot data and callback
 *
 * The cond_snapshot structure encapsulates a callback function and
 * data associated with the snapshot for a given tracing instance.
 *
 * When a snapshot is taken conditionally, by invoking
 * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
 * passed in turn to the cond_snapshot.update() function.  That data
 * can be compared by the update() implementation with the cond_data
 * contained within the struct cond_snapshot instance associated with
 * the trace_array.  Because the tr->max_lock is held throughout the
 * update() call, the update() function can directly retrieve the
 * cond_snapshot and cond_data associated with the per-instance
 * snapshot associated with the trace_array.
 *
 * The cond_snapshot.update() implementation can save data to be
 * associated with the snapshot if it decides to, and returns 'true'
 * in that case, or it returns 'false' if the conditional snapshot
 * shouldn't be taken.
 *
 * The cond_snapshot instance is created and associated with the
 * user-defined cond_data by tracing_snapshot_cond_enable().
 * Likewise, the cond_snapshot instance is destroyed and is no longer
 * associated with the trace instance by
 * tracing_snapshot_cond_disable().
 *
 * The method below is required.
 *
 * @update: When a conditional snapshot is invoked, the update()
 *	callback function is invoked with the tr->max_lock held.  The
 *	update() implementation signals whether or not to actually
 *	take the snapshot, by returning 'true' if so, 'false' if no
 *	snapshot should be taken.  Because the max_lock is held for
 *	the duration of update(), the implementation is safe to
 *	directly retrieve and save any implementation data it needs
 *	in association with the snapshot.
 */
struct cond_snapshot {
	void				*cond_data;
	cond_update_fn_t		update;
};

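/*
 * Illustrative sketch of wiring up a conditional snapshot (the "my_*"
 * names are hypothetical): an update() callback that only snapshots
 * once a caller-provided threshold is crossed:
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_cond *cond = cond_data;
 *
 *		return cond->value > cond->threshold;
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, &my_cond, my_update);
 *	...
 *	tracing_snapshot_cond(tr, &my_cond);
 */
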
/*
 * struct trace_func_repeats - used to keep track of the consecutive
 * (on the same CPU) calls of a single function.
 */
struct trace_func_repeats {
	unsigned long	ip;
	unsigned long	parent_ip;
	unsigned long	count;
	u64		ts_last_call;
};

struct trace_module_delta {
	struct rcu_head	rcu;
	long		delta[];
};

/*
 * The trace array - an array of per-CPU trace arrays.  This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct list_head	list;
	char			*name;
	struct array_buffer	array_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the array_buffer.
	 * When a snapshot is taken, the buffer of the max_buffer is swapped
	 * with the buffer of the array_buffer and the buffers are reset for
	 * the array_buffer so the tracing can continue.
	 */
	struct array_buffer	max_buffer;
	bool			allocated_snapshot;
	spinlock_t		snapshot_trigger_lock;
	unsigned int		snapshot;
	unsigned long		max_latency;
#ifdef CONFIG_FSNOTIFY
	struct dentry		*d_max_latency;
	struct work_struct	fsnotify_work;
	struct irq_work		fsnotify_irqwork;
#endif
#endif
	/* The below is for the memory mapped ring buffer */
	unsigned int		mapped;
	unsigned long		range_addr_start;
	unsigned long		range_addr_size;
	char			*range_name;
	long			text_delta;
	struct trace_module_delta	*module_delta;
	void			*scratch; /* pointer in persistent memory */
	int			scratch_size;

	int			buffer_disabled;

	struct trace_pid_list	__rcu *filtered_pids;
	struct trace_pid_list	__rcu *filtered_no_pids;
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot.  The buffers themselves are
	 * protected by per_cpu spinlocks.  But the action of the swap
	 * needs its own lock.
	 *
	 * This is defined as an arch_spinlock_t in order to help
	 * with performance when lockdep debugging is enabled.
	 *
	 * It is also used in other places outside of update_max_tr(),
	 * so it needs to be defined outside of CONFIG_TRACER_MAX_TRACE.
	 */
	arch_spinlock_t		max_lock;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	int			stop_count;
	int			clock_id;
	int			nr_topts;
	bool			clear_trace;
	int			buffer_percent;
	unsigned int		n_err_log_entries;
	struct tracer		*current_trace;
	unsigned int		trace_flags;
	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	const char		*system_names;
	struct list_head	err_log;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct eventfs_inode	*event_dir;
	struct trace_options	*topts;
	struct list_head	systems;
	struct list_head	events;
	struct list_head	marker_list;
	struct trace_event_file *trace_marker_file;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	/* one per_cpu trace_pipe can be opened by only one user */
	cpumask_var_t		pipe_cpumask;
	int			ref;
	int			trace_ref;
#ifdef CONFIG_MODULES
	struct list_head	mod_events;
#endif
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	struct trace_pid_list	__rcu *function_pids;
	struct trace_pid_list	__rcu *function_no_pids;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	struct fgraph_ops	*gops;
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
	/* All of these are protected by the ftrace_lock */
	struct list_head	func_probes;
	struct list_head	mod_trace;
	struct list_head	mod_notrace;
#endif
	/* function tracing enabled */
	int			function_enabled;
#endif
	int			no_filter_buffering_ref;
	struct list_head	hist_vars;
#ifdef CONFIG_TRACER_SNAPSHOT
	struct cond_snapshot	*cond_snapshot;
#endif
	struct trace_func_repeats	__percpu *last_func_repeats;
	/*
	 * On boot up, the ring buffer is set to the minimum size, so that
	 * we do not waste memory on systems that are not using tracing.
	 */
	bool ring_buffer_expanded;
};

enum {
	TRACE_ARRAY_FL_GLOBAL		= BIT(0),
	TRACE_ARRAY_FL_BOOT		= BIT(1),
	TRACE_ARRAY_FL_LAST_BOOT	= BIT(2),
	TRACE_ARRAY_FL_MOD_INIT		= BIT(3),
	TRACE_ARRAY_FL_MEMMAP		= BIT(4),
};

#ifdef CONFIG_MODULES
bool module_exists(const char *module);
#else
static inline bool module_exists(const char *module)
{
	return false;
}
#endif

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern int tracing_check_open_get_tr(struct trace_array *tr);
extern struct trace_array *trace_array_find(const char *instance);
extern struct trace_array *trace_array_find_get(const char *instance);

extern u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe);
extern int tracing_set_filter_buffering(struct trace_array *tr, bool set);
extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);

extern bool trace_clock_in_ns(struct trace_array *tr);

extern unsigned long trace_adjust_address(struct trace_array *tr, unsigned long addr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)			\
	if (FTRACE_CMP_TYPE(var, etype)) {			\
		var = (typeof(var))(entry);			\
		WARN_ON(id != 0 && (entry)->type != id);	\
		break;						\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned.  To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item.  And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT);	\
		IF_ASSIGN(var, ent, struct osnoise_entry, TRACE_OSNOISE);\
		IF_ASSIGN(var, ent, struct timerlat_entry, TRACE_TIMERLAT);\
		IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH);	\
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct fgraph_retaddr_ent_entry,	\
			  TRACE_GRAPH_RETADDR_ENT);			\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		IF_ASSIGN(var, ent, struct func_repeats_entry,		\
			  TRACE_FUNC_REPEATS);				\
		__ftrace_bad_type();					\
	} while (0)

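/*
 * Illustrative use of trace_assign_type(), as in the output code:
 * given an iterator's current entry, safely cast it to its typed
 * event structure before printing:
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(&iter->seq, "%pS\n", (void *)field->ip);
 *
 * If iter->ent->type does not match TRACE_FN, the WARN_ON() in
 * IF_ASSIGN() fires.
 */
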
/*
 * An option specific to a tracer.  This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer.  Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
	struct tracer		*trace;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b

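/*
 * Typical use, sketched after the function graph tracer (the "foo"
 * names below are hypothetical).  An empty entry terminates the list,
 * and .val holds the initial state of the option bits:
 *
 *	static struct tracer_opt foo_opts[] = {
 *		{ TRACER_OPT(foo-verbose, 0x1) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags foo_flags = {
 *		.val  = 0,
 *		.opts = foo_opts,
 *	};
 */
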
struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

/**
 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	int			(*update_thresh)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	int			enabled;
	bool			print_max;
	bool			allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
	/* True if tracer cannot be enabled in kernel param */
	bool			noboot;
};

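/*
 * A minimal tracer sketch (hypothetical names), showing how the
 * callbacks above tie into register_tracer(), declared further down:
 *
 *	static int foo_tracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static void foo_tracer_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer foo_tracer __tracer_data = {
 *		.name	= "foo",
 *		.init	= foo_tracer_init,
 *		.reset	= foo_tracer_reset,
 *	};
 *
 * Calling register_tracer(&foo_tracer) from an __init function then
 * makes "foo" selectable via available_tracers.
 */
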
static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset_online_cpus(struct array_buffer *buf);
void tracing_reset_all_online_cpus(void);
void tracing_reset_all_online_cpus_unlocked(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
int tracing_open_generic_tr(struct inode *inode, struct file *filp);
int tracing_release_generic_tr(struct inode *inode, struct file *file);
int tracing_open_file_tr(struct inode *inode, struct file *filp);
int tracing_release_file_tr(struct inode *inode, struct file *filp);
int tracing_single_release_file_tr(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
bool tracer_tracing_is_on(struct trace_array *tr);
void tracer_tracing_on(struct trace_array *tr);
void tracer_tracing_off(struct trace_array *tr);
void tracer_tracing_disable(struct trace_array *tr);
void tracer_tracing_enable(struct trace_array *tr);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

/**
 * tracer_tracing_is_on_cpu - show the real state of the per CPU ring buffer
 * @tr: the trace array that holds the ring buffer
 * @cpu: the CPU buffer to check if enabled
 *
 * Shows the real state of the per CPU buffer: whether it is enabled or not.
 */
static inline bool tracer_tracing_is_on_cpu(struct trace_array *tr, int cpu)
{
	if (tr->array_buffer.buffer)
		return ring_buffer_record_is_on_cpu(tr->array_buffer.buffer, cpu);
	return false;
}

int tracing_init_dentry(void);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct trace_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned int trace_ctx);

int ring_buffer_meta_seq_init(struct file *file, struct trace_buffer *buffer, int cpu);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
					struct ring_buffer_event *event);

bool trace_is_tracepoint_string(const char *str);
const char *trace_event_format(struct trace_iterator *iter, const char *fmt);
char *trace_iter_expand_format(struct trace_iterator *iter);
bool ignore_event(struct trace_iterator *iter);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
unsigned long trace_total_entries(struct trace_array *tr);

void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned int trace_ctx,
		    struct ftrace_regs *regs);
void trace_graph_function(struct trace_array *tr,
			  unsigned long ip,
			  unsigned long parent_ip,
			  unsigned int trace_ctx);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace, struct fgraph_ops *gops,
			struct ftrace_regs *fregs);
int trace_graph_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops,
		      struct ftrace_regs *fregs);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_start_tgid_record(void);
void tracing_stop_tgid_record(void);

int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

loff_t tracing_lseek(struct file *file, loff_t offset, int whence);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

/* PID filtering */

bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
			     pid_t search_pid);
bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
			    struct trace_pid_list *filtered_no_pids,
			    struct task_struct *task);
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task);
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
int trace_pid_show(struct seq_file *m, void *v);
int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt);

#ifdef CONFIG_TRACER_MAX_TRACE
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
		   void *cond_data);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

#ifdef CONFIG_FSNOTIFY
#define LATENCY_FS_NOTIFY
#endif
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef LATENCY_FS_NOTIFY
void latency_fsnotify(struct trace_array *tr);
#else
static inline void latency_fsnotify(struct trace_array *tr) { }
#endif

#ifdef CONFIG_STACKTRACE
void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip);
#else
static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
				 int skip)
{
}
#endif /* CONFIG_STACKTRACE */

void trace_last_func_repeats(struct trace_array *tr,
			     struct trace_func_repeats *last_info,
			     unsigned int trace_ctx);

extern u64 ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);
extern int trace_find_tgid(int pid);
extern void trace_event_follow_fork(struct trace_array *tr, bool enable);

extern int trace_events_enabled(struct trace_array *tr, const char *system);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
extern unsigned long ftrace_number_of_pages;
extern unsigned long ftrace_number_of_groups;
extern u64 ftrace_update_time;
extern u64 ftrace_total_mod_time;
void ftrace_init_trace_array(struct trace_array *tr);
#else
static inline void ftrace_init_trace_array(struct trace_array *tr) { }
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern void trace_set_ring_buffer_expanded(struct trace_array *tr);
extern bool tracing_selftest_disabled;

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern void __init disable_tracing_selftest(const char *reason);

extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up.  These can be __init functions.  Thus, when selftests
 * are enabled, then the tracers need to reference __init functions.
 */
#define __tracer_data __refdata
#else
static inline void __init disable_tracing_selftest(const char *reason)
{
}
/* Tracers are seldom changed.  Optimize when selftests are disabled. */
#define __tracer_data __read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(u64 nsec);

__printf(2, 0)
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
__printf(2, 0)
int trace_vprintk(unsigned long ip, const char *fmt, va_list args);
__printf(3, 0)
int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args);
__printf(3, 4)
int trace_array_printk_buf(struct trace_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern char trace_find_mark(unsigned long long duration);

struct ftrace_hash;

struct ftrace_mod_load {
	struct list_head	list;
	char			*func;
	char			*module;
	int			enable;
};

enum {
	FTRACE_HASH_FL_MOD	= (1 << 0),
};

struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	unsigned long		flags;
	struct rcu_head		rcu;
};

struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);

static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
}

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_REL_TIME	0x40
#define TRACE_GRAPH_PRINT_IRQS		0x80
#define TRACE_GRAPH_PRINT_TAIL		0x100
#define TRACE_GRAPH_SLEEP_TIME		0x200
#define TRACE_GRAPH_GRAPH_TIME		0x400
#define TRACE_GRAPH_PRINT_RETVAL	0x800
#define TRACE_GRAPH_PRINT_RETVAL_HEX	0x1000
#define TRACE_GRAPH_PRINT_RETADDR	0x2000
#define TRACE_GRAPH_ARGS		0x4000
#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)

extern void ftrace_graph_sleep_time_control(bool enable);

#ifdef CONFIG_FUNCTION_PROFILER
extern void ftrace_graph_graph_time_control(bool enable);
#else
static inline void ftrace_graph_graph_time_control(bool enable) { }
#endif

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned int trace_ctx);
extern int __trace_graph_retaddr_entry(struct trace_array *tr,
				       struct ftrace_graph_ent *trace,
				       unsigned int trace_ctx,
				       unsigned long retaddr);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned int trace_ctx,
				 u64 calltime, u64 rettime);

extern void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops);
extern int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops);
extern void free_fgraph_ops(struct trace_array *tr);

enum {
	TRACE_GRAPH_FL		= 1,

	/*
	 * In the very unlikely case that an interrupt came in
	 * at a start of graph tracing, and we want to trace
	 * the function in that interrupt, the depth can be greater
	 * than zero, because of the preempted start of a previous
	 * trace. In an even more unlikely case, depth could be 2
	 * if a softirq interrupted the start of graph tracing,
	 * followed by an interrupt preempting a start of graph
	 * tracing in the softirq, and depth can even be 3
	 * if an NMI came in at the start of an interrupt function
	 * that preempted a softirq start of a function that
	 * preempted normal context!!!! Luckily, it can't be
	 * greater than 3, so the next two bits are a mask
	 * of what the depth is when we set TRACE_GRAPH_FL
	 */

	TRACE_GRAPH_DEPTH_START_BIT,
	TRACE_GRAPH_DEPTH_END_BIT,

	/*
	 * To implement set_graph_notrace, if this bit is set, we ignore
	 * function graph tracing of called functions, until the return
	 * function is called to clear it.
	 */
	TRACE_GRAPH_NOTRACE_BIT,
};

#define TRACE_GRAPH_NOTRACE	(1 << TRACE_GRAPH_NOTRACE_BIT)

static inline unsigned long ftrace_graph_depth(unsigned long *task_var)
{
	return (*task_var >> TRACE_GRAPH_DEPTH_START_BIT) & 3;
}

static inline void ftrace_graph_set_depth(unsigned long *task_var, int depth)
{
	*task_var &= ~(3 << TRACE_GRAPH_DEPTH_START_BIT);
	*task_var |= (depth & 3) << TRACE_GRAPH_DEPTH_START_BIT;
}

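/*
 * Worked example of the encoding above: TRACE_GRAPH_FL has the value 1
 * (bit 0) and TRACE_GRAPH_DEPTH_START_BIT is the next enumerator (2),
 * so the depth lives in bits 2-3.  After
 *
 *	*task_var |= TRACE_GRAPH_FL;
 *	ftrace_graph_set_depth(task_var, 2);
 *
 * the low bits of *task_var are 0b1001 (flag set, depth == 2), and
 * ftrace_graph_depth(task_var) returns 2.
 */
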
#ifdef CONFIG_DYNAMIC_FTRACE
extern struct ftrace_hash __rcu *ftrace_graph_hash;
extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;

static inline int
ftrace_graph_addr(unsigned long *task_var, struct ftrace_graph_ent *trace)
{
	unsigned long addr = trace->func;
	int ret = 0;
	struct ftrace_hash *hash;

	preempt_disable_notrace();

	/*
	 * Have to open code "rcu_dereference_sched()" because the
	 * function graph tracer can be called when RCU is not
	 * "watching".
	 * Protected with schedule_on_each_cpu(ftrace_sync)
	 */
	hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());

	if (ftrace_hash_empty(hash)) {
		ret = 1;
		goto out;
	}

	if (ftrace_lookup_ip(hash, addr)) {
		/*
		 * This needs to be cleared on the return functions
		 * when the depth is zero.
		 */
		*task_var |= TRACE_GRAPH_FL;
		ftrace_graph_set_depth(task_var, trace->depth);

		/*
		 * If no irqs are to be traced, but a set_graph_function
		 * is set, and called by an interrupt handler, we still
		 * want to trace it.
		 */
		if (in_hardirq())
			trace_recursion_set(TRACE_IRQ_BIT);
		else
			trace_recursion_clear(TRACE_IRQ_BIT);
		ret = 1;
	}

out:
	preempt_enable_notrace();
	return ret;
}

static inline void
ftrace_graph_addr_finish(struct fgraph_ops *gops, struct ftrace_graph_ret *trace)
{
	unsigned long *task_var = fgraph_get_task_var(gops);

	if ((*task_var & TRACE_GRAPH_FL) &&
	    trace->depth == ftrace_graph_depth(task_var))
		*task_var &= ~TRACE_GRAPH_FL;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	int ret = 0;
	struct ftrace_hash *notrace_hash;

	preempt_disable_notrace();

	/*
	 * Have to open code "rcu_dereference_sched()" because the
	 * function graph tracer can be called when RCU is not
	 * "watching".
	 * Protected with schedule_on_each_cpu(ftrace_sync)
	 */
	notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
						 !preemptible());

	if (ftrace_lookup_ip(notrace_hash, addr))
		ret = 1;

	preempt_enable_notrace();
	return ret;
}
#else
static inline int ftrace_graph_addr(unsigned long *task_var, struct ftrace_graph_ent *trace)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
static inline void ftrace_graph_addr_finish(struct fgraph_ops *gops, struct ftrace_graph_ret *trace)
{ }
#endif /* CONFIG_DYNAMIC_FTRACE */

extern unsigned int fgraph_max_depth;
extern bool fgraph_sleep_time;

static inline bool
ftrace_graph_ignore_func(struct fgraph_ops *gops, struct ftrace_graph_ent *trace)
{
	unsigned long *task_var = fgraph_get_task_var(gops);

	/* trace it when it is-nested-in or is a function enabled. */
	return !((*task_var & TRACE_GRAPH_FL) ||
		 ftrace_graph_addr(task_var, trace)) ||
		(trace->depth < 0) ||
		(fgraph_max_depth && trace->depth >= fgraph_max_depth);
}

void fgraph_init_ops(struct ftrace_ops *dst_ops,
		     struct ftrace_ops *src_ops);

#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
static inline void free_fgraph_ops(struct trace_array *tr) { }
/* ftrace_ops may not be defined */
#define init_array_fgraph_ops(tr, ops) do { } while (0)
#define allocate_fgraph_ops(tr, ops) ({ 0; })
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER

#define FTRACE_PID_IGNORE	-1
#define FTRACE_PID_TRACE	-2

struct ftrace_func_command {
	struct list_head	list;
	char			*name;
	int			(*func)(struct trace_array *tr,
					struct ftrace_hash *hash,
					char *func, char *cmd,
					char *params, int enable);
};
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid) !=
		FTRACE_PID_IGNORE;
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent);
void ftrace_destroy_function_files(struct trace_array *tr);
int ftrace_allocate_ftrace_ops(struct trace_array *tr);
void ftrace_free_ftrace_ops(struct trace_array *tr);
void ftrace_init_global_array_ops(struct trace_array *tr);
struct trace_array *trace_get_global_array(void);
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
void ftrace_reset_array_ops(struct trace_array *tr);
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
void ftrace_init_tracefs_toplevel(struct trace_array *tr,
				  struct dentry *d_tracer);
void ftrace_clear_pids(struct trace_array *tr);
int init_function_trace(void);
void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
#else
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	return 0;
}
static inline void ftrace_free_ftrace_ops(struct trace_array *tr) { }
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_clear_pids(struct trace_array *tr) { }
static inline int init_function_trace(void) { return 0; }
static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
/* ftrace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)

struct ftrace_probe_ops {
	void			(*func)(unsigned long ip,
					unsigned long parent_ip,
					struct trace_array *tr,
					struct ftrace_probe_ops *ops,
					void *data);
	int			(*init)(struct ftrace_probe_ops *ops,
					struct trace_array *tr,
					unsigned long ip, void *init_data,
					void **data);
	void			(*free)(struct ftrace_probe_ops *ops,
					struct trace_array *tr,
					unsigned long ip, void *data);
	int			(*print)(struct seq_file *m,
					 unsigned long ip,
					 struct ftrace_probe_ops *ops,
					 void *data);
};

struct ftrace_func_mapper;
typedef int (*ftrace_mapper_func)(void *data);

struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
				  unsigned long ip);
int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
			      unsigned long ip, void *data);
void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
				   unsigned long ip);
void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
			     ftrace_mapper_func free_func);

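/*
 * Illustrative use of the func mapper, which associates private data
 * with a function address (the pattern used by the probe commands;
 * "my_data", "my_free" and "do_something" below are hypothetical):
 *
 *	struct ftrace_func_mapper *mapper = allocate_ftrace_func_mapper();
 *	void **data;
 *
 *	ftrace_func_mapper_add_ip(mapper, ip, my_data);
 *	data = ftrace_func_mapper_find_ip(mapper, ip);
 *	if (data)
 *		do_something(*data);
 *	free_ftrace_func_mapper(mapper, my_free);
 */
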
extern int
register_ftrace_function_probe(char *glob, struct trace_array *tr,
			       struct ftrace_probe_ops *ops, void *data);
extern int
unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
				      struct ftrace_probe_ops *ops);
extern void clear_ftrace_function_probes(struct trace_array *tr);

int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent);
void ftrace_destroy_filter_files(struct ftrace_ops *ops);

extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
			     int len, int reset);
extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
			      int len, int reset);
#else
struct ftrace_func_command;

static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
	return -EINVAL;
}
static inline void clear_ftrace_function_probes(struct trace_array *tr)
{
}

/*
 * The ops parameter passed in is usually undefined.
 * This must be a macro.
 */
#define ftrace_create_filter_files(ops, parent) do { } while (0)
#define ftrace_destroy_filter_files(ops) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */

bool ftrace_event_is_function(struct trace_event_call *call);

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
			  size_t cnt, loff_t *ppos);

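/*
 * Typical read-side sketch: pull a whitespace-separated token out of a
 * user buffer with trace_get_user() ("handle_token" is a hypothetical
 * callback; parser.buffer holds a NUL-terminated token once loaded):
 *
 *	struct trace_parser parser;
 *	int read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		handle_token(parser.buffer);
 *
 *	trace_parser_put(&parser);
 */
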
/*
 * Only create function graph options if function graph is configured.
 */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define FGRAPH_FLAGS						\
		C(DISPLAY_GRAPH,	"display-graph"),
#else
# define FGRAPH_FLAGS
#endif

#ifdef CONFIG_BRANCH_TRACER
# define BRANCH_FLAGS					\
		C(BRANCH,		"branch"),
#else
# define BRANCH_FLAGS
#endif

#ifdef CONFIG_FUNCTION_TRACER
# define FUNCTION_FLAGS						\
		C(FUNCTION,		"function-trace"),	\
		C(FUNC_FORK,		"function-fork"),
# define FUNCTION_DEFAULT_FLAGS		TRACE_ITER_FUNCTION
#else
# define FUNCTION_FLAGS
# define FUNCTION_DEFAULT_FLAGS		0UL
# define TRACE_ITER_FUNC_FORK		0UL
#endif

#ifdef CONFIG_STACKTRACE
# define STACK_FLAGS				\
	 C(STACKTRACE,		"stacktrace"),
#else
# define STACK_FLAGS
#endif

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *	 trace.c (this macro guarantees it).
 */
#define TRACE_FLAGS						\
		C(PRINT_PARENT,		"print-parent"),	\
		C(SYM_OFFSET,		"sym-offset"),		\
		C(SYM_ADDR,		"sym-addr"),		\
		C(VERBOSE,		"verbose"),		\
		C(RAW,			"raw"),			\
		C(HEX,			"hex"),			\
		C(BIN,			"bin"),			\
		C(BLOCK,		"block"),		\
		C(FIELDS,		"fields"),		\
		C(PRINTK,		"trace_printk"),	\
		C(ANNOTATE,		"annotate"),		\
		C(USERSTACKTRACE,	"userstacktrace"),	\
		C(SYM_USEROBJ,		"sym-userobj"),		\
		C(PRINTK_MSGONLY,	"printk-msg-only"),	\
		C(CONTEXT_INFO,		"context-info"),   /* Print pid/cpu/time */ \
		C(LATENCY_FMT,		"latency-format"),	\
		C(RECORD_CMD,		"record-cmd"),		\
		C(RECORD_TGID,		"record-tgid"),		\
		C(OVERWRITE,		"overwrite"),		\
		C(STOP_ON_FREE,		"disable_on_free"),	\
		C(IRQ_INFO,		"irq-info"),		\
		C(MARKERS,		"markers"),		\
		C(EVENT_FORK,		"event-fork"),		\
		C(TRACE_PRINTK,		"trace_printk_dest"),	\
		C(COPY_MARKER,		"copy_trace_marker"),	\
		C(PAUSE_ON_TRACE,	"pause-on-trace"),	\
		C(HASH_PTR,		"hash-ptr"),	/* Print hashed pointer */ \
		FUNCTION_FLAGS					\
		FGRAPH_FLAGS					\
		STACK_FLAGS					\
		BRANCH_FLAGS

/*
 * By defining C, we can make TRACE_FLAGS a list of bit names
 * that will define the bits for the flag masks.
 */
#undef C
#define C(a, b) TRACE_ITER_##a##_BIT

enum trace_iterator_bits {
	TRACE_FLAGS
	/* Make sure we don't go more than we have bits for */
	TRACE_ITER_LAST_BIT
};

/*
 * By redefining C, we can make TRACE_FLAGS a list of masks that
 * use the bits as defined above.
 */
#undef C
#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)

enum trace_iterator_flags { TRACE_FLAGS };
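
/*
 * Worked expansion for the first entry: the two C() definitions turn
 * C(PRINT_PARENT, "print-parent") into
 *
 *	TRACE_ITER_PRINT_PARENT_BIT	(in trace_iterator_bits)
 *	TRACE_ITER_PRINT_PARENT = (1 << TRACE_ITER_PRINT_PARENT_BIT)
 *					(in trace_iterator_flags)
 *
 * while trace.c redefines C() a third time to build the matching
 * trace_options[] string array from the same list, keeping the bit
 * positions and option names in sync by construction.
 */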

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (tr->trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(struct trace_array *tr);

union trace_synth_field {
	u8				as_u8;
	u16				as_u16;
	u32				as_u32;
	u64				as_u64;
	struct trace_dynamic_info	as_dynamic;
};

struct ftrace_event_field {
	struct list_head	link;
	const char		*name;
	const char		*type;
	int			filter_type;
	int			offset;
	int			size;
	unsigned int		is_signed:1;
	unsigned int		needs_test:1;
	int			len;
};

struct prog_entry;

struct event_filter {
	struct prog_entry	__rcu *prog;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct event_filter	*filter;
	int			ref_count;
};

struct trace_subsystem_dir {
	struct list_head		list;
	struct event_subsystem		*subsystem;
	struct trace_array		*tr;
	struct eventfs_inode		*ei;
	int				ref_count;
	int				nr_events;
};

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct trace_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned int trace_ctx,
				     struct pt_regs *regs);

static inline void trace_buffer_unlock_commit(struct trace_array *tr,
					      struct trace_buffer *buffer,
					      struct ring_buffer_event *event,
					      unsigned int trace_ctx)
{
	trace_buffer_unlock_commit_regs(tr, buffer, event, trace_ctx, NULL);
}

DECLARE_PER_CPU(bool, trace_taskinfo_save);
int trace_save_cmdline(struct task_struct *tsk);
int trace_create_savedcmd(void);
int trace_alloc_tgid_map(void);
void trace_free_saved_cmdlines_buffer(void);

extern const struct file_operations tracing_saved_cmdlines_fops;
extern const struct file_operations tracing_saved_tgids_fops;
extern const struct file_operations tracing_saved_cmdlines_size_fops;

DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DECLARE_PER_CPU(int, trace_buffered_event_cnt);
void trace_buffered_event_disable(void);
void trace_buffered_event_enable(void);

void early_enable_events(struct trace_array *tr, char *buf, bool disable_first);

static inline void
__trace_event_discard_commit(struct trace_buffer *buffer,
			     struct ring_buffer_event *event)
{
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Simply release the temp buffer and enable preemption */
		this_cpu_dec(trace_buffered_event_cnt);
		preempt_enable_notrace();
		return;
	}
	/* ring_buffer_discard_commit() enables preemption */
	ring_buffer_discard_commit(buffer, event);
}

/*
 * Helper function for event_trigger_unlock_commit{_regs}().
 * If there are event triggers attached to this event that require
 * filtering against its fields, then they will be called as the
 * entry already holds the field information of the current event.
 *
 * It also checks if the event should be discarded or not.
 * It is to be discarded if the event is soft disabled and the
 * event was only recorded to process triggers, or if the event
 * filter is active and this event did not match the filters.
 *
 * Returns true if the event is discarded, false otherwise.
 */
static inline bool
__event_trigger_test_discard(struct trace_event_file *file,
			     struct trace_buffer *buffer,
			     struct ring_buffer_event *event,
			     void *entry,
			     enum event_trigger_type *tt)
{
	unsigned long eflags = file->flags;

	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
		*tt = event_triggers_call(file, buffer, entry, event);

	if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
				    EVENT_FILE_FL_FILTERED |
				    EVENT_FILE_FL_PID_FILTER))))
		return false;

	if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
		goto discard;

	if (file->flags & EVENT_FILE_FL_FILTERED &&
	    !filter_match_preds(file->filter, entry))
		goto discard;

	if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
	    trace_event_ignore_this_pid(file))
		goto discard;

	return false;
 discard:
	__trace_event_discard_commit(buffer, event);
	return true;
}

/**
 * event_trigger_unlock_commit - handle triggers and finish event commit
 * @file: The file pointer associated with the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @trace_ctx: The tracing context flags.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters,
 * and checks whether the event is soft disabled and should be
 * discarded.
 */
static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
			    struct trace_buffer *buffer,
			    struct ring_buffer_event *event,
			    void *entry, unsigned int trace_ctx)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit(file->tr, buffer, event, trace_ctx);

	if (tt)
		event_triggers_post_call(file, tt);
}

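/*
 * Simplified sketch of the usual call sequence from an event probe;
 * the reserve step and field setup are abbreviated for illustration:
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, file, type,
 *						sizeof(*entry), trace_ctx);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	{ fill in the entry's fields }
 *	event_trigger_unlock_commit(file, buffer, event, entry, trace_ctx);
 */
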
#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED		16384

struct filter_pred;
struct regex;

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
	MATCH_GLOB,
	MATCH_INDEX,
};

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;
	int			field_len;
	regex_match_func	match;
};

static inline bool is_string_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_DYN_STRING ||
	       field->filter_type == FILTER_RDYN_STRING ||
	       field->filter_type == FILTER_STATIC_STRING ||
	       field->filter_type == FILTER_PTR_STRING ||
	       field->filter_type == FILTER_COMM;
}

static inline bool is_function_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_TRACE_FN;
}

extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
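/*
 * For example (illustrative): parsing "!*fork*" sets *not, returns
 * MATCH_MIDDLE_ONLY, and points *search at "fork" with the '*'s
 * stripped; "fork*" yields MATCH_FRONT_ONLY, "*fork" yields
 * MATCH_END_ONLY, and a pattern without wildcards yields MATCH_FULL.
 */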
extern void print_event_filter(struct trace_event_file *file,
			       struct trace_seq *s);
extern int apply_event_filter(struct trace_event_file *file,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);
extern int create_event_filter(struct trace_array *tr,
			       struct trace_event_call *call,
			       char *filter_str, bool set_str,
			       struct event_filter **filterp);
extern void free_event_filter(struct event_filter *filter);

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name);

extern void trace_event_enable_cmd_record(bool enable);
extern void trace_event_enable_tgid_record(bool enable);

extern int event_trace_init(void);
extern int init_events(void);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);
extern void __trace_early_add_events(struct trace_array *tr);

extern struct trace_event_file *__find_event_file(struct trace_array *tr,
						  const char *system,
						  const char *event);
extern struct trace_event_file *find_event_file(struct trace_array *tr,
						const char *system,
						const char *event);

static inline void *event_file_data(struct file *filp)
{
	return READ_ONCE(file_inode(filp)->i_private);
}

extern struct mutex event_mutex;
extern struct list_head ftrace_events;

/*
 * When the trace_event_file is the filp->i_private pointer,
 * it must be taken under the event_mutex lock, and then checked
 * to see if the EVENT_FILE_FL_FREED flag is set. If it is, then
 * the data pointed to by the trace_event_file can not be trusted.
 *
 * Use event_file_file() to access the trace_event_file from
 * the filp the first time under the event_mutex and check for
 * NULL. If it needs to be retrieved again and the event_mutex
 * is still held, then event_file_data() can be used and it
 * is guaranteed to be valid.
 */
static inline struct trace_event_file *event_file_file(struct file *filp)
{
	struct trace_event_file *file;

	lockdep_assert_held(&event_mutex);
	file = READ_ONCE(file_inode(filp)->i_private);
	if (!file || file->flags & EVENT_FILE_FL_FREED)
		return NULL;
	return file;
}

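/*
 * Sketch of the typical pattern (error handling is caller specific):
 *
 *	mutex_lock(&event_mutex);
 *	file = event_file_file(filp);
 *	if (!file) {
 *		mutex_unlock(&event_mutex);
 *		return -ENODEV;
 *	}
 *	{ use file; event_file_data() may re-read it while still locked }
 *	mutex_unlock(&event_mutex);
 */
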
extern const struct file_operations event_trigger_fops;
extern const struct file_operations event_hist_fops;
extern const struct file_operations event_hist_debug_fops;
extern const struct file_operations event_inject_fops;

#ifdef CONFIG_HIST_TRIGGERS
extern int register_trigger_hist_cmd(void);
extern int register_trigger_hist_enable_disable_cmds(void);
#else
static inline int register_trigger_hist_cmd(void) { return 0; }
static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
#endif

extern int register_trigger_cmds(void);
extern void clear_event_triggers(struct trace_array *tr);

enum {
	EVENT_TRIGGER_FL_PROBE		= BIT(0),
};

struct event_trigger_data {
	unsigned long			count;
	int				ref;
	int				flags;
	const struct event_trigger_ops	*ops;
	struct event_command		*cmd_ops;
	struct event_filter __rcu	*filter;
	char				*filter_str;
	void				*private_data;
	bool				paused;
	bool				paused_tmp;
	struct list_head		list;
	char				*name;
	struct list_head		named_list;
	struct event_trigger_data	*named_data;
};

/* Avoid typos */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"
#define ENABLE_HIST_STR		"enable_hist"
#define DISABLE_HIST_STR	"disable_hist"

struct enable_trigger_data {
	struct trace_event_file		*file;
	bool				enable;
	bool				hist;
};

extern int event_enable_trigger_print(struct seq_file *m,
				      struct event_trigger_data *data);
extern void event_enable_trigger_free(struct event_trigger_data *data);
extern int event_enable_trigger_parse(struct event_command *cmd_ops,
				      struct trace_event_file *file,
				      char *glob, char *cmd,
				      char *param_and_filter);
extern int event_enable_register_trigger(char *glob,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
extern void event_enable_unregister_trigger(char *glob,
					    struct event_trigger_data *test,
					    struct trace_event_file *file);
extern struct event_trigger_data *
trigger_data_alloc(struct event_command *cmd_ops, char *cmd, char *param,
		   void *private_data);
extern void trigger_data_free(struct event_trigger_data *data);
extern int event_trigger_init(struct event_trigger_data *data);
extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
					      int trigger_enable);
extern void update_cond_flag(struct trace_event_file *file);
extern int set_trigger_filter(char *filter_str,
			      struct event_trigger_data *trigger_data,
			      struct trace_event_file *file);
extern struct event_trigger_data *find_named_trigger(const char *name);
extern bool is_named_trigger(struct event_trigger_data *test);
extern int save_named_trigger(const char *name,
			      struct event_trigger_data *data);
extern void del_named_trigger(struct event_trigger_data *data);
extern void pause_named_trigger(struct event_trigger_data *data);
extern void unpause_named_trigger(struct event_trigger_data *data);
extern void set_named_trigger_data(struct event_trigger_data *data,
				   struct event_trigger_data *named_data);
extern struct event_trigger_data *
get_named_trigger_data(struct event_trigger_data *data);
extern int register_event_command(struct event_command *cmd);
extern int unregister_event_command(struct event_command *cmd);
extern int register_trigger_hist_enable_disable_cmds(void);
extern bool event_trigger_check_remove(const char *glob);
extern bool event_trigger_empty_param(const char *param);
extern int event_trigger_separate_filter(char *param_and_filter, char **param,
					 char **filter, bool param_required);
extern int event_trigger_parse_num(char *trigger,
				   struct event_trigger_data *trigger_data);
extern int event_trigger_set_filter(struct event_command *cmd_ops,
				    struct trace_event_file *file,
				    char *param,
				    struct event_trigger_data *trigger_data);
extern void event_trigger_reset_filter(struct event_command *cmd_ops,
				       struct event_trigger_data *trigger_data);
extern int event_trigger_register(struct event_command *cmd_ops,
				  struct trace_event_file *file,
				  char *glob,
				  struct event_trigger_data *trigger_data);
extern void event_trigger_unregister(struct event_command *cmd_ops,
				     struct trace_event_file *file,
				     char *glob,
				     struct event_trigger_data *trigger_data);

extern void event_file_get(struct trace_event_file *file);
extern void event_file_put(struct trace_event_file *file);

/**
 * struct event_trigger_ops - callbacks for trace event triggers
 *
 * The methods in this structure provide per-event trigger hooks for
 * various trigger operations.
 *
 * The @init and @free methods are used during trigger setup and
 * teardown, typically called from an event_command's @parse()
 * function implementation.
 *
 * The @print method is used to print the trigger spec.
 *
 * The @trigger method is the function that actually implements the
 * trigger and is called in the context of the triggering event
 * whenever that event occurs.
 *
 * All the methods below, except for @init() and @free(), must be
 * implemented.
 *
 * @trigger: The trigger 'probe' function called when the triggering
 *	event occurs. The data passed into this callback is the data
 *	that was supplied to the event_command @reg() function that
 *	registered the trigger (see struct event_command) along with
 *	the trace record, rec.
 *
 * @init: An optional initialization function called for the trigger
 *	when the trigger is registered (via the event_command @reg()
 *	function). This can be used to perform per-trigger
 *	initialization such as incrementing a per-trigger reference
 *	count, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_init() (see
 *	trace_events_trigger.c).
 *
 * @free: An optional de-initialization function called for the
 *	trigger when the trigger is unregistered (via the
 *	event_command @unreg() function). This can be used to perform
 *	per-trigger de-initialization such as decrementing a
 *	per-trigger reference count and freeing corresponding trigger
 *	data, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_free() (see
 *	trace_events_trigger.c).
 *
 * @print: The callback function invoked to have the trigger print
 *	itself. This is usually implemented by a wrapper function
 *	that calls the generic utility function @event_trigger_print()
 *	(see trace_events_trigger.c).
 */
struct event_trigger_ops {
	void			(*trigger)(struct event_trigger_data *data,
					   struct trace_buffer *buffer,
					   void *rec,
					   struct ring_buffer_event *rbe);
	int			(*init)(struct event_trigger_data *data);
	void			(*free)(struct event_trigger_data *data);
	int			(*print)(struct seq_file *m,
					 struct event_trigger_data *data);
};

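/*
 * A minimal ops instance, modeled on the traceon/traceoff triggers in
 * trace_events_trigger.c (the foo_* names are illustrative only):
 *
 *	static const struct event_trigger_ops foo_trigger_ops = {
 *		.trigger	= foo_trigger,
 *		.print		= foo_trigger_print,
 *		.init		= event_trigger_init,
 *		.free		= event_trigger_free,
 *	};
 */
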
/**
 * struct event_command - callbacks and data members for event commands
 *
 * Event commands are invoked by users by writing the command name
 * into the 'trigger' file associated with a trace event. The
 * parameters associated with a specific invocation of an event
 * command are used to create an event trigger instance, which is
 * added to the list of trigger instances associated with that trace
 * event. When the event is hit, the set of triggers associated with
 * that event is invoked.
 *
 * The data members in this structure provide per-event command data
 * for various event commands.
 *
 * All the data members below, except for @flags, must be set for
 * each event command.
 *
 * @name: The unique name that identifies the event command. This is
 *	the name used when setting triggers via trigger files.
 *
 * @trigger_type: A unique id that identifies the event command
 *	'type'. This value has two purposes, the first to ensure that
 *	only one trigger of the same type can be set at a given time
 *	for a particular event e.g. it doesn't make sense to have both
 *	a traceon and traceoff trigger attached to a single event at
 *	the same time, so traceon and traceoff have the same type
 *	though they have different names. The @trigger_type value is
 *	also used as a bit value for deferring the actual trigger
 *	action until after the current event is finished. Some
 *	commands need to do this if they themselves log to the trace
 *	buffer (see the EVENT_CMD_FL_POST_TRIGGER flag below).
 *	@trigger_type values are defined by adding new values to the
 *	trigger_type enum in include/linux/trace_events.h.
 *
 * @flags: See the enum event_command_flags below.
 *
 * All the methods below, except for @set_filter() and @unreg_all(),
 * must be implemented.
 *
 * @parse: The callback function responsible for parsing and
 *	registering the trigger written to the 'trigger' file by the
 *	user. It allocates the trigger instance and registers it with
 *	the appropriate trace event. It makes use of the other
 *	event_command callback functions to orchestrate this, and is
 *	usually implemented by the generic utility function
 *	@event_trigger_parse() (see trace_events_trigger.c).
 *
 * @reg: Adds the trigger to the list of triggers associated with the
 *	event, and enables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @init() function).
 *	This is also where commands can use the @trigger_type value to
 *	make the decision as to whether or not multiple instances of
 *	the trigger should be allowed. This is usually implemented by
 *	the generic utility function @register_trigger() (see
 *	trace_events_trigger.c).
 *
 * @unreg: Removes the trigger from the list of triggers associated
 *	with the event, and disables the event trigger itself, after
 *	de-initializing it (via the event_trigger_ops @free() function).
 *	This is usually implemented by the generic utility function
 *	@unregister_trigger() (see trace_events_trigger.c).
 *
 * @unreg_all: An optional function called to remove all the triggers
 *	from the list of triggers associated with the event. Called
 *	when a trigger file is opened in truncate mode.
 *
 * @set_filter: An optional function called to parse and set a filter
 *	for the trigger. If no @set_filter() method is set for the
 *	event command, filters set by the user for the command will be
 *	ignored. This is usually implemented by the generic utility
 *	function @set_trigger_filter() (see trace_events_trigger.c).
 *
 * @get_trigger_ops: The callback function invoked to retrieve the
 *	event_trigger_ops implementation associated with the command.
 *	This callback function allows a single event_command to
 *	support multiple trigger implementations via different sets of
 *	event_trigger_ops, depending on the value of the @param
 *	string.
 */
struct event_command {
	struct list_head	list;
	char			*name;
	enum event_trigger_type	trigger_type;
	int			flags;
	int			(*parse)(struct event_command *cmd_ops,
					 struct trace_event_file *file,
					 char *glob, char *cmd,
					 char *param_and_filter);
	int			(*reg)(char *glob,
				       struct event_trigger_data *data,
				       struct trace_event_file *file);
	void			(*unreg)(char *glob,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
	void			(*unreg_all)(struct trace_event_file *file);
	int			(*set_filter)(char *filter_str,
					      struct event_trigger_data *data,
					      struct trace_event_file *file);
	const struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};

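/*
 * Simplified from the traceon command in trace_events_trigger.c, a
 * command is typically a static definition registered once at boot:
 *
 *	static struct event_command trigger_traceon_cmd = {
 *		.name			= "traceon",
 *		.trigger_type		= ETT_TRACE_ONOFF,
 *		.parse			= event_trigger_parse,
 *		.reg			= register_trigger,
 *		.unreg			= unregister_trigger,
 *		.get_trigger_ops	= onoff_get_trigger_ops,
 *		.set_filter		= set_trigger_filter,
 *	};
 *
 *	ret = register_event_command(&trigger_traceon_cmd);
 */
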
/**
 * enum event_command_flags - flags for struct event_command
 *
 * @POST_TRIGGER: A flag that says whether or not this command needs
 *	to have its action delayed until after the current event has
 *	been closed. Some triggers need to avoid being invoked while
 *	an event is currently in the process of being logged, since
 *	the trigger may itself log data into the trace buffer. Thus
 *	we make sure the current event is committed before invoking
 *	those triggers. To do that, the trigger invocation is split
 *	in two - the first part checks the filter using the current
 *	trace record; if a command has the @post_trigger flag set, it
 *	sets a bit for itself in the return value, otherwise it
 *	directly invokes the trigger. Once all commands have been
 *	either invoked or set their return flag, the current record is
 *	either committed or discarded. At that point, if any commands
 *	have deferred their triggers, those commands are finally
 *	invoked following the close of the current event. In other
 *	words, if the event_trigger_ops @trigger() probe implementation
 *	itself logs to the trace buffer, this flag should be set,
 *	otherwise it can be left unspecified.
 *
 * @NEEDS_REC: A flag that says whether or not this command needs
 *	access to the trace record in order to perform its function,
 *	regardless of whether or not it has a filter associated with
 *	it (filters make a trigger require access to the trace record
 *	but are not always present).
 */
enum event_command_flags {
	EVENT_CMD_FL_POST_TRIGGER	= 1,
	EVENT_CMD_FL_NEEDS_REC		= 2,
};

static inline bool event_command_post_trigger(struct event_command *cmd_ops)
{
	return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
}

static inline bool event_command_needs_rec(struct event_command *cmd_ops)
{
	return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
}

extern int trace_event_enable_disable(struct trace_event_file *file,
				      int enable, int soft_disable);
extern int tracing_alloc_snapshot(void);
extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update);

extern int tracing_snapshot_cond_disable(struct trace_array *tr);
extern void *tracing_cond_snapshot_data(struct trace_array *tr);

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

extern const char *__start___tracepoint_str[];
extern const char *__stop___tracepoint_str[];

void trace_printk_control(bool enabled);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);

/* Used from boot time tracer */
extern int trace_set_options(struct trace_array *tr, char *option);
extern int tracing_set_tracer(struct trace_array *tr, const char *buf);
extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id);
extern int tracing_set_cpumask(struct trace_array *tr,
			       cpumask_var_t tracing_cpumask_new);

#define MAX_EVENT_NAME_LEN	64

extern ssize_t trace_parse_run_command(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos,
		int (*createfn)(const char *));

extern unsigned int err_pos(char *cmd, const char *str);
extern void tracing_log_err(struct trace_array *tr,
			    const char *loc, const char *cmd,
			    const char **errs, u8 type, u16 pos);

/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, as well as save the print formats
 * into sections to display. But the trace infrastructure wants
 * to use these without the added overhead at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance). The internal_trace_puts() is for such
 * a purpose.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print)		\
	extern struct trace_event_call					\
	__aligned(4) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))

#include "trace_entries.h"

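/*
 * For illustration: a hypothetical stanza in trace_entries.h such as
 * FTRACE_ENTRY(foo, foo_entry, TRACE_FOO, F_STRUCT(...), ...) would
 * expand, through the definitions above, to:
 *
 *	extern struct trace_event_call __aligned(4) event_foo;
 */
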
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
void init_ftrace_syscalls(void);
const char *get_syscall_name(int syscall);
#else
static inline void init_ftrace_syscalls(void) { }
static inline const char *get_syscall_name(int syscall)
{
	return NULL;
}
#endif

#ifdef CONFIG_EVENT_TRACING
void trace_event_init(void);
void trace_event_eval_update(struct trace_eval_map **map, int len);
/* Used from boot time tracer */
extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
extern int trigger_process_regex(struct trace_event_file *file, char *buff);
#else
static inline void __init trace_event_init(void) { }
static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
#endif

#ifdef CONFIG_TRACER_SNAPSHOT
void tracing_snapshot_instance(struct trace_array *tr);
int tracing_alloc_snapshot_instance(struct trace_array *tr);
int tracing_arm_snapshot(struct trace_array *tr);
void tracing_disarm_snapshot(struct trace_array *tr);
#else
static inline void tracing_snapshot_instance(struct trace_array *tr) { }
static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	return 0;
}
static inline int tracing_arm_snapshot(struct trace_array *tr) { return 0; }
static inline void tracing_disarm_snapshot(struct trace_array *tr) { }
#endif

#ifdef CONFIG_PREEMPT_TRACER
void tracer_preempt_on(unsigned long a0, unsigned long a1);
void tracer_preempt_off(unsigned long a0, unsigned long a1);
#else
static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
#endif
#ifdef CONFIG_IRQSOFF_TRACER
void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
#else
static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

/*
 * Reset the state of the trace_iterator so that it can read consumed data.
 * Normally, the trace_iterator is used for reading the data when it is not
 * consumed, and must retain state.
 */
static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
{
	memset_startat(iter, 0, seq);
	iter->pos = -1;
}

/* Check the name is good for event/group/fields */
static inline bool __is_good_name(const char *name, bool hash_ok)
{
	if (!isalpha(*name) && *name != '_' && (!hash_ok || *name != '-'))
		return false;
	while (*++name != '\0') {
		if (!isalpha(*name) && !isdigit(*name) && *name != '_' &&
		    (!hash_ok || *name != '-'))
			return false;
	}
	return true;
}

/* Check the name is good for event/group/fields */
static inline bool is_good_name(const char *name)
{
	return __is_good_name(name, false);
}

/* Check the name is good for system */
static inline bool is_good_system_name(const char *name)
{
	return __is_good_name(name, true);
}

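/*
 * For example: is_good_name() accepts "sched_switch" and "_probe1" but
 * rejects "1probe" and "my-event"; is_good_system_name() additionally
 * accepts '-', so "my-sys" is a valid system name.
 */
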
/* Convert certain expected symbols into '_' when generating event names */
static inline void sanitize_event_name(char *name)
{
	while (*name++ != '\0')
		if (*name == ':' || *name == '.')
			*name = '_';
}

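/*
 * For example (illustrative): sanitize_event_name() rewrites a
 * generated name like "cpu.load:avg" to "cpu_load_avg" in place.
 */
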
/*
 * This is a generic way to read and write a u64 value from a file in tracefs.
 *
 * The value is stored in the variable pointed to by *val. The value needs
 * to be at least *min and at most *max. The write is protected by an
 * existing *lock.
 */
struct trace_min_max_param {
	struct mutex	*lock;
	u64		*val;
	u64		*min;
	u64		*max;
};

#define U64_STR_SIZE		24	/* 20 digits max */

extern const struct file_operations trace_min_max_fops;

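/*
 * Sketch of a tracefs file wired up through trace_min_max_param; the
 * foo_* names are illustrative only:
 *
 *	static u64 foo_val;
 *	static u64 foo_max = 100;
 *
 *	static struct trace_min_max_param foo_param = {
 *		.lock	= &foo_mutex,
 *		.val	= &foo_val,
 *		.min	= NULL,
 *		.max	= &foo_max,
 *	};
 *
 *	trace_create_file("foo", TRACE_MODE_WRITE, parent, &foo_param,
 *			  &trace_min_max_fops);
 */
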
#ifdef CONFIG_RV
extern int rv_init_interface(void);
#else
static inline int rv_init_interface(void)
{
	return 0;
}
#endif

/*
 * This is used only to distinguish a function address from trampoline
 * code, so the value itself has no meaning.
 */
#define FTRACE_TRAMPOLINE_MARKER  ((unsigned long) INT_MAX)

#endif /* _LINUX_KERNEL_TRACE_H */