/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_TRACE_EVENT_H
#define _LINUX_TRACE_EVENT_H

#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/tracepoint.h>

struct trace_array;
struct trace_buffer;
struct tracer;
struct dentry;
struct bpf_prog;

const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
				  unsigned long flags,
				  const struct trace_print_flags *flag_array);

const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
				    const struct trace_print_flags *symbol_array);

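/*
 * Illustrative note (not part of the original header): these *_seq helpers
 * are the runtime back ends that the __print_flags()/__print_symbolic()
 * macros of the TRACE_EVENT() infrastructure expand to.  A TP_printk()
 * clause typically reaches them indirectly, along the lines of this sketch
 * (the field and flag names are hypothetical):
 *
 *	TP_printk("state=%s flags=%s",
 *		  __print_symbolic(__entry->state,
 *				   { 0, "IDLE" }, { 1, "RUNNING" }),
 *		  __print_flags(__entry->flags, "|",
 *				{ (1 << 0), "URGENT" }, { (1 << 1), "RETRY" }))
 */
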
#if BITS_PER_LONG == 32
const char *trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
				      unsigned long long flags,
				      const struct trace_print_flags_u64 *flag_array);

const char *trace_print_symbols_seq_u64(struct trace_seq *p,
					unsigned long long val,
					const struct trace_print_flags_u64
					*symbol_array);
#endif

const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
				    unsigned int bitmask_size);

const char *trace_print_hex_seq(struct trace_seq *p,
				const unsigned char *buf, int len,
				bool concatenate);

const char *trace_print_array_seq(struct trace_seq *p,
				  const void *buf, int count,
				  size_t el_size);

struct trace_iterator;
struct trace_event;

int trace_raw_output_prep(struct trace_iterator *iter,
			  struct trace_event *event);

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
};

#define TRACE_EVENT_TYPE_MAX						\
	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)

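/*
 * With the 16-bit unsigned short "type" field above, this works out to
 * (1 << 16) - 1 = 65535 possible event type values.
 */
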
/*
 * Trace iterator - used by the printout routines that present trace
 * results to users; these routines might sleep, etc.:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	struct trace_buffer	*trace_buffer;
	void			*private;
	int			cpu_file;
	struct mutex		mutex;
	struct ring_buffer_iter	**buffer_iter;
	unsigned long		iter_flags;

	/* trace_seq for __print_flags() and __print_symbolic() etc. */
	struct trace_seq	tmp_seq;

	cpumask_var_t		started;

	/* true when the currently open file is a snapshot */
	bool			snapshot;

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	unsigned long		lost_events;
	int			leftover;
	int			ent_size;
	int			cpu;
	u64			ts;

	loff_t			pos;
	long			idx;

	/* All new fields added here will be zeroed out in pipe_read */
};

enum trace_iter_flags {
	TRACE_FILE_LAT_FMT	= 1,
	TRACE_FILE_ANNOTATE	= 2,
	TRACE_FILE_TIME_IN_NS	= 4,
};

typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
					      int flags, struct trace_event *event);

struct trace_event_functions {
	trace_print_func	trace;
	trace_print_func	raw;
	trace_print_func	hex;
	trace_print_func	binary;
};

struct trace_event {
	struct hlist_node		node;
	struct list_head		list;
	int				type;
	struct trace_event_functions	*funcs;
};

extern int register_trace_event(struct trace_event *event);
extern int unregister_trace_event(struct trace_event *event);

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};

enum print_line_t trace_handle_return(struct trace_seq *s);

void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);
struct trace_event_file;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
				struct trace_event_file *trace_file,
				int type, unsigned long len,
				unsigned long flags, int pc);

#define TRACE_RECORD_CMDLINE	BIT(0)
#define TRACE_RECORD_TGID	BIT(1)

void tracing_record_taskinfo(struct task_struct *task, int flags);
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
					  struct task_struct *next, int flags);

void tracing_record_cmdline(struct task_struct *task);
void tracing_record_tgid(struct task_struct *task);

int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);

struct event_filter;

enum trace_reg {
	TRACE_REG_REGISTER,
	TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
	TRACE_REG_PERF_REGISTER,
	TRACE_REG_PERF_UNREGISTER,
	TRACE_REG_PERF_OPEN,
	TRACE_REG_PERF_CLOSE,
	/*
	 * These (ADD/DEL) use a 'boolean' return value, where 1 (true) means a
	 * custom action was taken and the default action is not to be
	 * performed.
	 */
	TRACE_REG_PERF_ADD,
	TRACE_REG_PERF_DEL,
#endif
};

struct trace_event_call;

struct trace_event_class {
	const char		*system;
	void			*probe;
#ifdef CONFIG_PERF_EVENTS
	void			*perf_probe;
#endif
	int			(*reg)(struct trace_event_call *event,
				       enum trace_reg type, void *data);
	int			(*define_fields)(struct trace_event_call *);
	struct list_head	*(*get_fields)(struct trace_event_call *);
	struct list_head	fields;
	int			(*raw_init)(struct trace_event_call *);
};

extern int trace_event_reg(struct trace_event_call *event,
			   enum trace_reg type, void *data);

struct trace_event_buffer {
	struct ring_buffer		*buffer;
	struct ring_buffer_event	*event;
	struct trace_event_file		*trace_file;
	void				*entry;
	unsigned long			flags;
	int				pc;
};

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len);

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);

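/*
 * Illustrative sketch (not part of the original header): a typical event
 * write path pairs the two declarations above - reserve space, fill in the
 * entry, then commit.  The entry layout "struct my_entry" is hypothetical:
 *
 *	struct trace_event_buffer fbuffer;
 *	struct my_entry *entry;
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry));
 *	if (!entry)
 *		return;
 *	entry->value = value;
 *	trace_event_buffer_commit(&fbuffer);
 */
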
enum {
	TRACE_EVENT_FL_FILTERED_BIT,
	TRACE_EVENT_FL_CAP_ANY_BIT,
	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
	TRACE_EVENT_FL_TRACEPOINT_BIT,
	TRACE_EVENT_FL_KPROBE_BIT,
	TRACE_EVENT_FL_UPROBE_BIT,
};

/*
 * Event flags:
 *  FILTERED	  - The event has a filter attached
 *  CAP_ANY	  - Any user can enable for perf
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
 *  TRACEPOINT	  - Event is a tracepoint
 *  KPROBE	  - Event is a kprobe
 *  UPROBE	  - Event is a uprobe
 */
enum {
	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
	TRACE_EVENT_FL_TRACEPOINT	= (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
	TRACE_EVENT_FL_KPROBE		= (1 << TRACE_EVENT_FL_KPROBE_BIT),
	TRACE_EVENT_FL_UPROBE		= (1 << TRACE_EVENT_FL_UPROBE_BIT),
};

#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)

struct trace_event_call {
	struct list_head	list;
	struct trace_event_class *class;
	union {
		char			*name;
		/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
		struct tracepoint	*tp;
	};
	struct trace_event	event;
	char			*print_fmt;
	struct event_filter	*filter;
	void			*mod;
	void			*data;
	/*
	 *   bit 0:	filter_active
	 *   bit 1:	allow trace by non root (cap any)
	 *   bit 2:	failed to apply filter
	 *   bit 3:	trace internal event (do not enable)
	 *   bit 4:	Event was enabled by module
	 *   bit 5:	use call filter rather than file filter
	 *   bit 6:	Event is a tracepoint
	 */
	int			flags; /* static flags of different events */

#ifdef CONFIG_PERF_EVENTS
	int				perf_refcount;
	struct hlist_head __percpu	*perf_events;
	struct bpf_prog_array __rcu	*prog_array;

	int	(*perf_perm)(struct trace_event_call *,
			     struct perf_event *);
#endif
};

#ifdef CONFIG_PERF_EVENTS
static inline bool bpf_prog_array_valid(struct trace_event_call *call)
{
	/*
	 * This inline function checks whether call->prog_array
	 * is valid or not. The function is called in various places,
	 * outside rcu_read_lock/unlock, as a heuristic to speed up execution.
	 *
	 * If this function returns true, and later call->prog_array
	 * becomes NULL inside the rcu_read_lock/unlock region,
	 * we bail out then. If this function returns false,
	 * there is a risk that we might miss a few events if the checking
	 * were delayed until inside the rcu_read_lock/unlock region and
	 * call->prog_array happened to become non-NULL then.
	 *
	 * Here, READ_ONCE() is used instead of rcu_access_pointer().
	 * rcu_access_pointer() requires the actual definition of
	 * "struct bpf_prog_array" while READ_ONCE() only needs
	 * a declaration of the same type.
	 */
	return !!READ_ONCE(call->prog_array);
}
#endif

static inline const char *
trace_event_name(struct trace_event_call *call)
{
	if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
		return call->tp ? call->tp->name : NULL;
	else
		return call->name;
}

struct trace_array;
struct trace_subsystem_dir;

enum {
	EVENT_FILE_FL_ENABLED_BIT,
	EVENT_FILE_FL_RECORDED_CMD_BIT,
	EVENT_FILE_FL_RECORDED_TGID_BIT,
	EVENT_FILE_FL_FILTERED_BIT,
	EVENT_FILE_FL_NO_SET_FILTER_BIT,
	EVENT_FILE_FL_SOFT_MODE_BIT,
	EVENT_FILE_FL_SOFT_DISABLED_BIT,
	EVENT_FILE_FL_TRIGGER_MODE_BIT,
	EVENT_FILE_FL_TRIGGER_COND_BIT,
	EVENT_FILE_FL_PID_FILTER_BIT,
	EVENT_FILE_FL_WAS_ENABLED_BIT,
};

/*
 * Event file flags:
 *  ENABLED	  - The event is enabled
 *  RECORDED_CMD  - The comms should be recorded at sched_switch
 *  RECORDED_TGID - The tgids should be recorded at sched_switch
 *  FILTERED	  - The event has a filter attached
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  SOFT_MODE	  - The event is enabled/disabled by SOFT_DISABLED
 *  SOFT_DISABLED - When set, do not trace the event (even though its
 *		    tracepoint may be enabled)
 *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
 *  TRIGGER_COND  - When set, one or more triggers has an associated filter
 *  PID_FILTER	  - When set, the event is filtered based on pid
 *  WAS_ENABLED	  - Set when enabled to know to clear trace on module removal
 */
enum {
	EVENT_FILE_FL_ENABLED		= (1 << EVENT_FILE_FL_ENABLED_BIT),
	EVENT_FILE_FL_RECORDED_CMD	= (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
	EVENT_FILE_FL_RECORDED_TGID	= (1 << EVENT_FILE_FL_RECORDED_TGID_BIT),
	EVENT_FILE_FL_FILTERED		= (1 << EVENT_FILE_FL_FILTERED_BIT),
	EVENT_FILE_FL_NO_SET_FILTER	= (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
	EVENT_FILE_FL_SOFT_MODE		= (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
	EVENT_FILE_FL_SOFT_DISABLED	= (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
	EVENT_FILE_FL_TRIGGER_MODE	= (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
	EVENT_FILE_FL_TRIGGER_COND	= (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
	EVENT_FILE_FL_PID_FILTER	= (1 << EVENT_FILE_FL_PID_FILTER_BIT),
	EVENT_FILE_FL_WAS_ENABLED	= (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
};

struct trace_event_file {
	struct list_head		list;
	struct trace_event_call		*event_call;
	struct event_filter __rcu	*filter;
	struct dentry			*dir;
	struct trace_array		*tr;
	struct trace_subsystem_dir	*system;
	struct list_head		triggers;

	/*
	 * 32 bit flags:
	 *   bit 0:	enabled
	 *   bit 1:	enabled cmd record
	 *   bit 2:	enable/disable with the soft disable bit
	 *   bit 3:	soft disabled
	 *   bit 4:	trigger enabled
	 *
	 * Note: The bits must be set atomically to prevent races
	 * from other writers. Reads of flags do not need to be in
	 * sync as they occur in critical sections. But the way flags
	 * is currently used, these changes do not affect the code
	 * except that when a change is made, it may have a slight
	 * delay in propagating the changes to other CPUs due to
	 * caching and such. Which is mostly OK ;-)
	 */
	unsigned long		flags;
	atomic_t		sm_ref;	/* soft-mode reference counter */
	atomic_t		tm_ref;	/* trigger-mode reference counter */
};

#define __TRACE_EVENT_FLAGS(name, value)				\
	static int __init trace_init_flags_##name(void)		\
	{								\
		event_##name.flags |= value;				\
		return 0;						\
	}								\
	early_initcall(trace_init_flags_##name);

#define __TRACE_EVENT_PERF_PERM(name, expr...)				\
	static int perf_perm_##name(struct trace_event_call *tp_event, \
				    struct perf_event *p_event)		\
	{								\
		return ({ expr; });					\
	}								\
	static int __init trace_init_perf_perm_##name(void)		\
	{								\
		event_##name.perf_perm = &perf_perm_##name;		\
		return 0;						\
	}								\
	early_initcall(trace_init_perf_perm_##name);

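/*
 * Illustrative sketch (not part of the original header): the expr passed to
 * __TRACE_EVENT_PERF_PERM() is evaluated with "tp_event" and "p_event" in
 * scope, and should yield 0 to allow the perf event or a negative errno to
 * reject it.  The event name below is hypothetical:
 *
 *	__TRACE_EVENT_PERF_PERM(my_event,
 *		is_sampling_event(p_event) ? -EPERM : 0);
 */
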
#define PERF_MAX_TRACE_SIZE	2048

#define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */

enum event_trigger_type {
	ETT_NONE		= (0),
	ETT_TRACE_ONOFF		= (1 << 0),
	ETT_SNAPSHOT		= (1 << 1),
	ETT_STACKTRACE		= (1 << 2),
	ETT_EVENT_ENABLE	= (1 << 3),
	ETT_EVENT_HIST		= (1 << 4),
	ETT_HIST_ENABLE		= (1 << 5),
};

extern int filter_match_preds(struct event_filter *filter, void *rec);

extern enum event_trigger_type
event_triggers_call(struct trace_event_file *file, void *rec,
		    struct ring_buffer_event *event);
extern void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);

/**
 * trace_trigger_soft_disabled - do triggers and test if soft disabled
 * @file: The file pointer of the event to test
 *
 * If any triggers without filters are attached to this event, they
 * will be called here. If the event is soft disabled and has no
 * triggers that require testing the fields, it will return true,
 * otherwise false.
 */
static inline bool
trace_trigger_soft_disabled(struct trace_event_file *file)
{
	unsigned long eflags = file->flags;

	if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
		if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
			event_triggers_call(file, NULL, NULL);
		if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
			return true;
		if (eflags & EVENT_FILE_FL_PID_FILTER)
			return trace_event_ignore_this_pid(file);
	}
	return false;
}

#ifdef CONFIG_BPF_EVENTS
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog);
void perf_event_detach_bpf_prog(struct perf_event *event);
int perf_event_query_prog_array(struct perf_event *event, void __user *info);
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr);
#else
static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	return 1;
}

static inline int
perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}

static inline void perf_event_detach_bpf_prog(struct perf_event *event) { }

static inline int
perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	return -EOPNOTSUPP;
}
static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *p)
{
	return -EOPNOTSUPP;
}
static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *p)
{
	return -EOPNOTSUPP;
}
static inline struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	return NULL;
}
static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
}
static inline int bpf_get_perf_event_info(const struct perf_event *event,
					  u32 *prog_id, u32 *fd_type,
					  const char **buf, u64 *probe_offset,
					  u64 *probe_addr)
{
	return -EOPNOTSUPP;
}
#endif

enum {
	FILTER_OTHER = 0,
	FILTER_STATIC_STRING,
	FILTER_DYN_STRING,
	FILTER_PTR_STRING,
	FILTER_TRACE_FN,
	FILTER_COMM,
	FILTER_CPU,
};

extern int trace_event_raw_init(struct trace_event_call *call);
extern int trace_define_field(struct trace_event_call *call, const char *type,
			      const char *name, int offset, int size,
			      int is_signed, int filter_type);
extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call);
extern int trace_event_get_offsets(struct trace_event_call *call);

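/*
 * Illustrative sketch (not part of the original header): a define_fields()
 * callback typically describes each entry field to the filter code with
 * trace_define_field().  "struct my_entry" and its "pid" field are
 * hypothetical:
 *
 *	ret = trace_define_field(call, "pid_t", "pid",
 *				 offsetof(struct my_entry, pid),
 *				 sizeof(pid_t), is_signed_type(pid_t),
 *				 FILTER_OTHER);
 */
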
#define is_signed_type(type)	(((type)(-1)) < (type)1)

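/*
 * For example, is_signed_type(int) is 1 while is_signed_type(u32) is 0,
 * because (u32)(-1) wraps around to the maximum value and is therefore
 * not less than 1.
 */
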
int trace_set_clr_event(const char *system, const char *event, int set);

/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to allocate the static variable to fmt if it is not a
 * constant. Even with the outer if statement optimizing out.
 */
#define event_trace_printk(ip, fmt, args...)				\
do {									\
	__trace_printk_check_format(fmt, ##args);			\
	tracing_record_cmdline(current);				\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt			\
		  __attribute__((section("__trace_printk_fmt"))) =	\
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
	} else								\
		__trace_printk(ip, fmt, ##args);			\
} while (0)

#ifdef CONFIG_PERF_EVENTS
struct perf_event;

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
DECLARE_PER_CPU(int, bpf_kprobe_override);

extern int  perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int  perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
#ifdef CONFIG_KPROBE_EVENTS
extern int  perf_kprobe_init(struct perf_event *event, bool is_retprobe);
extern void perf_kprobe_destroy(struct perf_event *event);
extern int bpf_get_kprobe_info(const struct perf_event *event,
			       u32 *fd_type, const char **symbol,
			       u64 *probe_offset, u64 *probe_addr,
			       bool perf_type_tracepoint);
#endif
#ifdef CONFIG_UPROBE_EVENTS
extern int  perf_uprobe_init(struct perf_event *event,
			     unsigned long ref_ctr_offset, bool is_retprobe);
extern void perf_uprobe_destroy(struct perf_event *event);
extern int bpf_get_uprobe_info(const struct perf_event *event,
			       u32 *fd_type, const char **filename,
			       u64 *probe_offset, bool perf_type_tracepoint);
#endif
extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
				      char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
void perf_trace_buf_update(void *record, u16 type);
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);

void bpf_trace_run1(struct bpf_prog *prog, u64 arg1);
void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2);
void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2,
		    u64 arg3);
void bpf_trace_run4(struct bpf_prog *prog, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4);
void bpf_trace_run5(struct bpf_prog *prog, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5);
void bpf_trace_run6(struct bpf_prog *prog, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6);
void bpf_trace_run7(struct bpf_prog *prog, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7);
void bpf_trace_run8(struct bpf_prog *prog, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		    u64 arg8);
void bpf_trace_run9(struct bpf_prog *prog, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		    u64 arg8, u64 arg9);
void bpf_trace_run10(struct bpf_prog *prog, u64 arg1, u64 arg2,
		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		     u64 arg8, u64 arg9, u64 arg10);
void bpf_trace_run11(struct bpf_prog *prog, u64 arg1, u64 arg2,
		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		     u64 arg8, u64 arg9, u64 arg10, u64 arg11);
void bpf_trace_run12(struct bpf_prog *prog, u64 arg1, u64 arg2,
		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		     u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12);
void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
			       struct trace_event_call *call, u64 count,
			       struct pt_regs *regs, struct hlist_head *head,
			       struct task_struct *task);

static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
		      u64 count, struct pt_regs *regs, void *head,
		      struct task_struct *task)
{
	perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
}

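/*
 * Illustrative sketch (not part of the original header): the perf side of an
 * event typically allocates a scratch buffer with perf_trace_buf_alloc(),
 * fills it, and hands it to perf_trace_buf_submit().  The entry layout,
 * event_type and head below are hypothetical:
 *
 *	struct my_entry *entry;
 *	struct pt_regs *regs;
 *	int rctx;
 *
 *	entry = perf_trace_buf_alloc(sizeof(*entry), &regs, &rctx);
 *	if (!entry)
 *		return;
 *	entry->value = value;
 *	perf_trace_buf_submit(entry, sizeof(*entry), rctx, event_type,
 *			      1, regs, head, NULL);
 */
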
#endif

#endif /* _LINUX_TRACE_EVENT_H */