/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry	ent;
 *	<type>			<item>;
 *	<type2>			<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/ftrace_event.h>

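/*
 * For illustration only (not part of this header): given a hypothetical
 * tracepoint, defined elsewhere as
 *
 *	TRACE_EVENT(foo_bar,
 *		TP_PROTO(const char *name, int value),
 *		TP_ARGS(name, value),
 *		TP_STRUCT__entry(
 *			__string(	name,	name	)
 *			__field(	int,	value	)
 *		),
 *		TP_fast_assign(
 *			__assign_str(name, name);
 *			__entry->value = value;
 *		),
 *		TP_printk("%s value=%d", __get_str(name), __entry->value)
 *	);
 *
 * this stage would generate roughly:
 *
 *	struct ftrace_raw_foo_bar {
 *		struct trace_entry	ent;
 *		u32			__data_loc_name;
 *		int			value;
 *		char			__data[0];
 *	};
 *
 * The event name "foo_bar" and its fields are made up for the example;
 * any real event defined with TRACE_EVENT() follows the same pattern.
 */
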
/*
 * DECLARE_EVENT_CLASS can be used to add generic function
 * handlers for events. That is, if all events have the same
 * parameters and just have distinct trace points.
 * Each tracepoint can be defined with DEFINE_EVENT and that
 * will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	DECLARE_EVENT_CLASS(name,				\
			    PARAMS(proto),			\
			    PARAMS(args),			\
			    PARAMS(tstruct),			\
			    PARAMS(assign),			\
			    PARAMS(print));			\
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));

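/*
 * For illustration only: two hypothetical events sharing one class
 * (all names here are made up) would be declared in a trace header as
 *
 *	DECLARE_EVENT_CLASS(foo_template,
 *		TP_PROTO(const char *name, int value),
 *		TP_ARGS(name, value),
 *		TP_STRUCT__entry(...),
 *		TP_fast_assign(...),
 *		TP_printk(...));
 *
 *	DEFINE_EVENT(foo_template, foo_start,
 *		TP_PROTO(const char *name, int value),
 *		TP_ARGS(name, value));
 *
 *	DEFINE_EVENT(foo_template, foo_end,
 *		TP_PROTO(const char *name, int value),
 *		TP_ARGS(name, value));
 *
 * Both events then share the code generated for the class, while
 * TRACE_EVENT() above is just the one-class/one-event shorthand.
 */
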
#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __field_struct
#define __field_struct(type, item)	type	item;

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {					\
		struct trace_entry	ent;				\
		tstruct							\
		char			__data[0];			\
	};								\
									\
	static struct ftrace_event_class event_class_##name;

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)	\
	static struct ftrace_event_call	__used		\
	__attribute__((__aligned__(4))) event_##name

#undef DEFINE_EVENT_FN
#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),		\
		PARAMS(tstruct), PARAMS(assign), PARAMS(print))	\

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(name, value)	\
	__TRACE_EVENT_FLAGS(name, value)

#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(name, expr...)	\
	__TRACE_EVENT_PERF_PERM(name, expr)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)


/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32				<item1>;
 *	u32				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>; this is
 * to keep the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the higher 16 bits of <item>.
 */

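/*
 * For illustration only: for the hypothetical foo_bar event sketched in
 * the Stage 1 comment above (one __string() field plus one __field()),
 * this stage would produce roughly
 *
 *	struct ftrace_data_offsets_foo_bar {
 *		u32	name;
 *	};
 *
 * Fields declared with plain __field()/__array() contribute nothing here;
 * only dynamically sized entries need an offset slot.
 */
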
#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __field_struct
#define __field_struct(type, item)

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {				\
		tstruct;						\
	};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(event, flag)

#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(event, expr...)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p = &iter->tmp_seq;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>->event.type) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, "%s: ", <call>);
 *	if (ret)
 *		ret = trace_seq_printf(s, <TP_printk> "\n");
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field)	\
		((__entry->__data_loc_##field >> 16) & 0xffff)

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

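/*
 * For illustration only: each __data_loc_<item> word packs the payload
 * offset in its low 16 bits and the payload length in its high 16 bits,
 * so for the hypothetical __string(name, ...) field above,
 *
 *	__get_str(name)
 *
 * expands (via the macros above) to roughly
 *
 *	(char *)((void *)__entry + (__entry->__data_loc_name & 0xffff))
 *
 * and __get_dynamic_array_len(name) recovers the stored string length.
 */
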
#undef __get_bitmask
#define __get_bitmask(field)						\
	({								\
		void *__bitmask = __get_dynamic_array(field);		\
		unsigned int __bitmask_size;				\
		__bitmask_size = __get_dynamic_array_len(field);	\
		ftrace_print_bitmask_seq(p, __bitmask, __bitmask_size);	\
	})

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})

#undef __print_symbolic_u64
#if BITS_PER_LONG == 32
#define __print_symbolic_u64(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags_u64 symbols[] =	\
			{ symbol_array, { -1, NULL } };			\
		ftrace_print_symbols_seq_u64(p, value, symbols);	\
	})
#else
#define __print_symbolic_u64(value, symbol_array...)			\
			__print_symbolic(value, symbol_array)
#endif

#undef __print_hex
#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)

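/*
 * For illustration only (hypothetical field and symbol names): a trace
 * header's TP_printk() can use the helpers above like
 *
 *	TP_printk("state=%s flags=%s",
 *		  __print_symbolic(__entry->state,
 *				   { 0, "IDLE" }, { 1, "BUSY" }),
 *		  __print_flags(__entry->flags, "|",
 *				{ 0x1, "URGENT" }, { 0x2, "RETRY" }))
 *
 * The { value, name } pairs end up in the static trace_print_flags
 * arrays declared by the macros above.
 */
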
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *trace_event)		\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct trace_seq __maybe_unused *p = &iter->tmp_seq;		\
	struct ftrace_raw_##call *field;				\
	int ret;							\
									\
	field = (typeof(field))iter->ent;				\
									\
	ret = ftrace_raw_output_prep(iter, trace_event);		\
	if (ret)							\
		return ret;						\
									\
	ret = trace_seq_printf(s, print);				\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}									\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *event)			\
{									\
	struct ftrace_raw_##template *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p = &iter->tmp_seq;				\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.event.type) {			\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	trace_seq_init(p);						\
	return ftrace_output_call(iter, #call, print);			\
}									\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)			\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 0, filter_type);			\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __field_struct
#define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	do {								\
		char *type_str = #type"["__stringify(len)"]";		\
		BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);			\
		ret = trace_define_field(event_call, type_str, #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
		if (ret)						\
			return ret;					\
	} while (0);

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item),	       \
				 is_signed_type(type), FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
static int notrace __init						\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	tstruct;							\
									\
	return ret;							\
}

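/*
 * For illustration only: for the hypothetical foo_bar event, the macro
 * above would emit something along the lines of
 *
 *	static int notrace __init ftrace_define_fields_foo_bar(
 *				struct ftrace_event_call *event_call)
 *	{
 *		struct ftrace_raw_foo_bar field;
 *		int ret;
 *
 *		ret = trace_define_field(event_call, "__data_loc char[]",
 *					 "name", ..., FILTER_OTHER);
 *		ret = trace_define_field(event_call, "int", "value",
 *					 ..., FILTER_OTHER);
 *		if (ret)
 *			return ret;
 *
 *		return ret;
 *	}
 *
 * (offset/size arguments elided), which is what makes the fields visible
 * to the event filter code.
 */
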
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __field_struct
#define __field_struct(type, item)

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__item_length = (len) * sizeof(type);				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= __item_length << 16;			\
	__data_size += __item_length;

#undef __string
#define __string(item, src) __dynamic_array(char, item,		\
		    strlen((src) ? (const char *)(src) : "(null)") + 1)

/*
 * __bitmask_size_in_bytes_raw is the number of bytes needed to hold
 * num_possible_cpus().
 */
#define __bitmask_size_in_bytes_raw(nr_bits)	\
	(((nr_bits) + 7) / 8)

#define __bitmask_size_in_longs(nr_bits)			\
	((__bitmask_size_in_bytes_raw(nr_bits) +		\
	  ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8))

/*
 * __bitmask_size_in_bytes is the number of bytes needed to hold
 * num_possible_cpus() padded out to the nearest long. This is what
 * is saved in the buffer, just to be consistent.
 */
#define __bitmask_size_in_bytes(nr_bits)				\
	(__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8))

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item,	\
					 __bitmask_size_in_longs(nr_bits))

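/*
 * For illustration only, working the arithmetic above through a made-up
 * case: with nr_bits = 36 on a 64-bit kernel,
 *
 *	__bitmask_size_in_bytes_raw(36)	= (36 + 7) / 8	= 5 bytes
 *	__bitmask_size_in_longs(36)	= (5 + 7) / 8	= 1 long
 *	__bitmask_size_in_bytes(36)	= 1 * 8		= 8 bytes
 *
 * so the __bitmask() entry above reserves one unsigned long (8 bytes)
 * of dynamic payload for the mask.
 */
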
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static inline notrace int ftrace_get_offsets_##call(			\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	int __maybe_unused __item_length;				\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

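/*
 * For illustration only: in the generated ftrace_get_offsets_foo_bar()
 * for the hypothetical event, the __string(name, name) field runs the
 * __dynamic_array() code above, roughly
 *
 *	__item_length = strlen(name ? name : "(null)") + 1;
 *	__data_offsets->name  = __data_size +
 *				offsetof(struct ftrace_raw_foo_bar, __data);
 *	__data_offsets->name |= __item_length << 16;
 *	__data_size += __item_length;
 *
 * The returned __data_size is later added to sizeof(*entry) when ring
 * buffer space for the event is reserved.
 */
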
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(void *__data, proto)
 * {
 *	struct ftrace_event_file *ftrace_file = __data;
 *	struct ftrace_event_call *event_call = ftrace_file->event_call;
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	unsigned long eflags = ftrace_file->flags;
 *	enum event_trigger_type __tt = ETT_NONE;
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int __data_size;
 *	int pc;
 *
 *	if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
 *		if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
 *			event_triggers_call(ftrace_file, NULL);
 *		if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
 *			return;
 *	}
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
 *				  event_<call>->event.type,
 *				  sizeof(*entry) + __data_size,
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry	= ring_buffer_event_data(event);
 *
 *	{ <assign>; }  <-- Here we assign the entries by the __field and
 *			   __array macros.
 *
 *	if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
 *		__tt = event_triggers_call(ftrace_file, entry);
 *
 *	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
 *		     &ftrace_file->flags))
 *		ring_buffer_discard_commit(buffer, event);
 *	else if (!filter_check_discard(ftrace_file, entry, buffer, event))
 *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
 *
 *	if (__tt)
 *		event_triggers_post_call(ftrace_file, __tt);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static const char print_fmt_<call>[] = <TP_printk>;
 *
 * static struct ftrace_event_class __used event_class_<template> = {
 *	.system			= "<system>",
 *	.define_fields		= ftrace_define_fields_<call>,
 *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
 *	.raw_init		= trace_event_raw_init,
 *	.probe			= ftrace_raw_event_##call,
 *	.reg			= ftrace_event_reg,
 * };
 *
 * static struct ftrace_event_call event_<call> = {
 *	.class			= event_class_<template>,
 *	{
 *		.tp			= &__tracepoint_<call>,
 *	},
 *	.event			= &ftrace_event_type_<call>,
 *	.print_fmt		= print_fmt_<call>,
 *	.flags			= TRACE_EVENT_FL_TRACEPOINT,
 * };
 * // it's only safe to use pointers when doing linker tricks to
 * // create an array.
 * static struct ftrace_event_call __used
 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
 *
 */

#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_PROTO(call, proto)			\
	static notrace void				\
	perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call)				\
	.perf_probe		= perf_trace_##call,

#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_struct
#define __field_struct(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");

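/*
 * For illustration only: with the macros above in effect, the hypothetical
 * foo_bar event's
 *
 *	TP_fast_assign(
 *		__assign_str(name, name);
 *		__entry->value = value;
 *	)
 *
 * copies the string into the dynamic __data area at the offset recorded
 * by ftrace_get_offsets_foo_bar() and fills in the fixed-size field
 * directly through __entry.
 */
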
#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)

#undef __assign_bitmask
#define __assign_bitmask(dst, src, nr_bits)				\
	memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef __perf_addr
#define __perf_addr(a)	(a)

#undef __perf_count
#define __perf_count(c)	(c)

#undef __perf_task
#define __perf_task(t)	(t)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
									\
static notrace void							\
ftrace_raw_event_##call(void *__data, proto)				\
{									\
	struct ftrace_event_file *ftrace_file = __data;			\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_buffer fbuffer;				\
	struct ftrace_raw_##call *entry;				\
	int __data_size;						\
									\
	if (ftrace_trigger_soft_disabled(ftrace_file))			\
		return;							\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file,	\
				 sizeof(*entry) + __data_size);		\
									\
	if (!entry)							\
		return;							\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	ftrace_event_buffer_commit(&fbuffer);				\
}
/*
 * The ftrace_test_probe is compiled out; it is only here as a build time
 * check to make sure that if the tracepoint handling changes, the ftrace
 * probe will fail to compile unless it too is updated.
 */

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void ftrace_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(ftrace_raw_event_##template);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __entry
#define __entry REC

#undef __print_flags
#undef __print_symbolic
#undef __print_hex
#undef __get_dynamic_array
#undef __get_dynamic_array_len
#undef __get_str
#undef __get_bitmask

#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)

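/*
 * For illustration only: with __entry redefined to REC and the helpers
 * above undefined, the hypothetical foo_bar event's
 *
 *	TP_printk("%s value=%d", __get_str(name), __entry->value)
 *
 * stringifies into a print_fmt along the lines of
 *
 *	"\"%s value=%d\", __get_str(name), REC->value"
 *
 * which is the string user space sees in the event's format file.
 */
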
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
_TRACE_PERF_PROTO(call, PARAMS(proto));					\
static const char print_fmt_##call[] = print;				\
static struct ftrace_event_class __used __refdata event_class_##call = { \
	.system			= __stringify(TRACE_SYSTEM),		\
	.define_fields		= ftrace_define_fields_##call,		\
	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
	.raw_init		= trace_event_raw_init,			\
	.probe			= ftrace_raw_event_##call,		\
	.reg			= ftrace_event_reg,			\
	_TRACE_PERF_INIT(call)						\
};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static struct ftrace_event_call __used event_##call = {		\
	.class			= &event_class_##template,		\
	{								\
		.tp			= &__tracepoint_##call,	\
	},								\
	.event.funcs		= &ftrace_event_type_funcs_##template,	\
	.print_fmt		= print_fmt_##template,			\
	.flags			= TRACE_EVENT_FL_TRACEPOINT,		\
};									\
static struct ftrace_event_call __used					\
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
									\
static const char print_fmt_##call[] = print;				\
									\
static struct ftrace_event_call __used event_##call = {		\
	.class			= &event_class_##template,		\
	{								\
		.tp			= &__tracepoint_##call,	\
	},								\
	.event.funcs		= &ftrace_event_type_funcs_##call,	\
	.print_fmt		= print_fmt_##call,			\
	.flags			= TRACE_EVENT_FL_TRACEPOINT,		\
};									\
static struct ftrace_event_call __used					\
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)


#ifdef CONFIG_PERF_EVENTS

#undef __entry
#define __entry entry

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field)	\
		((__entry->__data_loc_##field >> 16) & 0xffff)

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)

#undef __perf_addr
#define __perf_addr(a)	(__addr = (a))

#undef __perf_count
#define __perf_count(c)	(__count = (c))

#undef __perf_task
#define __perf_task(t)	(__task = (t))

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
perf_trace_##call(void *__data, proto)					\
{									\
	struct ftrace_event_call *event_call = __data;			\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_raw_##call *entry;				\
	struct pt_regs __regs;						\
	u64 __addr = 0, __count = 1;					\
	struct task_struct *__task = NULL;				\
	struct hlist_head *head;					\
	int __entry_size;						\
	int __data_size;						\
	int rctx;							\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	head = this_cpu_ptr(event_call->perf_events);			\
	if (__builtin_constant_p(!__task) && !__task &&			\
				hlist_empty(head))			\
		return;							\
									\
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	perf_fetch_caller_regs(&__regs);				\
	entry = perf_trace_buf_prepare(__entry_size,			\
			event_call->event.type, &__regs, &rctx);	\
	if (!entry)							\
		return;							\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
		__count, &__regs, head, __task);			\
}

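/*
 * For illustration only, a made-up sizing example for the code above:
 * if sizeof(*entry) + __data_size were 30 bytes, then
 *
 *	__entry_size = ALIGN(30 + sizeof(u32), sizeof(u64)) - sizeof(u32)
 *		     = ALIGN(34, 8) - 4 = 40 - 4 = 36
 *
 * so the perf buffer entry is sized such that the payload plus the u32
 * size header perf prepends stays u64 aligned.
 */
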
/*
 * This part is compiled out; it is only here as a build time check
 * to make sure that if the tracepoint handling changes, the
 * perf probe will fail to compile unless it too is updated.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void perf_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(perf_trace_##template);	\
}


#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */