/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry	ent;
 *	<type>			<item>;
 *	<type2>			<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/ftrace_event.h>

/*
 * DECLARE_EVENT_CLASS can be used to add a generic function
 * handler for events. That is, if all events have the same
 * parameters and just have distinct trace points.
 * Each tracepoint can be defined with DEFINE_EVENT and that
 * will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 */
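
/*
 * As a rough illustration (hypothetical event, not defined in this file),
 * a definition such as:
 *
 *	TRACE_EVENT(foo_bar,
 *		TP_PROTO(int bar),
 *		TP_ARGS(bar),
 *		TP_STRUCT__entry(
 *			__field(int, bar)
 *		),
 *		TP_fast_assign(
 *			__entry->bar = bar;
 *		),
 *		TP_printk("bar=%d", __entry->bar)
 *	);
 *
 * would be turned by this first stage into roughly:
 *
 *	struct ftrace_raw_foo_bar {
 *		struct trace_entry	ent;
 *		int			bar;
 *		char			__data[0];
 *	};
 *	static struct ftrace_event_call event_foo_bar;
 */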
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
	DECLARE_EVENT_CLASS(name, PARAMS(proto), PARAMS(args), \
			    PARAMS(tstruct), PARAMS(assign), \
			    PARAMS(print)); \
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));

#define __field(type, item) type item;

#define __field_ext(type, item, filter_type) type item;

#define __array(type, item, len) type item[len];
#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args
60 #define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
61 struct ftrace_raw_##name { \
62 struct trace_entry ent; \
67 #define DEFINE_EVENT(template, name, proto, args) \
68 static struct ftrace_event_call event_##name
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#define __cpparg(arg...) arg

/* Callbacks are meaningless to ftrace. */
#define TRACE_EVENT_FN(name, proto, args, tstruct, \
		assign, print, reg, unreg) \
	TRACE_EVENT(name, __cpparg(proto), __cpparg(args), \
		__cpparg(tstruct), __cpparg(assign), __cpparg(print))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32	<item1>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro creates each u32 <item>; this is to keep
 * the offset of each array from the beginning of the event. The size of
 * an array is also encoded, in the higher 16 bits of <item>.
 */
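
/*
 * For example, a hypothetical event whose TP_STRUCT__entry() contains
 * __string(msg, msg) would get, at this stage:
 *
 *	struct ftrace_data_offsets_<call> {
 *		u32	msg;
 *	};
 *
 * (msg is an illustrative name, not something defined in this file.)
 */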
#define __field(type, item)
#define __field_ext(type, item, filter_type)
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 item;

#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
	struct ftrace_data_offsets_##call { \
		tstruct; \
	};
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *	int ret;
 *
 *	entry = iter->ent;
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *	p = &get_cpu_var(ftrace_event_seq);
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in a raw format.
 */
#define __entry field

#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field) \
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#define __get_str(field) (char *)__get_dynamic_array(field)
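
/*
 * Illustration (hypothetical values): if a dynamic array starts 24 bytes
 * into the event record and is 8 bytes long, its __data_loc_<item> word
 * holds (8 << 16) | 24. __get_dynamic_array() masks off the low 16 bits
 * to recover the offset, so it returns (void *)__entry + 24; the byte
 * length sits in the upper 16 bits.
 */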
#define __print_flags(flag, delim, flag_array...) \
	({ \
		static const struct trace_print_flags __flags[] = \
			{ flag_array, { -1, NULL }}; \
		ftrace_print_flags_seq(p, delim, flag, __flags); \
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...) \
	({ \
		static const struct trace_print_flags symbols[] = \
			{ symbol_array, { -1, NULL }}; \
		ftrace_print_symbols_seq(p, value, symbols); \
	})
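
/*
 * Typical use in a TP_printk() (hypothetical state values, not from this
 * file):
 *
 *	TP_printk("state=%s", __print_symbolic(__entry->state,
 *					       { 0, "RUNNING" },
 *					       { 1, "SLEEPING" }))
 *
 * At this stage the helper expands to a statement expression that builds a
 * small trace_print_flags table and hands it to ftrace_print_symbols_seq().
 */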
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static enum print_line_t \
ftrace_raw_output_id_##call(int event_id, const char *name, \
			    struct trace_iterator *iter, int flags) \
{ \
	struct trace_seq *s = &iter->seq; \
	struct ftrace_raw_##call *field; \
	struct trace_entry *entry; \
	struct trace_seq *p; \
	int ret; \
	entry = iter->ent; \
	if (entry->type != event_id) { \
		WARN_ON_ONCE(1); \
		return TRACE_TYPE_UNHANDLED; \
	} \
	field = (typeof(field))entry; \
	p = &get_cpu_var(ftrace_event_seq); \
	trace_seq_init(p); \
	ret = trace_seq_printf(s, "%s: ", name); \
	if (ret) \
		ret = trace_seq_printf(s, print); \
	put_cpu(); \
	if (!ret) \
		return TRACE_TYPE_PARTIAL_LINE; \
	return TRACE_TYPE_HANDLED; \
}
#define DEFINE_EVENT(template, name, proto, args) \
static enum print_line_t \
ftrace_raw_output_##name(struct trace_iterator *iter, int flags) \
{ \
	return ftrace_raw_output_id_##template(event_##name.id, \
					       #name, iter, flags); \
}
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
static enum print_line_t \
ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
{ \
	struct trace_seq *s = &iter->seq; \
	struct ftrace_raw_##template *field; \
	struct trace_entry *entry; \
	struct trace_seq *p; \
	int ret; \
	entry = iter->ent; \
	if (entry->type != event_##call.id) { \
		WARN_ON_ONCE(1); \
		return TRACE_TYPE_UNHANDLED; \
	} \
	field = (typeof(field))entry; \
	p = &get_cpu_var(ftrace_event_seq); \
	trace_seq_init(p); \
	ret = trace_seq_printf(s, "%s: ", #call); \
	if (ret) \
		ret = trace_seq_printf(s, print); \
	put_cpu(); \
	if (!ret) \
		return TRACE_TYPE_PARTIAL_LINE; \
	return TRACE_TYPE_HANDLED; \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#define __field_ext(type, item, filter_type) \
	ret = trace_define_field(event_call, #type, #item, \
				 offsetof(typeof(field), item), \
				 sizeof(field.item), \
				 is_signed_type(type), filter_type); \
	if (ret) \
		return ret;

#define __field(type, item) __field_ext(type, item, FILTER_OTHER)

#define __array(type, item, len) \
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
	ret = trace_define_field(event_call, #type "[" #len "]", #item, \
				 offsetof(typeof(field), item), \
				 sizeof(field.item), \
				 is_signed_type(type), FILTER_OTHER); \
	if (ret) \
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item), \
				 sizeof(field.__data_loc_##item), \
				 is_signed_type(type), FILTER_OTHER);

#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
static int \
ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
{ \
	struct ftrace_raw_##call field; \
	int ret; \
	tstruct; \
	return ret; \
}
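
/*
 * For the hypothetical foo_bar event used as an illustration earlier,
 * ftrace_define_fields_foo_bar() would boil down to roughly:
 *
 *	ret = trace_define_field(event_call, "int", "bar",
 *				 offsetof(struct ftrace_raw_foo_bar, bar),
 *				 sizeof(int), is_signed_type(int),
 *				 FILTER_OTHER);
 *
 * one trace_define_field() call per field, which is what makes the fields
 * visible to the event filter code and the format file.
 */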
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
 * Remember the offset of each array from the beginning of the event.
 */

#define __entry entry

#define __field(type, item)
#define __field_ext(type, item, filter_type)
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
	__data_offsets->item = __data_size + \
			       offsetof(typeof(*entry), __data); \
	__data_offsets->item |= (len * sizeof(type)) << 16; \
	__data_size += (len) * sizeof(type);

#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static inline int ftrace_get_offsets_##call( \
	struct ftrace_data_offsets_##call *__data_offsets, proto) \
{ \
	int __data_size = 0; \
	struct ftrace_raw_##call __maybe_unused *entry; \
	tstruct; \
	return __data_size; \
}
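
/*
 * For a hypothetical event carrying __string(msg, msg), the generated
 * ftrace_get_offsets_<call>() essentially does:
 *
 *	__data_offsets->msg  = __data_size + offsetof(typeof(*entry), __data);
 *	__data_offsets->msg |= (strlen(msg) + 1) << 16;
 *	__data_size += strlen(msg) + 1;
 *	return __data_size;
 *
 * i.e. it returns the total number of dynamic bytes that must be reserved
 * after the fixed-size part of the record.
 */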
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_event support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later.
 *
 * static int ftrace_profile_enable_<call>(void)
 * {
 *	return register_trace_<call>(ftrace_profile_<call>);
 * }
 *
 * static void ftrace_profile_disable_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_profile_<call>);
 * }
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)

#define DEFINE_EVENT(template, name, proto, args) \
static void ftrace_profile_##name(proto); \
static int ftrace_profile_enable_##name(struct ftrace_event_call *unused)\
{ \
	return register_trace_##name(ftrace_profile_##name); \
} \
static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
{ \
	unregister_trace_##name(ftrace_profile_##name); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif /* CONFIG_EVENT_PROFILE */
/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	return register_trace_<call>(ftrace_event_<call>);
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *	event = trace_current_buffer_lock_reserve(&buffer,
 *				  event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	<assign>; <-- Here we assign the entries by the __field and
 *		      __array macros.
 *
 *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace		= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name		= "<call>",
 *	.system		= "<system>",
 *	.raw_init	= trace_event_raw_init,
 *	.regfunc	= ftrace_reg_event_<call>,
 *	.unregfunc	= ftrace_unreg_event_<call>,
 * };
 */
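
/*
 * For context (an illustrative setup, not defined in this file): a trace
 * header, say a hypothetical include/trace/events/foo.h, normally looks like
 *
 *	#undef TRACE_SYSTEM
 *	#define TRACE_SYSTEM foo
 *
 *	#if !defined(_TRACE_FOO_H) || defined(TRACE_HEADER_MULTI_READ)
 *	#define _TRACE_FOO_H
 *	#include <linux/tracepoint.h>
 *
 *	TRACE_EVENT(foo_bar, ...);
 *
 *	#endif
 *	#include <trace/define_trace.h>
 *
 * and it is <trace/define_trace.h> (which pulls in this file) that causes
 * the header to be re-read with the overrides above, once per stage, via
 * the TRACE_INCLUDE(TRACE_INCLUDE_FILE) lines.
 */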
#ifdef CONFIG_EVENT_PROFILE

#define _TRACE_PROFILE_INIT(call) \
	.profile_enable = ftrace_profile_enable_##call, \
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif
#define __entry entry

#define __field(type, item)
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
	__entry->__data_loc_##item = __data_offsets.item;

#define __string(item, src) __dynamic_array(char, item, -1)

#define __assign_str(dst, src) \
	strcpy(__get_str(dst), src);

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)
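
/*
 * Typical pairing in an event definition (hypothetical field names):
 *
 *	TP_STRUCT__entry(
 *		__string(msg, msg)
 *	),
 *	TP_fast_assign(
 *		__assign_str(msg, msg);
 *	),
 *
 * __string() records, via __data_loc, where the string will live (its size
 * was computed by ftrace_get_offsets_<call>()), and __assign_str() copies
 * the string into that dynamic area of the record.
 */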
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
				       proto) \
{ \
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ring_buffer_event *event; \
	struct ftrace_raw_##call *entry; \
	struct ring_buffer *buffer; \
	unsigned long irq_flags; \
	int __data_size; \
	int pc; \
	local_save_flags(irq_flags); \
	pc = preempt_count(); \
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	event = trace_current_buffer_lock_reserve(&buffer, \
						  event_call->id, \
						  sizeof(*entry) + __data_size, \
						  irq_flags, pc); \
	if (!event) \
		return; \
	entry = ring_buffer_event_data(event); \
	tstruct \
	{ assign; } \
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_nowake_buffer_unlock_commit(buffer, \
						  event, irq_flags, pc); \
}
#define DEFINE_EVENT(template, call, proto, args) \
static void ftrace_raw_event_##call(proto) \
{ \
	ftrace_raw_event_id_##template(&event_##call, args); \
} \
static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\
{ \
	return register_trace_##call(ftrace_raw_event_##call); \
} \
static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)\
{ \
	unregister_trace_##call(ftrace_raw_event_##call); \
} \
static struct trace_event ftrace_event_type_##call = { \
	.trace		= ftrace_raw_output_##call, \
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#undef __entry
#define __entry REC

#undef __print_symbolic
#undef __get_dynamic_array

#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static const char print_fmt_##call[] = print;
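
/*
 * With the hypothetical TP_printk("bar=%d", __entry->bar) from above, the
 * generated string would be roughly:
 *
 *	static const char print_fmt_foo_bar[] = "\"bar=%d\", REC->bar";
 *
 * which is what later shows up in the event's format file for userspace
 * parsers.
 */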
#define DEFINE_EVENT(template, call, proto, args) \
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
	.name		= #call, \
	.system		= __stringify(TRACE_SYSTEM), \
	.event		= &ftrace_event_type_##call, \
	.raw_init	= trace_event_raw_init, \
	.regfunc	= ftrace_raw_reg_event_##call, \
	.unregfunc	= ftrace_raw_unreg_event_##call, \
	.print_fmt	= print_fmt_##template, \
	.define_fields	= ftrace_define_fields_##template, \
	_TRACE_PROFILE_INIT(call) \
};
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
static const char print_fmt_##call[] = print; \
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
	.name		= #call, \
	.system		= __stringify(TRACE_SYSTEM), \
	.event		= &ftrace_event_type_##call, \
	.raw_init	= trace_event_raw_init, \
	.regfunc	= ftrace_raw_reg_event_##call, \
	.unregfunc	= ftrace_raw_unreg_event_##call, \
	.print_fmt	= print_fmt_##call, \
	.define_fields	= ftrace_define_fields_##template, \
	_TRACE_PROFILE_INIT(call) \
};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
 * Define the insertion callback to profile events.
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert in the ring buffer but in a perf counter.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tp_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_<call> *entry;
 *	struct perf_trace_buf *trace_buf;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	struct trace_entry *ent;
 *	int __entry_size;
 *	int __data_size;
 *	int __cpu;
 *	int pc;
 *
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	// Protect the non-NMI buffer
 *	// This also protects the rcu read side
 *	local_irq_save(irq_flags);
 *	__cpu = smp_processor_id();
 *
 *	if (in_nmi())
 *		trace_buf = rcu_dereference(perf_trace_buf_nmi);
 *	else
 *		trace_buf = rcu_dereference(perf_trace_buf);
 *
 *	if (!trace_buf)
 *		goto end;
 *
 *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
 *
 *	// Avoid recursion from perf that could mess up the buffer
 *	if (trace_buf->recursion++)
 *		goto end_recursion;
 *
 *	raw_data = trace_buf->buf;
 *
 *	// Make recursion update visible before entering perf_tp_event
 *	// so that we protect from perf recursions.
 *	barrier();
 *
 *	// zero dead bytes from alignment to avoid stack leak to userspace:
 *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *	entry = (struct ftrace_raw_<call> *)raw_data;
 *	ent = &entry->ent;
 *	tracing_generic_entry_update(ent, irq_flags, pc);
 *	ent->type = event_call->id;
 *
 *	<tstruct>  <- store the dynamic array offsets
 *
 *	<assign>   <- assign our values
 *
 *	perf_tp_event(event_call->id, __addr, __count, entry,
 *		      __entry_size);  <- submit them to the perf counter
 * }
 */
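
/*
 * A concrete (hypothetical) sizing example for the ALIGN dance above:
 * with __data_size = 13 and sizeof(*entry) = 16, the raw payload is
 * 16 + 13 + 4 = 33 bytes including the u32 size word; ALIGN(33, 8) = 40,
 * and subtracting the u32 again gives __entry_size = 36, so the record
 * plus its size word always ends on a u64 boundary.
 */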
#ifdef CONFIG_EVENT_PROFILE

#define __entry entry

#undef __get_dynamic_array
#define __get_dynamic_array(field) \
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#define __get_str(field) (char *)__get_dynamic_array(field)

#define __perf_addr(a) __addr = (a)

#define __perf_count(c) __count = (c)
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static void \
ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
			    proto) \
{ \
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	extern int perf_swevent_get_recursion_context(void); \
	extern void perf_swevent_put_recursion_context(int rctx); \
	extern void perf_tp_event(int, u64, u64, void *, int); \
	struct ftrace_raw_##call *entry; \
	u64 __addr = 0, __count = 1; \
	unsigned long irq_flags; \
	struct trace_entry *ent; \
	int __entry_size; \
	int __data_size; \
	char *trace_buf; \
	char *raw_data; \
	int __cpu; \
	int rctx; \
	int pc; \
	pc = preempt_count(); \
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64)); \
	__entry_size -= sizeof(u32); \
	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE, \
		      "profile buffer not large enough")) \
		return; \
	local_irq_save(irq_flags); \
	rctx = perf_swevent_get_recursion_context(); \
	if (rctx < 0) \
		goto end_recursion; \
	__cpu = smp_processor_id(); \
	if (in_nmi()) \
		trace_buf = rcu_dereference(perf_trace_buf_nmi); \
	else \
		trace_buf = rcu_dereference(perf_trace_buf); \
	if (!trace_buf) \
		goto end; \
	raw_data = per_cpu_ptr(trace_buf, __cpu); \
	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
	entry = (struct ftrace_raw_##call *)raw_data; \
	ent = &entry->ent; \
	tracing_generic_entry_update(ent, irq_flags, pc); \
	ent->type = event_call->id; \
	tstruct \
	{ assign; } \
	perf_tp_event(event_call->id, __addr, __count, entry, \
		      __entry_size); \
end: \
	perf_swevent_put_recursion_context(rctx); \
end_recursion: \
	local_irq_restore(irq_flags); \
}
#define DEFINE_EVENT(template, call, proto, args) \
static void ftrace_profile_##call(proto) \
{ \
	struct ftrace_event_call *event_call = &event_##call; \
	ftrace_profile_templ_##template(event_call, args); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_EVENT_PROFILE */

#undef _TRACE_PROFILE_INIT