include/trace/ftrace.h
/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry	ent;
 *	<type>			<item>;
 *	<type2>			<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */
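
/*
 * As an illustration only (this block is not part of the macro output),
 * a hypothetical tracepoint such as:
 *
 *	TRACE_EVENT(sample_event,
 *		TP_PROTO(int foo, const char *bar),
 *		TP_ARGS(foo, bar),
 *		TP_STRUCT__entry(
 *			__field(int, foo)
 *			__string(bar, bar)
 *		),
 *		...);
 *
 * would roughly expand, at this stage, to:
 *
 *	struct ftrace_raw_sample_event {
 *		struct trace_entry	ent;
 *		int			foo;
 *		u32			__data_loc_bar;
 *		char			__data[0];
 *	};
 *	static struct ftrace_event_call event_sample_event;
 */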

#include <linux/ftrace_event.h>

#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {				\
		struct trace_entry	ent;			\
		tstruct						\
		char			__data[0];		\
	};							\
	static struct ftrace_event_call event_##name

#undef __cpparg
#define __cpparg(arg...) arg

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, __cpparg(proto), __cpparg(args),	\
		__cpparg(tstruct), __cpparg(assign), __cpparg(print)) \

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)


/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32				<item1>;
 *	u32				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro creates each u32 <item>; it holds the
 * offset of the array from the beginning of the event. The size of the
 * array is also encoded, in the upper 16 bits of <item>.
 */
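
/*
 * For the hypothetical sample_event sketched above, this stage would
 * produce (again, only an illustration):
 *
 *	struct ftrace_data_offsets_sample_event {
 *		u32	bar;
 *	};
 *
 * where the low 16 bits of 'bar' hold the offset of the string data from
 * the beginning of the event and the upper 16 bits hold its length.
 */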

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {			\
		tstruct;					\
	};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Set up the output format of the trace point.
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *	struct ftrace_raw_##call field;
 *	int ret;
 *
 *	ret = trace_seq_printf(s, #type " " #item ";"
 *			       " offset:%u; size:%u;\n",
 *			       offsetof(struct ftrace_raw_##call, item),
 *			       sizeof(field.type));
 *
 * }
 */
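
/*
 * For instance, an 'int foo' field declared right after the common
 * trace_entry header would print roughly (the offset depends on
 * sizeof(struct trace_entry), so the numbers here are illustrative):
 *
 *	field:int foo;	offset:12;	size:4;
 */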

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item)						\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)							\
		return 0;

#undef __field_ext
#define __field_ext(type, item, filter_type)	__field(type, item)

#undef __array
#define __array(type, item, len)					\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)							\
		return 0;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
			       "offset:%u;\tsize:%u;\n",		       \
			       (unsigned int)offsetof(typeof(field),	       \
					__data_loc_##item),		       \
			       (unsigned int)sizeof(field.__data_loc_##item)); \
	if (!ret)							       \
		return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __entry
#define __entry REC

#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
static int								\
ftrace_format_##call(struct ftrace_event_call *unused,			\
		     struct trace_seq *s)				\
{									\
	struct ftrace_raw_##call field __attribute__((unused));	\
	int ret = 0;							\
									\
	tstruct;							\
									\
	trace_seq_printf(s, "\nprint fmt: " print);			\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	p = &get_cpu_var(ftrace_event_seq);
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	put_cpu();
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})
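
/*
 * Typical use is inside a TP_printk(), sketched here with made-up flag
 * names:
 *
 *	TP_printk("state=%s flags=%s",
 *		  __print_symbolic(__entry->state,
 *				   { 0, "IDLE" }, { 1, "BUSY" }),
 *		  __print_flags(__entry->flags, "|",
 *				{ 1, "RD" }, { 2, "WR" }))
 *
 * Each expands to a ({ ... }) statement expression that returns a string
 * built in the per-cpu trace_seq 'p' set up by ftrace_raw_output_<call>.
 */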

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static enum print_line_t						\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.id) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, #call ": " print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item), 0, FILTER_OTHER);	\
	if (ret)							\
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item), 0,	       \
				 FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
static int								\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	ret = trace_define_common_fields(event_call);			\
	if (ret)							\
		return ret;						\
									\
	tstruct;							\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);
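
/*
 * A worked example of the packing above: if this array's payload starts
 * 16 bytes into __data and is a 7-byte string (illustrative numbers):
 *
 *	__data_offsets->item = 16 + offsetof(typeof(*entry), __data);
 *	__data_offsets->item |= (7 * sizeof(char)) << 16;
 *
 * i.e. the offset lands in the low 16 bits and the byte length in the
 * upper 16 bits, which __get_dynamic_array() later unpacks with '& 0xffff'.
 */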

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1) \

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static inline int ftrace_get_offsets_##call(				\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_event support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later.
 *
 * static int ftrace_profile_enable_<call>(void)
 * {
 *	return register_trace_<call>(ftrace_profile_<call>);
 * }
 *
 * static void ftrace_profile_disable_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_profile_<call>);
 * }
 */

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static void ftrace_profile_##call(proto);				\
									\
static int ftrace_profile_enable_##call(void)				\
{									\
	return register_trace_##call(ftrace_profile_##call);		\
}									\
									\
static void ftrace_profile_disable_##call(void)				\
{									\
	unregister_trace_##call(ftrace_profile_##call);			\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(&buffer,
 *				  event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	<assign>;  <-- Here we assign the entries by the __field and
 *		       __array macros.
 *
 *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static int ftrace_raw_init_event_<call>(void)
 * {
 *	int id;
 *
 *	id = register_ftrace_event(&ftrace_event_type_<call>);
 *	if (!id)
 *		return -ENODEV;
 *	event_<call>.id = id;
 *	return 0;
 * }
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
 *	.system			= "<system>",
 *	.raw_init		= ftrace_raw_init_event_<call>,
 *	.regfunc		= ftrace_reg_event_<call>,
 *	.unregfunc		= ftrace_unreg_event_<call>,
 *	.show_format		= ftrace_format_<call>,
 * }
 *
 */
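
/*
 * All of the generated event_<call> structures are collected in the
 * "_ftrace_events" linker section. A rough sketch of how the event layer
 * walks them at boot (see kernel/trace/trace_events.c for the real loop):
 *
 *	extern struct ftrace_event_call __start_ftrace_events[];
 *	extern struct ftrace_event_call __stop_ftrace_events[];
 *
 *	for (call = __start_ftrace_events; call < __stop_ftrace_events; call++)
 *		// register 'call' with the event trace framework
 */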

#undef TP_FMT
#define TP_FMT(fmt, args...)	fmt "\n", ##args

#ifdef CONFIG_EVENT_PROFILE

#define _TRACE_PROFILE_INIT(call)					\
	.profile_count = ATOMIC_INIT(-1),				\
	.profile_enable = ftrace_profile_enable_##call,			\
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)		\

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);

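/*
 * A minimal sketch of how __string() and __assign_str() pair up inside a
 * TRACE_EVENT() definition (the names here are made up):
 *
 *	TP_STRUCT__entry(
 *		__string(name, dev->name)	<-- reserves the dynamic slot
 *	),
 *	TP_fast_assign(
 *		__assign_str(name, dev->name);	<-- copies into that slot
 *	),
 *
 * __string() sizes the slot as strlen(src) + 1 via ftrace_get_offsets_<call>(),
 * and __assign_str() then strcpy()s the source string into the reserved space.
 */
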
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static struct ftrace_event_call event_##call;				\
									\
static void ftrace_raw_event_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	struct ring_buffer *buffer;					\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(&buffer,		\
				 event_##call.id,			\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry = ring_buffer_event_data(event);				\
									\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_nowake_buffer_unlock_commit(buffer,		\
						  event, irq_flags, pc); \
}									\
									\
static int ftrace_raw_reg_event_##call(void *ptr)			\
{									\
	int ret;							\
									\
	ret = register_trace_##call(ftrace_raw_event_##call);		\
	if (ret)							\
		pr_info("event trace: Could not activate trace point "	\
			"probe to " #call "\n");			\
	return ret;							\
}									\
									\
static void ftrace_raw_unreg_event_##call(void *ptr)			\
{									\
	unregister_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static struct trace_event ftrace_event_type_##call = {			\
	.trace			= ftrace_raw_output_##call,		\
};									\
									\
static int ftrace_raw_init_event_##call(void)				\
{									\
	int id;								\
									\
	id = register_ftrace_event(&ftrace_event_type_##call);		\
	if (!id)							\
		return -ENODEV;						\
	event_##call.id = id;						\
	INIT_LIST_HEAD(&event_##call.fields);				\
	return 0;							\
}									\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= ftrace_raw_init_event_##call,		\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.show_format		= ftrace_format_##call,		\
	.define_fields		= ftrace_define_fields_##call,		\
	_TRACE_PROFILE_INIT(call)					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Define the insertion callback to profile events.
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert in the ring buffer but in a perf counter.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tp_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_<call> *entry;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	struct trace_entry *ent;
 *	int __entry_size;
 *	int __data_size;
 *	char *raw_data;
 *	int __cpu;
 *	int pc;
 *
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	// Protect the non-NMI buffer
 *	// This also protects the rcu read side
 *	local_irq_save(irq_flags);
 *	__cpu = smp_processor_id();
 *
 *	if (in_nmi())
 *		raw_data = rcu_dereference(trace_profile_buf_nmi);
 *	else
 *		raw_data = rcu_dereference(trace_profile_buf);
 *
 *	if (!raw_data)
 *		goto end;
 *
 *	raw_data = per_cpu_ptr(raw_data, __cpu);
 *
 *	// Zero dead bytes from alignment to avoid stack leak to userspace:
 *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *	entry = (struct ftrace_raw_<call> *)raw_data;
 *	ent = &entry->ent;
 *	tracing_generic_entry_update(ent, irq_flags, pc);
 *	ent->type = event_call->id;
 *
 *	<tstruct>  <-- initialize the dynamic array offsets
 *
 *	<assign>   <-- assign the field values
 *
 *	perf_tp_event(event_call->id, __addr, __count, entry,
 *		      __entry_size);  <-- submit them to the perf counter
 * }
 */
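
/*
 * A quick worked example of the size computation above: with, say,
 * sizeof(*entry) == 16 and __data_size == 13,
 *
 *	16 + 13 + sizeof(u32) = 33  ->  ALIGN(33, 8) = 40  ->  40 - 4 = 36
 *
 * leaving room for the u32 size field that perf lays down in front of the
 * entry while keeping the whole record u64 aligned.
 */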

#ifdef CONFIG_EVENT_PROFILE

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static void ftrace_profile_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	extern void perf_tp_event(int, u64, u64, void *, int);		\
	struct ftrace_raw_##call *entry;				\
	u64 __addr = 0, __count = 1;					\
	unsigned long irq_flags;					\
	struct trace_entry *ent;					\
	int __entry_size;						\
	int __data_size;						\
	char *raw_data;							\
	int __cpu;							\
	int pc;								\
									\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
		      "profile buffer not large enough"))		\
		return;							\
									\
	local_irq_save(irq_flags);					\
	__cpu = smp_processor_id();					\
									\
	if (in_nmi())							\
		raw_data = rcu_dereference(trace_profile_buf_nmi);	\
	else								\
		raw_data = rcu_dereference(trace_profile_buf);		\
									\
	if (!raw_data)							\
		goto end;						\
									\
	raw_data = per_cpu_ptr(raw_data, __cpu);			\
									\
	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
	entry = (struct ftrace_raw_##call *)raw_data;			\
	ent = &entry->ent;						\
	tracing_generic_entry_update(ent, irq_flags, pc);		\
	ent->type = event_call->id;					\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	perf_tp_event(event_call->id, __addr, __count, entry,		\
		      __entry_size);					\
									\
end:									\
	local_irq_restore(irq_flags);					\
									\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_EVENT_PROFILE */

#undef _TRACE_PROFILE_INIT