#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS */
#include <asm/syscall.h>	/* some archs define it here */
#endif

enum trace_type {
        __TRACE_FIRST_TYPE = 0,

        TRACE_FN,
        TRACE_CTX,
        TRACE_WAKE,
        TRACE_STACK,
        TRACE_PRINT,
        TRACE_BPRINT,
        TRACE_MMIO_RW,
        TRACE_MMIO_MAP,
        TRACE_BRANCH,
        TRACE_GRAPH_RET,
        TRACE_GRAPH_ENT,
        TRACE_USER_STACK,
        TRACE_BLK,
        TRACE_BPUTS,

        __TRACE_LAST_TYPE,
};


#undef __field
#define __field(type, item)		type item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \
        struct struct_name { \
                struct trace_entry ent; \
                tstruct \
        }

#undef TP_ARGS
#define TP_ARGS(args...)	args

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, \
                         filter, regfn) \
        FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
                     filter)

#include "trace_entries.h"
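
/*
 * As an illustrative sketch (assuming the function-entry record in
 * trace_entries.h keeps its usual two-word layout), the FTRACE_ENTRY()
 * machinery above expands each entry into a plain struct, e.g.:
 *
 *	struct ftrace_entry {
 *		struct trace_entry ent;
 *		unsigned long ip;
 *		unsigned long parent_ip;
 *	};
 */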

/*
 * Syscalls are special and need special handling; this is why
 * they are not included in trace_entries.h.
 */
struct syscall_trace_enter {
        struct trace_entry ent;
        int nr;
        unsigned long args[];
};

struct syscall_trace_exit {
        struct trace_entry ent;
        int nr;
        long ret;
};

struct kprobe_trace_entry_head {
        struct trace_entry ent;
        unsigned long ip;
};

struct kretprobe_trace_entry_head {
        struct trace_entry ent;
        unsigned long func;
        unsigned long ret_ip;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF       - interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED   - reschedule is requested
 *  HARDIRQ        - inside an interrupt handler
 *  SOFTIRQ        - inside a softirq handler
 */
enum trace_flag_type {
        TRACE_FLAG_IRQS_OFF = 0x01,
        TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
        TRACE_FLAG_NEED_RESCHED = 0x04,
        TRACE_FLAG_HARDIRQ = 0x08,
        TRACE_FLAG_SOFTIRQ = 0x10,
};

#define TRACE_BUF_SIZE		1024

struct trace_array;

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace).
 */
struct trace_array_cpu {
        atomic_t disabled;
        void *buffer_page;	/* ring buffer spare */

        unsigned long entries;
        unsigned long saved_latency;
        unsigned long critical_start;
        unsigned long critical_end;
        unsigned long critical_sequence;
        unsigned long nice;
        unsigned long policy;
        unsigned long rt_priority;
        unsigned long skipped_entries;
        cycle_t preempt_timestamp;
        pid_t pid;
        kuid_t uid;
        char comm[TASK_COMM_LEN];
};

struct tracer;

struct trace_buffer {
        struct trace_array *tr;
        struct ring_buffer *buffer;
        struct trace_array_cpu __percpu *data;
        cycle_t time_start;
        int cpu;
};

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well.
 */
struct trace_array {
        struct list_head list;
        char *name;
        struct trace_buffer trace_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
        /*
         * The max_buffer is used to snapshot the trace when a maximum
         * latency is reached, or when the user initiates a snapshot.
         * Some tracers will use this to store a maximum trace while
         * they continue examining live traces.
         *
         * The buffers for the max_buffer are set up the same as the
         * trace_buffer. When a snapshot is taken, the buffer of the
         * max_buffer is swapped with the buffer of the trace_buffer and
         * the buffers are reset for the trace_buffer so tracing can
         * continue.
         */
        struct trace_buffer max_buffer;
        bool allocated_snapshot;
#endif
        int buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
        int sys_refcount_enter;
        int sys_refcount_exit;
        DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
        DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
#endif
        int stop_count;
        int clock_id;
        struct tracer *current_trace;
        unsigned int flags;
        raw_spinlock_t start_lock;
        struct dentry *dir;
        struct dentry *options;
        struct dentry *percpu_dir;
        struct dentry *event_dir;
        struct list_head systems;
        struct list_head events;
        cpumask_var_t tracing_cpumask;	/* only trace on set CPUs */
        int ref;
};

enum {
        TRACE_ARRAY_FL_GLOBAL = (1 << 0)
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern void trace_array_put(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
        struct trace_array *tr;

        tr = list_entry(ftrace_trace_arrays.prev,
                        typeof(*tr), list);
        WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
        return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
        __builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id) \
        if (FTRACE_CMP_TYPE(var, etype)) { \
                var = (typeof(var))(entry); \
                WARN_ON(id && (entry)->type != id); \
                break; \
        }

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent) \
        do { \
                IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \
                IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \
                IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \
                IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
                IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
                IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
                IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \
                IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
                          TRACE_MMIO_RW); \
                IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
                          TRACE_MMIO_MAP); \
                IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
                IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \
                          TRACE_GRAPH_ENT); \
                IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
                          TRACE_GRAPH_RET); \
                __ftrace_bad_type(); \
        } while (0)
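
/*
 * Example usage (an illustrative sketch, not part of this header):
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *
 * If iter->ent->type is not TRACE_FN, the WARN_ON() in IF_ASSIGN()
 * fires; if "field" had a type with no IF_ASSIGN() line above, the
 * build would fail via __ftrace_bad_type().
 */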

/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
        const char *name;	/* Will appear on the trace_options file */
        u32 bit;		/* Mask assigned in the val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
        u32 val;
        struct tracer_opt *opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b
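
/*
 * For example (an illustrative sketch; the option name and bit value
 * are made up), a tracer with one boolean option could define:
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(my_option, 0x1) },
 *		{ }	// the array must be null-terminated
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = 0,	// all options start off
 *		.opts = my_opts,
 *	};
 */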


/**
 * struct tracer - a specific tracer and its callbacks to interact with debugfs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @wait_pipe: override how the user waits for traces on trace_pipe
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals that one of your private flags changed (trace_options file)
 * @flag_changed: called before a trace option flag is changed (return 0 to accept the change)
 * @flags: your private flags
 */
struct tracer {
        const char *name;
        int (*init)(struct trace_array *tr);
        void (*reset)(struct trace_array *tr);
        void (*start)(struct trace_array *tr);
        void (*stop)(struct trace_array *tr);
        void (*open)(struct trace_iterator *iter);
        void (*pipe_open)(struct trace_iterator *iter);
        void (*wait_pipe)(struct trace_iterator *iter);
        void (*close)(struct trace_iterator *iter);
        void (*pipe_close)(struct trace_iterator *iter);
        ssize_t (*read)(struct trace_iterator *iter,
                        struct file *filp, char __user *ubuf,
                        size_t cnt, loff_t *ppos);
        ssize_t (*splice_read)(struct trace_iterator *iter,
                               struct file *filp,
                               loff_t *ppos,
                               struct pipe_inode_info *pipe,
                               size_t len,
                               unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
        int (*selftest)(struct tracer *trace,
                        struct trace_array *tr);
#endif
        void (*print_header)(struct seq_file *m);
        enum print_line_t (*print_line)(struct trace_iterator *iter);
        /* If you handled the flag setting, return 0 */
        int (*set_flag)(u32 old_flags, u32 bit, int set);
        /* Return 0 if OK with change, else return non-zero */
        int (*flag_changed)(struct tracer *tracer,
                            u32 mask, int set);
        struct tracer *next;
        struct tracer_flags *flags;
        bool print_max;
        bool enabled;
#ifdef CONFIG_TRACER_MAX_TRACE
        bool use_max_tr;
#endif
};
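
/*
 * A minimal registration sketch (illustrative; "minimal_tracer" and its
 * callbacks are hypothetical, and __tracer_data is defined further down
 * in this header):
 *
 *	static struct tracer minimal_tracer __tracer_data = {
 *		.name  = "minimal",
 *		.init  = minimal_init,		// hypothetical callback
 *		.reset = minimal_reset,		// hypothetical callback
 *	};
 *
 *	// typically done from an __init function:
 *	ret = register_tracer(&minimal_tracer);
 */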


/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are made:
 *   If arch does not support a ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits.
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function, which calls the ftrace callback,
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
enum {
        TRACE_BUFFER_BIT,
        TRACE_BUFFER_NMI_BIT,
        TRACE_BUFFER_IRQ_BIT,
        TRACE_BUFFER_SIRQ_BIT,

        /* Start of function recursion bits */
        TRACE_FTRACE_BIT,
        TRACE_FTRACE_NMI_BIT,
        TRACE_FTRACE_IRQ_BIT,
        TRACE_FTRACE_SIRQ_BIT,

        /* GLOBAL_BITs must be greater than FTRACE_BITs */
        TRACE_GLOBAL_BIT,
        TRACE_GLOBAL_NMI_BIT,
        TRACE_GLOBAL_IRQ_BIT,
        TRACE_GLOBAL_SIRQ_BIT,

        /* INTERNAL_BITs must be greater than GLOBAL_BITs */
        TRACE_INTERNAL_BIT,
        TRACE_INTERNAL_NMI_BIT,
        TRACE_INTERNAL_IRQ_BIT,
        TRACE_INTERNAL_SIRQ_BIT,

        TRACE_CONTROL_BIT,

        /*
         * Abuse of the trace_recursion:
         * we need a way to maintain state if we are tracing the function
         * graph in irq context, because we want to trace a particular
         * function that was called in irq context while irq tracing is
         * off. Since this state can only be modified by current, we can
         * reuse trace_recursion.
         */
        TRACE_IRQ_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_GLOBAL_START	TRACE_GLOBAL_BIT
#define TRACE_GLOBAL_MAX	((1 << (TRACE_GLOBAL_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX
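
/*
 * Worked example (illustrative): TRACE_FTRACE_BIT is bit 4 in the enum
 * above, so the four FTRACE context bits occupy bits 4-7 and
 * TRACE_FTRACE_MAX is (1 << 8) - 1, i.e. a mask covering bits 0-7.
 * If current->trace_recursion holds any bit above that mask, a caller
 * earlier in the chain (GLOBAL or INTERNAL) has already done its
 * recursion check, so trace_test_and_set_recursion() below can skip
 * this one.
 */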

static __always_inline int trace_get_context_bit(void)
{
        int bit;

        if (in_interrupt()) {
                if (in_nmi())
                        bit = 0;
                else if (in_irq())
                        bit = 1;
                else
                        bit = 2;
        } else
                bit = 3;

        return bit;
}

static __always_inline int trace_test_and_set_recursion(int start, int max)
{
        unsigned int val = current->trace_recursion;
        int bit;

        /* A previous recursion check was made */
        if ((val & TRACE_CONTEXT_MASK) > max)
                return 0;

        bit = trace_get_context_bit() + start;
        if (unlikely(val & (1 << bit)))
                return -1;

        val |= 1 << bit;
        current->trace_recursion = val;
        barrier();

        return bit;
}

static __always_inline void trace_clear_recursion(int bit)
{
        unsigned int val = current->trace_recursion;

        if (!bit)
                return;

        bit = 1 << bit;
        val &= ~bit;

        barrier();
        current->trace_recursion = val;
}
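
/*
 * Typical caller pattern (an illustrative sketch; the work between the
 * two calls stands in for the tracer's real callback body):
 *
 *	int bit;
 *
 *	bit = trace_test_and_set_recursion(TRACE_FTRACE_START,
 *					   TRACE_FTRACE_MAX);
 *	if (bit < 0)
 *		return;		// already inside this context; bail out
 *
 *	...			// do the traced work
 *
 *	trace_clear_recursion(bit);
 */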

static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
        if (iter->buffer_iter && iter->buffer_iter[cpu])
                return iter->buffer_iter[cpu];
        return NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset(struct trace_buffer *buf, int cpu);
void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *trace_create_file(const char *name,
                                 umode_t mode,
                                 struct dentry *parent,
                                 void *data,
                                 const struct file_operations *fops);

struct dentry *tracing_init_dentry_tr(struct trace_array *tr);
struct dentry *tracing_init_dentry(void);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
                          int type,
                          unsigned long len,
                          unsigned long flags,
                          int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
                                            struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
                                          int *ent_cpu, u64 *ent_ts);

void __buffer_unlock_commit(struct ring_buffer *buffer,
                            struct ring_buffer_event *event);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

void poll_wait_pipe(struct trace_iterator *iter);

void tracing_sched_switch_trace(struct trace_array *tr,
                                struct task_struct *prev,
                                struct task_struct *next,
                                unsigned long flags, int pc);

void tracing_sched_wakeup_trace(struct trace_array *tr,
                                struct task_struct *wakee,
                                struct task_struct *cur,
                                unsigned long flags, int pc);
void trace_function(struct trace_array *tr,
                    unsigned long ip,
                    unsigned long parent_ip,
                    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
                          unsigned long ip,
                          unsigned long parent_ip,
                          unsigned long flags, int pc);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu) \
        for_each_cpu(cpu, tracing_buffer_mask)
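
/*
 * Example (an illustrative sketch): resetting every CPU buffer that is
 * part of the tracing cpumask.
 *
 *	int cpu;
 *
 *	for_each_tracing_cpu(cpu)
 *		tracing_reset(&tr->trace_buffer, cpu);
 */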

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
extern unsigned long tracing_max_latency;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
                          struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef CONFIG_STACKTRACE
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
                        int skip, int pc);

void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
                             int skip, int pc, struct pt_regs *regs);

void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
                            int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
                   int pc);
#else
static inline void ftrace_trace_stack(struct ring_buffer *buffer,
                                      unsigned long flags, int skip, int pc)
{
}

static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer,
                                           unsigned long flags, int skip,
                                           int pc, struct pt_regs *regs)
{
}

static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
                                          unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
                                 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */

extern cycle_t ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;
DECLARE_PER_CPU(int, ftrace_cpu_disabled);

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
                                           struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
                                                 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
                                          struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
                                             struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
                                                 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
                                         struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
                                      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
                                               struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
                                         struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up. These can be __init functions. Thus, when selftests
 * are enabled, the tracers need to reference __init functions.
 */
#define __tracer_data		__refdata
#else
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data		__read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(cycle_t nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
                    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
                       unsigned long ip, const char *fmt, ...);
int trace_array_printk_buf(struct ring_buffer *buffer,
                           unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern unsigned long trace_flags;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
                               struct ftrace_graph_ent *trace,
                               unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
                                 struct ftrace_graph_ret *trace,
                                 unsigned long flags, int pc);


#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
        int i;

        if (!ftrace_graph_count)
                return 1;

        for (i = 0; i < ftrace_graph_count; i++) {
                if (addr == ftrace_graph_funcs[i]) {
                        /*
                         * If no irqs are to be traced, but a
                         * set_graph_function is set, and called by an
                         * interrupt handler, we still want to trace it.
                         */
                        if (in_irq())
                                trace_recursion_set(TRACE_IRQ_BIT);
                        else
                                trace_recursion_clear(TRACE_IRQ_BIT);
                        return 1;
                }
        }

        return 0;
}
#else
static inline int ftrace_graph_addr(unsigned long addr)
{
        return 1;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
        return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct task_struct *task)
{
        if (list_empty(&ftrace_pids))
                return 1;

        return test_tsk_trace_trace(task);
}
extern int ftrace_is_dead(void);
#else
static inline int ftrace_trace_task(struct task_struct *task)
{
        return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
#endif

int ftrace_event_is_function(struct ftrace_event_call *call);

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
        bool cont;
        char *buffer;
        unsigned idx;
        unsigned size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
        return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
        return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
        parser->cont = false;
        parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
                          size_t cnt, loff_t *ppos);
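
/*
 * Typical write-handler loop (an illustrative sketch; the buffer size
 * is arbitrary and error handling is trimmed for brevity):
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser)) {
 *		// parser.buffer now holds one space-delimited token
 *	}
 *
 *	trace_parser_put(&parser);
 */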

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that controls the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
        TRACE_ITER_PRINT_PARENT = 0x01,
        TRACE_ITER_SYM_OFFSET = 0x02,
        TRACE_ITER_SYM_ADDR = 0x04,
        TRACE_ITER_VERBOSE = 0x08,
        TRACE_ITER_RAW = 0x10,
        TRACE_ITER_HEX = 0x20,
        TRACE_ITER_BIN = 0x40,
        TRACE_ITER_BLOCK = 0x80,
        TRACE_ITER_STACKTRACE = 0x100,
        TRACE_ITER_PRINTK = 0x200,
        TRACE_ITER_PREEMPTONLY = 0x400,
        TRACE_ITER_BRANCH = 0x800,
        TRACE_ITER_ANNOTATE = 0x1000,
        TRACE_ITER_USERSTACKTRACE = 0x2000,
        TRACE_ITER_SYM_USEROBJ = 0x4000,
        TRACE_ITER_PRINTK_MSGONLY = 0x8000,
        TRACE_ITER_CONTEXT_INFO = 0x10000,	/* Print pid/cpu/time */
        TRACE_ITER_LATENCY_FMT = 0x20000,
        TRACE_ITER_SLEEP_TIME = 0x40000,
        TRACE_ITER_GRAPH_TIME = 0x80000,
        TRACE_ITER_RECORD_CMD = 0x100000,
        TRACE_ITER_OVERWRITE = 0x200000,
        TRACE_ITER_STOP_ON_FREE = 0x400000,
        TRACE_ITER_IRQ_INFO = 0x800000,
        TRACE_ITER_MARKERS = 0x1000000,
        TRACE_ITER_FUNCTION = 0x2000000,
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
        (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
        if (trace_flags & TRACE_ITER_BRANCH)
                return enable_branch_tracing(tr);
        return 0;
}
static inline void trace_branch_disable(void)
{
        /* due to races, always disable */
        disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
        return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

struct ftrace_event_field {
        struct list_head link;
        const char *name;
        const char *type;
        int filter_type;
        int offset;
        int size;
        int is_signed;
};

struct event_filter {
        int n_preds;	/* Number assigned */
        int a_preds;	/* allocated */
        struct filter_pred *preds;
        struct filter_pred *root;
        char *filter_string;
};

struct event_subsystem {
        struct list_head list;
        const char *name;
        struct event_filter *filter;
        int ref_count;
};

struct ftrace_subsystem_dir {
        struct list_head list;
        struct event_subsystem *subsystem;
        struct trace_array *tr;
        struct dentry *entry;
        int ref_count;
        int nr_events;
};

#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED		16384

struct filter_pred;
struct regex;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

enum regex_type {
        MATCH_FULL = 0,
        MATCH_FRONT_ONLY,
        MATCH_MIDDLE_ONLY,
        MATCH_END_ONLY,
};

struct regex {
        char pattern[MAX_FILTER_STR_VAL];
        int len;
        int field_len;
        regex_match_func match;
};

struct filter_pred {
        filter_pred_fn_t fn;
        u64 val;
        struct regex regex;
        unsigned short *ops;
        struct ftrace_event_field *field;
        int offset;
        int not;
        int op;
        unsigned short index;
        unsigned short parent;
        unsigned short left;
        unsigned short right;
};

extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct ftrace_event_call *call,
                               struct trace_seq *s);
extern int apply_event_filter(struct ftrace_event_call *call,
                              char *filter_string);
extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
                                        char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
                                         struct trace_seq *s);
extern int filter_assign_type(const char *type);

struct ftrace_event_field *
trace_find_event_field(struct ftrace_event_call *call, char *name);

static inline int
filter_check_discard(struct ftrace_event_call *call, void *rec,
                     struct ring_buffer *buffer,
                     struct ring_buffer_event *event)
{
        if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
            !filter_match_preds(call->filter, rec)) {
                ring_buffer_discard_commit(buffer, event);
                return 1;
        }

        return 0;
}
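
/*
 * Typical use in an event commit path (an illustrative sketch; the
 * surrounding reserve/fill steps are abbreviated):
 *
 *	entry = ring_buffer_event_data(event);
 *	...				// fill in the entry fields
 *
 *	if (!filter_check_discard(call, entry, buffer, event))
 *		__buffer_unlock_commit(buffer, event);
 */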

extern void trace_event_enable_cmd_record(bool enable);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);

extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

extern const char *__start___tracepoint_str[];
extern const char *__stop___tracepoint_str[];

void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);

/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, as well as save the print formats
 * into sections for display. But the trace infrastructure wants
 * to use these without the added overhead at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance). The internal_trace_puts() is for such
 * a purpose.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
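
/*
 * Example (an illustrative sketch): emitting a fixed string from inside
 * the tracing core without the trace_printk() format bookkeeping.
 *
 *	internal_trace_puts("tracing is disabled\n");
 */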

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
        extern struct ftrace_event_call \
        __attribute__((__aligned__(4))) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
        FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
                     filter)
#include "trace_entries.h"

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct ftrace_event_call *call,
                               enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#endif /* _LINUX_KERNEL_TRACE_H */