| 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
| 2 | #ifndef _LINUX_TRACEPOINT_H |
| 3 | #define _LINUX_TRACEPOINT_H |
| 4 | |
| 5 | /* |
| 6 | * Kernel Tracepoint API. |
| 7 | * |
| 8 | * See Documentation/trace/tracepoints.rst. |
| 9 | * |
| 10 | * Copyright (C) 2008-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> |
| 11 | * |
| 12 | * Heavily inspired from the Linux Kernel Markers. |
| 13 | */ |
| 14 | |
| 15 | #include <linux/smp.h> |
| 16 | #include <linux/srcu.h> |
| 17 | #include <linux/errno.h> |
| 18 | #include <linux/types.h> |
| 19 | #include <linux/rcupdate.h> |
| 20 | #include <linux/rcupdate_trace.h> |
| 21 | #include <linux/tracepoint-defs.h> |
| 22 | #include <linux/static_call.h> |
| 23 | |
| 24 | struct module; |
| 25 | struct tracepoint; |
| 26 | struct notifier_block; |
| 27 | |
/*
 * Mapping of one symbolic value (from TRACE_DEFINE_ENUM()/_SIZEOF()) to
 * its numeric value, so print formats can be translated for user space.
 */
struct trace_eval_map {
	const char		*system;	/* event subsystem the symbol belongs to */
	const char		*eval_string;	/* symbol text as written in the format */
	unsigned long		eval_value;	/* resolved numeric value */
};
| 33 | |
/* Priority used when a probe is registered without an explicit one. */
#define TRACEPOINT_DEFAULT_PRIO	10

/*
 * Attach/detach a probe function with private @data to a tracepoint.
 * The probe is passed as void * and cast back by the call site wrappers
 * generated in __DECLARE_TRACE_COMMON().
 */
extern int
tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data);
/* As above with an explicit priority (NOTE(review): ordering semantics
 * of @prio live in kernel/tracepoint.c — confirm there). */
extern int
tracepoint_probe_register_prio(struct tracepoint *tp, void *probe, void *data,
			       int prio);
/* As above, but do not fail when the same (probe, data) pair already exists. */
extern int
tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe, void *data,
					 int prio);
extern int
tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data);
| 46 | static inline int |
| 47 | tracepoint_probe_register_may_exist(struct tracepoint *tp, void *probe, |
| 48 | void *data) |
| 49 | { |
| 50 | return tracepoint_probe_register_prio_may_exist(tp, probe, data, |
| 51 | TRACEPOINT_DEFAULT_PRIO); |
| 52 | } |
/* Invoke @fct(tp, priv) for every built-in (core kernel) tracepoint. */
extern void
for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
			   void *priv);
| 56 | |
#ifdef CONFIG_MODULES
/* Per-module bookkeeping entry for modules that contain tracepoints. */
struct tp_module {
	struct list_head list;
	struct module *mod;
};

/* True if @mod carries a taint that makes its tracepoints unusable. */
bool trace_module_has_bad_taint(struct module *mod);
/* Notifier chain for tracepoint-carrying modules coming and going. */
extern int register_tracepoint_module_notifier(struct notifier_block *nb);
extern int unregister_tracepoint_module_notifier(struct notifier_block *nb);
/* Invoke @fct for every tracepoint of every loaded module. */
void for_each_module_tracepoint(void (*fct)(struct tracepoint *,
				struct module *, void *),
				void *priv);
/* Invoke @fct for every tracepoint belonging to one given module. */
void for_each_tracepoint_in_module(struct module *,
				   void (*fct)(struct tracepoint *,
				   struct module *, void *),
				   void *priv);
#else
/* !CONFIG_MODULES stubs: there are no module tracepoints to manage. */
static inline bool trace_module_has_bad_taint(struct module *mod)
{
	return false;
}
static inline
int register_tracepoint_module_notifier(struct notifier_block *nb)
{
	return 0;
}
static inline
int unregister_tracepoint_module_notifier(struct notifier_block *nb)
{
	return 0;
}
static inline
void for_each_module_tracepoint(void (*fct)(struct tracepoint *,
				struct module *, void *),
				void *priv)
{
}
static inline
void for_each_tracepoint_in_module(struct module *mod,
				   void (*fct)(struct tracepoint *,
				   struct module *, void *),
				   void *priv)
{
}
#endif /* CONFIG_MODULES */
| 102 | |
| 103 | /* |
| 104 | * tracepoint_synchronize_unregister must be called between the last tracepoint |
| 105 | * probe unregistration and the end of module exit to make sure there is no |
| 106 | * caller executing a probe when it is freed. |
| 107 | * |
| 108 | * An alternative is to use the following for batch reclaim associated |
| 109 | * with a given tracepoint: |
| 110 | * |
| 111 | * - tracepoint_is_faultable() == false: call_rcu() |
| 112 | * - tracepoint_is_faultable() == true: call_rcu_tasks_trace() |
| 113 | */ |
#ifdef CONFIG_TRACEPOINTS
static inline void tracepoint_synchronize_unregister(void)
{
	/*
	 * Wait out both reader flavors: faultable (syscall) probes run
	 * under RCU tasks trace, regular probes under preempt-off RCU.
	 */
	synchronize_rcu_tasks_trace();
	synchronize_rcu();
}
/*
 * True when probes on @tp may fault, i.e. the tracepoint was created
 * with DEFINE_TRACE_SYSCALL() (which sets ext->faultable).
 */
static inline bool tracepoint_is_faultable(struct tracepoint *tp)
{
	return tp->ext && tp->ext->faultable;
}
#else
/* Tracepoints compiled out: nothing to wait for, nothing can fault. */
static inline void tracepoint_synchronize_unregister(void)
{ }
static inline bool tracepoint_is_faultable(struct tracepoint *tp)
{
	return false;
}
#endif
| 132 | |
| 133 | #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS |
| 134 | extern int syscall_regfunc(void); |
| 135 | extern void syscall_unregfunc(void); |
| 136 | #endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */ |
| 137 | |
/*
 * PARAMS() passes a comma-containing argument list through a single
 * macro parameter without the preprocessor splitting it.
 */
#ifndef PARAMS
#define PARAMS(args...) args
#endif

/*
 * No-ops here; the trace event infrastructure redefines these to export
 * enum and sizeof() values (see struct trace_eval_map above).
 */
#define TRACE_DEFINE_ENUM(x)
#define TRACE_DEFINE_SIZEOF(x)
| 144 | |
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
/* Entries in __tracepoints_ptrs are 32-bit self-relative offsets. */
static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
{
	return offset_to_ptr(p);
}

/* Emit a 4-byte PC-relative reference to the tracepoint struct. */
#define __TRACEPOINT_ENTRY(name) \
	asm(" .section \"__tracepoints_ptrs\", \"a\" \n" \
	    " .balign 4 \n" \
	    " .long __tracepoint_" #name " - . \n" \
	    " .previous \n")
#else
/* Entries in __tracepoints_ptrs are plain absolute pointers. */
static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
{
	return *p;
}

/* Emit an absolute pointer to the tracepoint struct into the section. */
#define __TRACEPOINT_ENTRY(name) \
	static tracepoint_ptr_t __tracepoint_ptr_##name __used \
	__section("__tracepoints_ptrs") = &__tracepoint_##name
#endif
| 166 | |
| 167 | #endif /* _LINUX_TRACEPOINT_H */ |
| 168 | |
| 169 | /* |
| 170 | * Note: we keep the TRACE_EVENT and DECLARE_TRACE outside the include |
| 171 | * file ifdef protection. |
| 172 | * This is due to the way trace events work. If a file includes two |
| 173 | * trace event headers under one "CREATE_TRACE_POINTS" the first include |
| 174 | * will override the TRACE_EVENT and break the second include. |
| 175 | */ |
| 176 | |
| 177 | #ifndef DECLARE_TRACE |
| 178 | |
| 179 | #define TP_PROTO(args...) args |
| 180 | #define TP_ARGS(args...) args |
| 181 | #define TP_CONDITION(args...) args |
| 182 | |
| 183 | /* |
 * Individual subsystems may have a separate configuration to
| 185 | * enable their tracepoints. By default, this file will create |
| 186 | * the tracepoints if CONFIG_TRACEPOINTS is defined. If a subsystem |
| 187 | * wants to be able to disable its tracepoints from being created |
| 188 | * it can define NOTRACE before including the tracepoint headers. |
| 189 | */ |
| 190 | #if defined(CONFIG_TRACEPOINTS) && !defined(NOTRACE) |
| 191 | #define TRACEPOINTS_ENABLED |
| 192 | #endif |
| 193 | |
| 194 | #ifdef TRACEPOINTS_ENABLED |
| 195 | |
#ifdef CONFIG_HAVE_STATIC_CALL
/*
 * Dispatch through the tracepoint's static call, handing it the first
 * registered probe's private data.  NOTE(review): with several probes
 * attached the static call presumably targets __traceiter_##name, which
 * re-reads per-probe data itself — the retargeting happens in
 * kernel/tracepoint.c, not visible here.
 */
#define __DO_TRACE_CALL(name, args)					\
	do {								\
		struct tracepoint_func *it_func_ptr;			\
		void *__data;						\
		it_func_ptr =						\
			rcu_dereference_raw((&__tracepoint_##name)->funcs); \
		if (it_func_ptr) {					\
			__data = (it_func_ptr)->data;			\
			static_call(tp_func_##name)(__data, args);	\
		}							\
	} while (0)
#else
/* No static calls: always go through the probe iterator. */
#define __DO_TRACE_CALL(name, args)	__traceiter_##name(NULL, args)
#endif /* CONFIG_HAVE_STATIC_CALL */
| 211 | |
| 212 | /* |
| 213 | * Declare an exported function that Rust code can call to trigger this |
| 214 | * tracepoint. This function does not include the static branch; that is done |
| 215 | * in Rust to avoid a function call when the tracepoint is disabled. |
| 216 | */ |
/*
 * Empty by default; NOTE(review): the Rust build is expected to redefine
 * DEFINE_RUST_DO_TRACE to expand __DEFINE_RUST_DO_TRACE — that
 * redefinition is not visible in this header.
 */
#define DEFINE_RUST_DO_TRACE(name, proto, args)
#define __DEFINE_RUST_DO_TRACE(name, proto, args)	\
	notrace void rust_do_trace_##name(proto)	\
	{						\
		__do_trace_##name(args);		\
	}
| 223 | |
| 224 | /* |
| 225 | * Make sure the alignment of the structure in the __tracepoints section will |
| 226 | * not add unwanted padding between the beginning of the section and the |
| 227 | * structure. Force alignment to the same alignment as the section start. |
| 228 | * |
| 229 | * When lockdep is enabled, we make sure to always test if RCU is |
| 230 | * "watching" regardless if the tracepoint is enabled or not. Tracepoints |
| 231 | * require RCU to be active, and it should always warn at the tracepoint |
| 232 | * site if it is not watching, as it will need to be active when the |
| 233 | * tracepoint is enabled. |
| 234 | */ |
/*
 * For tracepoint <name>, generate:
 *  - declarations of the probe iterator, its static call, the tracepoint
 *    instance and the Rust entry point,
 *  - register/unregister wrappers that cast the typed probe to void *,
 *  - check_trace_callback_type_<name>() to type-check probe signatures
 *    at compile time (empty body, no runtime cost), and
 *  - trace_<name>_enabled(), reading the tracepoint's static branch.
 */
#define __DECLARE_TRACE_COMMON(name, proto, args, data_proto)		\
	extern int __traceiter_##name(data_proto);			\
	DECLARE_STATIC_CALL(tp_func_##name, __traceiter_##name);	\
	extern struct tracepoint __tracepoint_##name;			\
	extern void rust_do_trace_##name(proto);			\
	static inline int						\
	register_trace_##name(void (*probe)(data_proto), void *data)	\
	{								\
		return tracepoint_probe_register(&__tracepoint_##name,	\
						(void *)probe, data);	\
	}								\
	static inline int						\
	register_trace_prio_##name(void (*probe)(data_proto), void *data,\
				   int prio)				\
	{								\
		return tracepoint_probe_register_prio(&__tracepoint_##name, \
					      (void *)probe, data, prio); \
	}								\
	static inline int						\
	unregister_trace_##name(void (*probe)(data_proto), void *data)	\
	{								\
		return tracepoint_probe_unregister(&__tracepoint_##name,\
						(void *)probe, data);	\
	}								\
	static inline void						\
	check_trace_callback_type_##name(void (*cb)(data_proto))	\
	{								\
	}								\
	static inline bool						\
	trace_##name##_enabled(void)					\
	{								\
		return static_branch_unlikely(&__tracepoint_##name.key);\
	}
| 268 | |
/*
 * Non-syscall tracepoint: probes fire only when @cond is true and run
 * with preemption disabled (guard(preempt_notrace)).  trace_<name>() is
 * the inline fast path guarded by a static branch; with lockdep enabled
 * it also warns if RCU is not watching even while the tracepoint is off.
 */
#define __DECLARE_TRACE(name, proto, args, cond, data_proto)		\
	__DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) \
	static inline void __do_trace_##name(proto)			\
	{								\
		if (cond) {						\
			guard(preempt_notrace)();			\
			__DO_TRACE_CALL(name, TP_ARGS(args));		\
		}							\
	}								\
	static inline void trace_##name(proto)				\
	{								\
		if (static_branch_unlikely(&__tracepoint_##name.key))	\
			__do_trace_##name(args);			\
		if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) {		\
			WARN_ONCE(!rcu_is_watching(),			\
				  "RCU not watching for tracepoint");	\
		}							\
	}
| 287 | |
/*
 * Syscall tracepoint: probes are allowed to fault (hence might_fault()),
 * so the call is protected by RCU tasks trace instead of disabling
 * preemption, and there is no @cond.
 */
#define __DECLARE_TRACE_SYSCALL(name, proto, args, data_proto)		\
	__DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) \
	static inline void __do_trace_##name(proto)			\
	{								\
		guard(rcu_tasks_trace)();				\
		__DO_TRACE_CALL(name, TP_ARGS(args));			\
	}								\
	static inline void trace_##name(proto)				\
	{								\
		might_fault();						\
		if (static_branch_unlikely(&__tracepoint_##name.key))	\
			__do_trace_##name(args);			\
		if (IS_ENABLED(CONFIG_LOCKDEP)) {			\
			WARN_ONCE(!rcu_is_watching(),			\
				  "RCU not watching for tracepoint");	\
		}							\
	}
| 305 | |
| 306 | /* |
| 307 | * We have no guarantee that gcc and the linker won't up-align the tracepoint |
| 308 | * structures, so we create an array of pointers that will be used for iteration |
| 309 | * on the tracepoints. |
| 310 | * |
| 311 | * it_func[0] is never NULL because there is at least one element in the array |
| 312 | * when the array itself is non NULL. |
| 313 | */ |
/*
 * Instantiate tracepoint <_name>: its name string, the tracepoint struct
 * in the __tracepoints section (referenced from __tracepoints_ptrs via
 * __TRACEPOINT_ENTRY), the probe iterator that walks the NULL-terminated
 * funcs array, a do-nothing probe stub, the static call (defaulting to
 * the iterator) and the optional Rust entry point.
 */
#define __DEFINE_TRACE_EXT(_name, _ext, proto, args)			\
	static const char __tpstrtab_##_name[]				\
	__section("__tracepoints_strings") = #_name;			\
	extern struct static_call_key STATIC_CALL_KEY(tp_func_##_name);	\
	int __traceiter_##_name(void *__data, proto);			\
	void __probestub_##_name(void *__data, proto);			\
	struct tracepoint __tracepoint_##_name	__used			\
	__section("__tracepoints") = {					\
		.name = __tpstrtab_##_name,				\
		.key = STATIC_KEY_FALSE_INIT,				\
		.static_call_key = &STATIC_CALL_KEY(tp_func_##_name),	\
		.static_call_tramp = STATIC_CALL_TRAMP_ADDR(tp_func_##_name), \
		.iterator = &__traceiter_##_name,			\
		.probestub = &__probestub_##_name,			\
		.funcs = NULL,						\
		.ext = _ext,						\
	};								\
	__TRACEPOINT_ENTRY(_name);					\
	int __traceiter_##_name(void *__data, proto)			\
	{								\
		struct tracepoint_func *it_func_ptr;			\
		void *it_func;						\
									\
		it_func_ptr =						\
			rcu_dereference_raw((&__tracepoint_##_name)->funcs); \
		if (it_func_ptr) {					\
			do {						\
				it_func = READ_ONCE((it_func_ptr)->func); \
				__data = (it_func_ptr)->data;		\
				((void(*)(void *, proto))(it_func))(__data, args); \
			} while ((++it_func_ptr)->func);		\
		}							\
		return 0;						\
	}								\
	void __probestub_##_name(void *__data, proto)			\
	{								\
	}								\
	DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name);	\
	DEFINE_RUST_DO_TRACE(_name, TP_PROTO(proto), TP_ARGS(args))
| 353 | |
/* Tracepoint with (un)registration callbacks; probes must not fault. */
#define DEFINE_TRACE_FN(_name, _reg, _unreg, _proto, _args)		\
	static struct tracepoint_ext __tracepoint_ext_##_name = {	\
		.regfunc = _reg,					\
		.unregfunc = _unreg,					\
		.faultable = false,					\
	};								\
	__DEFINE_TRACE_EXT(_name, &__tracepoint_ext_##_name, PARAMS(_proto), PARAMS(_args));

/* Syscall tracepoint: like DEFINE_TRACE_FN, but probes may fault. */
#define DEFINE_TRACE_SYSCALL(_name, _reg, _unreg, _proto, _args)	\
	static struct tracepoint_ext __tracepoint_ext_##_name = {	\
		.regfunc = _reg,					\
		.unregfunc = _unreg,					\
		.faultable = true,					\
	};								\
	__DEFINE_TRACE_EXT(_name, &__tracepoint_ext_##_name, PARAMS(_proto), PARAMS(_args));

/* Plain tracepoint: no callbacks, no extension structure (.ext = NULL). */
#define DEFINE_TRACE(_name, _proto, _args)				\
	__DEFINE_TRACE_EXT(_name, NULL, PARAMS(_proto), PARAMS(_args));
| 372 | |
/*
 * Export the three symbols modules need to attach to or fire this
 * tracepoint: the tracepoint struct, the probe iterator and its
 * static call.
 */
#define EXPORT_TRACEPOINT_SYMBOL_GPL(name)				\
	EXPORT_SYMBOL_GPL(__tracepoint_##name);				\
	EXPORT_SYMBOL_GPL(__traceiter_##name);				\
	EXPORT_STATIC_CALL_GPL(tp_func_##name)
#define EXPORT_TRACEPOINT_SYMBOL(name)					\
	EXPORT_SYMBOL(__tracepoint_##name);				\
	EXPORT_SYMBOL(__traceiter_##name);				\
	EXPORT_STATIC_CALL(tp_func_##name)
| 381 | |
| 382 | |
| 383 | #else /* !TRACEPOINTS_ENABLED */ |
/*
 * Tracepoints compiled out: trace_<name>() is an empty inline so call
 * sites still compile and type-check, registration returns -ENOSYS and
 * trace_<name>_enabled() is constant false.
 */
#define __DECLARE_TRACE_COMMON(name, proto, args, data_proto)	\
	static inline void trace_##name(proto)			\
	{ }							\
	static inline int					\
	register_trace_##name(void (*probe)(data_proto),	\
			      void *data)			\
	{							\
		return -ENOSYS;					\
	}							\
	static inline int					\
	unregister_trace_##name(void (*probe)(data_proto),	\
				void *data)			\
	{							\
		return -ENOSYS;					\
	}							\
	static inline void check_trace_callback_type_##name(void (*cb)(data_proto)) \
	{							\
	}							\
	static inline bool					\
	trace_##name##_enabled(void)				\
	{							\
		return false;					\
	}

#define __DECLARE_TRACE(name, proto, args, cond, data_proto)		\
	__DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto))

#define __DECLARE_TRACE_SYSCALL(name, proto, args, data_proto)		\
	__DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto))

/* Definitions and exports expand to nothing when tracepoints are off. */
#define DEFINE_TRACE_FN(name, reg, unreg, proto, args)
#define DEFINE_TRACE_SYSCALL(name, reg, unreg, proto, args)
#define DEFINE_TRACE(name, proto, args)
#define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
#define EXPORT_TRACEPOINT_SYMBOL(name)
| 419 | |
| 420 | #endif /* TRACEPOINTS_ENABLED */ |
| 421 | |
| 422 | #ifdef CONFIG_TRACING |
| 423 | /** |
| 424 | * tracepoint_string - register constant persistent string to trace system |
 * @str: a constant persistent string that will be referenced in tracepoints
| 426 | * |
| 427 | * If constant strings are being used in tracepoints, it is faster and |
| 428 | * more efficient to just save the pointer to the string and reference |
| 429 | * that with a printf "%s" instead of saving the string in the ring buffer |
| 430 | * and wasting space and time. |
| 431 | * |
| 432 | * The problem with the above approach is that userspace tools that read |
| 433 | * the binary output of the trace buffers do not have access to the string. |
| 434 | * Instead they just show the address of the string which is not very |
| 435 | * useful to users. |
| 436 | * |
| 437 | * With tracepoint_string(), the string will be registered to the tracing |
| 438 | * system and exported to userspace via the debugfs/tracing/printk_formats |
| 439 | * file that maps the string address to the string text. This way userspace |
| 440 | * tools that read the binary buffers have a way to map the pointers to |
| 441 | * the ASCII strings they represent. |
| 442 | * |
| 443 | * The @str used must be a constant string and persistent as it would not |
| 444 | * make sense to show a string that no longer exists. But it is still fine |
| 445 | * to be used with modules, because when modules are unloaded, if they |
| 446 | * had tracepoints, the ring buffers are cleared too. As long as the string |
| 447 | * does not change during the life of the module, it is fine to use |
| 448 | * tracepoint_string() within a module. |
| 449 | */ |
/*
 * Statement expression evaluates to the address of a static pointer to
 * @str, placed in the __tracepoint_str section for the tracer to map.
 */
#define tracepoint_string(str)						\
	({								\
		static const char *___tp_str __tracepoint_string = str; \
		___tp_str;						\
	})
#define __tracepoint_string	__used __section("__tracepoint_str")
#else
/*
 * tracepoint_string() is used to save the string address for userspace
 * tracing tools. When tracing isn't configured, there's no need to save
 * anything.
 */
# define tracepoint_string(str) str
# define __tracepoint_string
#endif
| 465 | |
/*
 * DECLARE_TRACE*() create tracepoints whose generated symbols carry a
 * "_tp" suffix; the DECLARE_TRACE_EVENT*() variants keep the bare name
 * and are used by the TRACE_EVENT machinery below.  The cpu_online()
 * condition means these tracepoints only fire on an online CPU.
 */
#define DECLARE_TRACE(name, proto, args)				\
	__DECLARE_TRACE(name##_tp, PARAMS(proto), PARAMS(args),		\
			cpu_online(raw_smp_processor_id()),		\
			PARAMS(void *__data, proto))

#define DECLARE_TRACE_CONDITION(name, proto, args, cond)		\
	__DECLARE_TRACE(name##_tp, PARAMS(proto), PARAMS(args),		\
			cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \
			PARAMS(void *__data, proto))

#define DECLARE_TRACE_SYSCALL(name, proto, args)			\
	__DECLARE_TRACE_SYSCALL(name##_tp, PARAMS(proto), PARAMS(args),	\
				PARAMS(void *__data, proto))

#define DECLARE_TRACE_EVENT(name, proto, args)				\
	__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args),		\
			cpu_online(raw_smp_processor_id()),		\
			PARAMS(void *__data, proto))

#define DECLARE_TRACE_EVENT_CONDITION(name, proto, args, cond)		\
	__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args),		\
			cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \
			PARAMS(void *__data, proto))

#define DECLARE_TRACE_EVENT_SYSCALL(name, proto, args)			\
	__DECLARE_TRACE_SYSCALL(name, PARAMS(proto), PARAMS(args),	\
				PARAMS(void *__data, proto))

/* Hooks for event flags and perf permission checks; no-ops by default. */
#define TRACE_EVENT_FLAGS(event, flag)

#define TRACE_EVENT_PERF_PERM(event, expr...)
| 497 | |
| 498 | #endif /* DECLARE_TRACE */ |
| 499 | |
| 500 | #ifndef TRACE_EVENT |
| 501 | /* |
| 502 | * For use with the TRACE_EVENT macro: |
| 503 | * |
| 504 | * We define a tracepoint, its arguments, its printk format |
| 505 | * and its 'fast binary record' layout. |
| 506 | * |
| 507 | * Firstly, name your tracepoint via TRACE_EVENT(name : the |
| 508 | * 'subsystem_event' notation is fine. |
| 509 | * |
| 510 | * Think about this whole construct as the |
| 511 | * 'trace_sched_switch() function' from now on. |
| 512 | * |
| 513 | * |
| 514 | * TRACE_EVENT(sched_switch, |
| 515 | * |
| 516 | * * |
| 517 | * * A function has a regular function arguments |
| 518 | * * prototype, declare it via TP_PROTO(): |
| 519 | * * |
| 520 | * |
| 521 | * TP_PROTO(struct rq *rq, struct task_struct *prev, |
| 522 | * struct task_struct *next), |
| 523 | * |
| 524 | * * |
| 525 | * * Define the call signature of the 'function'. |
| 526 | * * (Design sidenote: we use this instead of a |
| 527 | * * TP_PROTO1/TP_PROTO2/TP_PROTO3 ugliness.) |
| 528 | * * |
| 529 | * |
| 530 | * TP_ARGS(rq, prev, next), |
| 531 | * |
| 532 | * * |
| 533 | * * Fast binary tracing: define the trace record via |
| 534 | * * TP_STRUCT__entry(). You can think about it like a |
| 535 | * * regular C structure local variable definition. |
| 536 | * * |
| 537 | * * This is how the trace record is structured and will |
| 538 | * * be saved into the ring buffer. These are the fields |
| 539 | * * that will be exposed to user-space in |
| 540 | * * /sys/kernel/tracing/events/<*>/format. |
| 541 | * * |
| 542 | * * The declared 'local variable' is called '__entry' |
| 543 | * * |
| 544 | * * __field(pid_t, prev_pid) is equivalent to a standard declaration: |
| 545 | * * |
| 546 | * * pid_t prev_pid; |
| 547 | * * |
| 548 | * * __array(char, prev_comm, TASK_COMM_LEN) is equivalent to: |
| 549 | * * |
| 550 | * * char prev_comm[TASK_COMM_LEN]; |
| 551 | * * |
| 552 | * |
| 553 | * TP_STRUCT__entry( |
| 554 | * __array( char, prev_comm, TASK_COMM_LEN ) |
| 555 | * __field( pid_t, prev_pid ) |
| 556 | * __field( int, prev_prio ) |
| 557 | * __array( char, next_comm, TASK_COMM_LEN ) |
| 558 | * __field( pid_t, next_pid ) |
| 559 | * __field( int, next_prio ) |
| 560 | * ), |
| 561 | * |
| 562 | * * |
| 563 | * * Assign the entry into the trace record, by embedding |
| 564 | * * a full C statement block into TP_fast_assign(). You |
| 565 | * * can refer to the trace record as '__entry' - |
| 566 | * * otherwise you can put arbitrary C code in here. |
| 567 | * * |
| 568 | * * Note: this C code will execute every time a trace event |
| 569 | * * happens, on an active tracepoint. |
| 570 | * * |
| 571 | * |
| 572 | * TP_fast_assign( |
| 573 | * memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN); |
| 574 | * __entry->prev_pid = prev->pid; |
| 575 | * __entry->prev_prio = prev->prio; |
| 576 | * memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN); |
| 577 | * __entry->next_pid = next->pid; |
| 578 | * __entry->next_prio = next->prio; |
| 579 | * ), |
| 580 | * |
| 581 | * * |
| 582 | * * Formatted output of a trace record via TP_printk(). |
| 583 | * * This is how the tracepoint will appear under ftrace |
| 584 | * * plugins that make use of this tracepoint. |
| 585 | * * |
 *	*	(raw-binary tracing won't actually perform this step.)
| 587 | * * |
| 588 | * |
| 589 | * TP_printk("task %s:%d [%d] ==> %s:%d [%d]", |
| 590 | * __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, |
| 591 | * __entry->next_comm, __entry->next_pid, __entry->next_prio), |
| 592 | * |
| 593 | * ); |
| 594 | * |
| 595 | * This macro construct is thus used for the regular printk format |
| 596 | * tracing setup, it is used to construct a function pointer based |
| 597 | * tracepoint callback (this is used by programmatic plugins and |
 * can also be used by generic instrumentation like SystemTap), and
| 599 | * it is also used to expose a structured trace record in |
| 600 | * /sys/kernel/tracing/events/. |
| 601 | * |
| 602 | * A set of (un)registration functions can be passed to the variant |
| 603 | * TRACE_EVENT_FN to perform any (un)registration work. |
| 604 | */ |
| 605 | |
/*
 * Default expansions used when a trace header is included outside the
 * CREATE_TRACE_POINTS machinery: each TRACE_EVENT*() collapses onto the
 * corresponding DECLARE_TRACE_EVENT*() tracepoint declaration, and the
 * tstruct/assign/print (and reg/unreg where present) arguments are
 * ignored here.
 */
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)
#define DEFINE_EVENT(template, name, proto, args)		\
	DECLARE_TRACE_EVENT(name, PARAMS(proto), PARAMS(args))
#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg)\
	DECLARE_TRACE_EVENT(name, PARAMS(proto), PARAMS(args))
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DECLARE_TRACE_EVENT(name, PARAMS(proto), PARAMS(args))
#define DEFINE_EVENT_CONDITION(template, name, proto,		\
			       args, cond)			\
	DECLARE_TRACE_EVENT_CONDITION(name, PARAMS(proto),	\
				PARAMS(args), PARAMS(cond))

#define TRACE_EVENT(name, proto, args, struct, assign, print)	\
	DECLARE_TRACE_EVENT(name, PARAMS(proto), PARAMS(args))
#define TRACE_EVENT_FN(name, proto, args, struct,		\
		assign, print, reg, unreg)			\
	DECLARE_TRACE_EVENT(name, PARAMS(proto), PARAMS(args))
#define TRACE_EVENT_FN_COND(name, proto, args, cond, struct,	\
		assign, print, reg, unreg)			\
	DECLARE_TRACE_EVENT_CONDITION(name, PARAMS(proto),	\
			PARAMS(args), PARAMS(cond))
#define TRACE_EVENT_CONDITION(name, proto, args, cond,		\
			      struct, assign, print)		\
	DECLARE_TRACE_EVENT_CONDITION(name, PARAMS(proto),	\
				PARAMS(args), PARAMS(cond))
#define TRACE_EVENT_SYSCALL(name, proto, args, struct, assign,	\
			    print, reg, unreg)			\
	DECLARE_TRACE_EVENT_SYSCALL(name, PARAMS(proto), PARAMS(args))

#define TRACE_EVENT_FLAGS(event, flag)

#define TRACE_EVENT_PERF_PERM(event, expr...)

/*
 * NOP variants: compile an event out entirely while keeping its call
 * sites valid — trace_<name>() is empty, _enabled() is always false.
 */
#define DECLARE_EVENT_NOP(name, proto, args)				\
	static inline void trace_##name(proto)				\
	{ }								\
	static inline bool trace_##name##_enabled(void)			\
	{								\
		return false;						\
	}

#define TRACE_EVENT_NOP(name, proto, args, struct, assign, print)	\
	DECLARE_EVENT_NOP(name, PARAMS(proto), PARAMS(args))

#define DECLARE_EVENT_CLASS_NOP(name, proto, args, tstruct, assign, print)
#define DEFINE_EVENT_NOP(template, name, proto, args)			\
	DECLARE_EVENT_NOP(name, PARAMS(proto), PARAMS(args))
| 653 | |
| 654 | #endif /* ifdef TRACE_EVENT (see note above) */ |