/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ftrace header. For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.rst
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_recursion.h>
#include <linux/trace_clock.h>
#include <linux/jump_label.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

#ifdef CONFIG_TRACING
extern void ftrace_boot_snapshot(void);
#else
static inline void ftrace_boot_snapshot(void) { }
#endif

struct ftrace_ops;
struct ftrace_regs;
struct dyn_ftrace;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does. Or at least does enough to prevent any unwelcome side effects.
 *
 * Also define the function prototype that these architectures use
 * to call the ftrace_ops_list_func().
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
#else
# define FTRACE_FORCE_LIST_FUNC 0
void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                               struct ftrace_ops *op, struct ftrace_regs *fregs);
#endif
extern const struct ftrace_ops ftrace_nop_ops;
extern const struct ftrace_ops ftrace_list_ops;
struct ftrace_ops *ftrace_find_unique_ops(struct dyn_ftrace *rec);
#endif /* CONFIG_FUNCTION_TRACER */

/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
void early_trace_init(void);
#else
static inline void trace_init(void) { }
static inline void early_trace_init(void) { }
#endif

struct module;
struct ftrace_hash;
struct ftrace_direct_func;

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
        defined(CONFIG_DYNAMIC_FTRACE)
const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
                          unsigned long *off, char **modname, char *sym);
#else
static inline const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
                          unsigned long *off, char **modname, char *sym)
{
        return NULL;
}
#endif

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
                           char *type, char *name,
                           char *module_name, int *exported);
#else
static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
                                         char *type, char *name,
                                         char *module_name, int *exported)
{
        return -1;
}
#endif

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS

struct ftrace_regs {
        struct pt_regs regs;
};
#define arch_ftrace_get_regs(fregs) (&(fregs)->regs)

/*
 * ftrace_regs_set_instruction_pointer() is to be defined by the architecture
 * if it allows setting the instruction pointer from the ftrace_regs when
 * HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports live kernel patching.
 */
#define ftrace_regs_set_instruction_pointer(fregs, ip) do { } while (0)
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */

static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
{
        if (!fregs)
                return NULL;

        return arch_ftrace_get_regs(fregs);
}

/*
 * When true, the ftrace_regs_{get,set}_*() functions may be used on fregs.
 * Note: this can be true even when ftrace_get_regs() cannot provide a pt_regs.
 */
static __always_inline bool ftrace_regs_has_args(struct ftrace_regs *fregs)
{
        if (IS_ENABLED(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS))
                return true;

        return ftrace_get_regs(fregs) != NULL;
}

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
#define ftrace_regs_get_instruction_pointer(fregs) \
        instruction_pointer(ftrace_get_regs(fregs))
#define ftrace_regs_get_argument(fregs, n) \
        regs_get_kernel_argument(ftrace_get_regs(fregs), n)
#define ftrace_regs_get_stack_pointer(fregs) \
        kernel_stack_pointer(ftrace_get_regs(fregs))
#define ftrace_regs_return_value(fregs) \
        regs_return_value(ftrace_get_regs(fregs))
#define ftrace_regs_set_return_value(fregs, ret) \
        regs_set_return_value(ftrace_get_regs(fregs), ret)
#define ftrace_override_function_with_return(fregs) \
        override_function_with_return(ftrace_get_regs(fregs))
#define ftrace_regs_query_register_offset(name) \
        regs_query_register_offset(name)
#endif

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
                              struct ftrace_ops *op, struct ftrace_regs *fregs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);

/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION, STUB and
 * IPMODIFY are a kind of attribute flags which can be set only before
 * registering the ftrace_ops, and can not be modified while registered.
 * Changing those attribute flags after registering ftrace_ops will
 * cause unexpected results.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *           and passed to the callback. If this flag is set, but the
 *           architecture does not support passing regs
 *           (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *           ftrace_ops will fail to register, unless the next flag
 *           is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *           handler can handle an arch that does not save regs
 *           (the handler tests if regs == NULL), then it can set
 *           this flag instead. It will not fail registering the ftrace_ops
 *           but, the regs field will be NULL if the arch does not support
 *           passing regs to the handler.
 *           Note, if this flag is set, the SAVE_REGS flag will automatically
 *           get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION - The ftrace_ops can set this to tell the ftrace infrastructure
 *           that the callback needs recursion protection. If it does
 *           not set this, then the ftrace infrastructure will assume
 *           that the callback can handle recursion on its own.
 * STUB   - The ftrace_ops is just a place holder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 *           register_ftrace_function() is called it will initialize the ops)
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING  - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 *           The arch specific code sets this flag when it allocated a
 *           trampoline. This lets the arch know that it can update the
 *           trampoline in case the callback function changes.
 *           The ftrace_ops trampoline can be set by the ftrace users, and
 *           in such cases the arch must not modify it. Only the arch ftrace
 *           core code should set this flag.
 * IPMODIFY - The ops can modify the IP register. This can only be set with
 *           SAVE_REGS. If another ops with this flag set is already registered
 *           for any of the functions that this ops will be registered for, then
 *           this ops will fail to register or set_filter_ip.
 * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
 * RCU     - Set when the ops can only be called when RCU is watching.
 * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
 * PERMANENT - Set when the ops is permanent and should not be affected by
 *             ftrace_enabled.
 * DIRECT - Used by the direct ftrace_ops helper for direct functions
 *           (internal ftrace only, should not be used by others)
 */
enum {
        FTRACE_OPS_FL_ENABLED = BIT(0),
        FTRACE_OPS_FL_DYNAMIC = BIT(1),
        FTRACE_OPS_FL_SAVE_REGS = BIT(2),
        FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = BIT(3),
        FTRACE_OPS_FL_RECURSION = BIT(4),
        FTRACE_OPS_FL_STUB = BIT(5),
        FTRACE_OPS_FL_INITIALIZED = BIT(6),
        FTRACE_OPS_FL_DELETED = BIT(7),
        FTRACE_OPS_FL_ADDING = BIT(8),
        FTRACE_OPS_FL_REMOVING = BIT(9),
        FTRACE_OPS_FL_MODIFYING = BIT(10),
        FTRACE_OPS_FL_ALLOC_TRAMP = BIT(11),
        FTRACE_OPS_FL_IPMODIFY = BIT(12),
        FTRACE_OPS_FL_PID = BIT(13),
        FTRACE_OPS_FL_RCU = BIT(14),
        FTRACE_OPS_FL_TRACE_ARRAY = BIT(15),
        FTRACE_OPS_FL_PERMANENT = BIT(16),
        FTRACE_OPS_FL_DIRECT = BIT(17),
};

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
#define FTRACE_OPS_FL_SAVE_ARGS FTRACE_OPS_FL_SAVE_REGS
#else
#define FTRACE_OPS_FL_SAVE_ARGS 0
#endif

/*
 * FTRACE_OPS_CMD_* commands allow the ftrace core logic to request changes
 * to a ftrace_ops. Note, the requests may fail.
 *
 * ENABLE_SHARE_IPMODIFY_SELF - enable a DIRECT ops to work on the same
 *                              function as an ops with IPMODIFY. Called
 *                              when the DIRECT ops is being registered.
 *                              This is called with both direct_mutex and
 *                              ftrace_lock held.
 *
 * ENABLE_SHARE_IPMODIFY_PEER - enable a DIRECT ops to work on the same
 *                              function as an ops with IPMODIFY. Called
 *                              when the other ops (the one with IPMODIFY)
 *                              is being registered.
 *                              This is called with direct_mutex held.
 *
 * DISABLE_SHARE_IPMODIFY_PEER - disable a DIRECT ops from working on the same
 *                              function as an ops with IPMODIFY. Called
 *                              when the other ops (the one with IPMODIFY)
 *                              is being unregistered.
 *                              This is called with direct_mutex held.
 */
enum ftrace_ops_cmd {
        FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF,
        FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER,
        FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER,
};

/*
 * For most ftrace_ops_cmd,
 * Returns:
 *        0 - Success.
 *        Negative on failure. The return value is dependent on the
 *        callback.
 */
typedef int (*ftrace_ops_func_t)(struct ftrace_ops *op, enum ftrace_ops_cmd cmd);

#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
        struct ftrace_hash __rcu *notrace_hash;
        struct ftrace_hash __rcu *filter_hash;
        struct mutex regex_lock;
};

void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
static inline void ftrace_free_init_mem(void)
{
        ftrace_boot_snapshot();
}
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif

/*
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
 * core data, the unregistering of it will perform a scheduling on all CPUs
 * to make sure that there are no more users. Depending on the load of the
 * system that may take a bit of time.
 *
 * Any private data added must also take care not to be freed and if private
 * data is added to a ftrace_ops that is in core code, the user of the
 * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
 */
struct ftrace_ops {
        ftrace_func_t func;
        struct ftrace_ops __rcu *next;
        unsigned long flags;
        void *private;
        ftrace_func_t saved_func;
#ifdef CONFIG_DYNAMIC_FTRACE
        struct ftrace_ops_hash local_hash;
        struct ftrace_ops_hash *func_hash;
        struct ftrace_ops_hash old_hash;
        unsigned long trampoline;
        unsigned long trampoline_size;
        struct list_head list;
        ftrace_ops_func_t ops_func;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
        unsigned long direct_call;
#endif
#endif
};

extern struct ftrace_ops __rcu *ftrace_ops_list;
extern struct ftrace_ops ftrace_list_end;

/*
 * Traverse the ftrace_ops_list, invoking all entries. The reason that we
 * can use rcu_dereference_raw_check() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism. The rcu_dereference_raw_check() calls are needed to handle
 * concurrent insertions into the ftrace_ops_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
        op = rcu_dereference_raw_check(list);		\
        do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)				\
        while (likely(op = rcu_dereference_raw_check((op)->next)) &&	\
               unlikely((op) != &ftrace_list_end))
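
/*
 * Illustrative sketch (not part of this header) of the iteration pattern the
 * two macros above are built for; "op" is a local hypothetical variable:
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		// inspect or invoke op here
 *	} while_for_each_ftrace_op(op);
 */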

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
        FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
        FTRACE_TYPE_RETURN,    /* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also be read_mostly.
 * These functions do modify read_mostly variables so use them sparingly.
 * Never free an ftrace_op or modify the next pointer after it has been
 * registered. Even after unregistering it, the next pointer may still be
 * used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
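
/*
 * Illustrative sketch (not part of this header): registering a minimal
 * callback on every traced function. The names sample_ops/sample_callback
 * are hypothetical and error handling is omitted.
 *
 *	static void sample_callback(unsigned long ip, unsigned long parent_ip,
 *				    struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		// keep this fast and re-entrancy safe; it runs on every
 *		// traced function call
 *	}
 *
 *	static struct ftrace_ops sample_ops __read_mostly = {
 *		.func	= sample_callback,
 *		.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
 *	};
 *
 *	// typically from module init / exit:
 *	register_ftrace_function(&sample_ops);
 *	...
 *	unregister_ftrace_function(&sample_ops);
 */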

extern void ftrace_stub(unsigned long a0, unsigned long a1,
                        struct ftrace_ops *op, struct ftrace_regs *fregs);


int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs);
#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
static inline int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
{
        return -EOPNOTSUPP;
}
#endif /* CONFIG_FUNCTION_TRACER */

struct ftrace_func_entry {
        struct hlist_node hlist;
        unsigned long ip;
        unsigned long direct; /* for direct lookup only */
};

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
extern int ftrace_direct_func_count;
unsigned long ftrace_find_rec_direct(unsigned long ip);
int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
                             bool free_filters);
int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr);

void ftrace_stub_direct_tramp(void);
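
/*
 * Illustrative sketch (not part of this header, assumes an arch with
 * DYNAMIC_FTRACE_WITH_DIRECT_CALLS): attaching a custom trampoline directly
 * to one function. The names sample_direct_ops, sample_func and sample_tramp
 * are hypothetical; sample_tramp would normally be written in assembly and
 * return values are not checked here.
 *
 *	static struct ftrace_ops sample_direct_ops;
 *
 *	ftrace_set_filter_ip(&sample_direct_ops, (unsigned long)sample_func, 0, 0);
 *	register_ftrace_direct(&sample_direct_ops, (unsigned long)sample_tramp);
 *	...
 *	unregister_ftrace_direct(&sample_direct_ops, (unsigned long)sample_tramp,
 *				 true);
 */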

#else
struct ftrace_ops;
# define ftrace_direct_func_count 0
static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
{
        return 0;
}
static inline int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
{
        return -ENODEV;
}
static inline int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
                                           bool free_filters)
{
        return -ENODEV;
}
static inline int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
{
        return -ENODEV;
}
static inline int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr)
{
        return -ENODEV;
}

/*
 * This must be implemented by the architecture.
 * It is the way the ftrace direct_ops helper, when called
 * via ftrace (because there are other callbacks besides the
 * direct call), can inform the architecture's trampoline that this
 * routine has a direct caller, and what the caller is.
 *
 * For example, in x86, it returns the direct caller
 * callback function via the regs->orig_ax parameter.
 * Then in the ftrace trampoline, if this is set, it makes
 * the return from the trampoline jump to the direct caller
 * instead of going back to the function it just traced.
 */
static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
                                                 unsigned long addr) { }
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

#ifdef CONFIG_STACK_TRACER

extern int stack_tracer_enabled;

int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
                       size_t *lenp, loff_t *ppos);

/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);

/**
 * stack_tracer_disable - temporarily disable the stack tracer
 *
 * There are a few locations (namely in RCU) where stack tracing
 * cannot be executed. This function is used to disable stack
 * tracing during those critical sections.
 *
 * This function must be called with preemption or interrupts
 * disabled and stack_tracer_enable() must be called shortly after
 * while preemption or interrupts are still disabled.
 */
static inline void stack_tracer_disable(void)
{
        /* Preemption or interrupts must be disabled */
        if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
                WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
        this_cpu_inc(disable_stack_tracer);
}

/**
 * stack_tracer_enable - re-enable the stack tracer
 *
 * After stack_tracer_disable() is called, stack_tracer_enable()
 * must be called shortly afterward.
 */
static inline void stack_tracer_enable(void)
{
        if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
                WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
        this_cpu_dec(disable_stack_tracer);
}
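
/*
 * Illustrative usage sketch (not part of this header): the disable/enable
 * pair must bracket a region that already runs with preemption (or
 * interrupts) disabled.
 *
 *	preempt_disable();
 *	stack_tracer_disable();
 *	// code that must not be stack-traced (e.g. deep in RCU internals)
 *	stack_tracer_enable();
 *	preempt_enable();
 */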
5367278c
SRV
501#else
502static inline void stack_tracer_disable(void) { }
503static inline void stack_tracer_enable(void) { }
f38f1d2a
SR
504#endif
505
3d083395 506#ifdef CONFIG_DYNAMIC_FTRACE
31e88909 507
3a2bfec0
L
508void ftrace_arch_code_modify_prepare(void);
509void ftrace_arch_code_modify_post_process(void);
000ab691 510
02a392a0
SRRH
511enum ftrace_bug_type {
512 FTRACE_BUG_UNKNOWN,
513 FTRACE_BUG_INIT,
514 FTRACE_BUG_NOP,
515 FTRACE_BUG_CALL,
516 FTRACE_BUG_UPDATE,
517};
518extern enum ftrace_bug_type ftrace_bug_type;
519
b05086c7
SRRH
520/*
521 * Archs can set this to point to a variable that holds the value that was
522 * expected at the call site before calling ftrace_bug().
523 */
524extern const void *ftrace_expected;
525
4fd3279b 526void ftrace_bug(int err, struct dyn_ftrace *rec);
c88fd863 527
809dcf29
SR
528struct seq_file;
529
d88471cb 530extern int ftrace_text_reserved(const void *start, const void *end);
2cfa1978 531
6be7fa3c
SRV
532struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);
533
aec0be2d
SRRH
534bool is_ftrace_trampoline(unsigned long addr);
535
08f6fba5
SR
536/*
537 * The dyn_ftrace record's flags field is split into two parts.
538 * the first part which is '0-FTRACE_REF_MAX' is a counter of
539 * the number of callbacks that have registered the function that
540 * the dyn_ftrace descriptor represents.
541 *
542 * The second part is a mask:
543 * ENABLED - the function is being traced
544 * REGS - the record wants the function to save regs
545 * REGS_EN - the function is set up to save regs.
f8b8be8a 546 * IPMODIFY - the record allows for the IP address to be changed.
b7ffffbb 547 * DISABLED - the record is not ready to be touched yet
763e34e7 548 * DIRECT - there is a direct function to call
cbad0fb2
MR
549 * CALL_OPS - the record can use callsite-specific ops
550 * CALL_OPS_EN - the function is set up to use callsite-specific ops
08f6fba5
SR
551 *
552 * When a new ftrace_ops is registered and wants a function to save
02dae28f 553 * pt_regs, the rec->flags REGS is set. When the function has been
08f6fba5
SR
554 * set up to save regs, the REG_EN flag is set. Once a function
555 * starts saving regs it will do so until all ftrace_ops are removed
556 * from tracing that function.
557 */
3c1720f0 558enum {
79922b80 559 FTRACE_FL_ENABLED = (1UL << 31),
08f6fba5 560 FTRACE_FL_REGS = (1UL << 30),
79922b80
SRRH
561 FTRACE_FL_REGS_EN = (1UL << 29),
562 FTRACE_FL_TRAMP = (1UL << 28),
563 FTRACE_FL_TRAMP_EN = (1UL << 27),
f8b8be8a 564 FTRACE_FL_IPMODIFY = (1UL << 26),
b7ffffbb 565 FTRACE_FL_DISABLED = (1UL << 25),
763e34e7
SRV
566 FTRACE_FL_DIRECT = (1UL << 24),
567 FTRACE_FL_DIRECT_EN = (1UL << 23),
cbad0fb2
MR
568 FTRACE_FL_CALL_OPS = (1UL << 22),
569 FTRACE_FL_CALL_OPS_EN = (1UL << 21),
3c1720f0
SR
570};
571
cbad0fb2 572#define FTRACE_REF_MAX_SHIFT 21
cf2cb0b2 573#define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1)
ed926f9b 574
02dae28f 575#define ftrace_rec_count(rec) ((rec)->flags & FTRACE_REF_MAX)
0376bde1 576
3d083395 577struct dyn_ftrace {
a762782d 578 unsigned long ip; /* address of mcount call-site */
85ae32ae 579 unsigned long flags;
a762782d 580 struct dyn_arch_ftrace arch;
3d083395
SR
581};
582
647664ea
MH
583int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
584 int remove, int reset);
4f554e95
JO
585int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
586 unsigned int cnt, int remove, int reset);
ac483c44 587int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
936e074b 588 int len, int reset);
ac483c44 589int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
936e074b
SR
590 int len, int reset);
591void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
592void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
5500fa51 593void ftrace_free_filter(struct ftrace_ops *ops);
d032ae89 594void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
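
/*
 * Illustrative sketch (not part of this header): restricting a callback to a
 * set of functions before registering it. sample_ops and the glob pattern
 * are hypothetical; return values are not checked. The final reset argument
 * of 1 clears any previously set filter first.
 *
 *	ftrace_set_filter(&sample_ops, "kmalloc*", strlen("kmalloc*"), 1);
 *	register_ftrace_function(&sample_ops);
 *	...
 *	unregister_ftrace_function(&sample_ops);
 *	ftrace_free_filter(&sample_ops);
 */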

enum {
        FTRACE_UPDATE_CALLS = (1 << 0),
        FTRACE_DISABLE_CALLS = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
        FTRACE_START_FUNC_RET = (1 << 3),
        FTRACE_STOP_FUNC_RET = (1 << 4),
        FTRACE_MAY_SLEEP = (1 << 5),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE      - The function is already what we want it to be
 *  MAKE_CALL   - Start tracing the function
 *  MODIFY_CALL - Stop saving regs for the function
 *  MAKE_NOP    - Stop tracing the function
 */
enum {
        FTRACE_UPDATE_IGNORE,
        FTRACE_UPDATE_MAKE_CALL,
        FTRACE_UPDATE_MODIFY_CALL,
        FTRACE_UPDATE_MAKE_NOP,
};

enum {
        FTRACE_ITER_FILTER = (1 << 0),
        FTRACE_ITER_NOTRACE = (1 << 1),
        FTRACE_ITER_PRINTALL = (1 << 2),
        FTRACE_ITER_DO_PROBES = (1 << 3),
        FTRACE_ITER_PROBE = (1 << 4),
        FTRACE_ITER_MOD = (1 << 5),
        FTRACE_ITER_ENABLED = (1 << 6),
};

void arch_ftrace_update_code(int command);
void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
void arch_ftrace_trampoline_free(struct ftrace_ops *ops);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)		\
        for (iter = ftrace_rec_iter_start();	\
             iter;				\
             iter = ftrace_rec_iter_next(iter))

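/*
 * Illustrative sketch (not part of this header): walking every dyn_ftrace
 * record, as arch code typically does when patching call sites. "iter" and
 * "rec" are local hypothetical variables.
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		// rec->ip is the address of the mcount/fentry call-site
 *	}
 */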

int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
                      struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
                            size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
                             size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
 * to be that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
                           struct dyn_ftrace *rec, unsigned long addr);

/**
 * ftrace_need_init_nop - return whether nop call sites should be initialized
 *
 * Normally the compiler's -mnop-mcount generates suitable nops, so we don't
 * need to call ftrace_init_nop() if the code is built with that flag.
 * Architectures where this is not always the case may define their own
 * condition.
 *
 * Return must be:
 *  0       if ftrace_init_nop() should be called
 *  Nonzero if ftrace_init_nop() should not be called
 */

#ifndef ftrace_need_init_nop
#define ftrace_need_init_nop() (!__is_defined(CC_USING_NOP_MCOUNT))
#endif

/**
 * ftrace_init_nop - initialize a nop call site
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should contain the contents created by
 * the compiler
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
#ifndef ftrace_init_nop
static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
        return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
}
#endif

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

#if defined(CONFIG_DYNAMIC_FTRACE_WITH_REGS) || \
        defined(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS)
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the call site record (e.g. mcount/fentry)
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * When using call ops, this is called when the associated ops change, even
 * when (addr == old_addr).
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                              unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                                     unsigned long addr)
{
        return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline int ftrace_text_reserved(const void *start, const void *end)
{
        return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
        return 0;
}

/*
 * Again users of functions that have ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of inline.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter_ips(ops, ips, cnt, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
#define ftrace_ops_set_global_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
                                          size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
                                           size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }

static inline bool is_ftrace_trampoline(unsigned long addr)
{
        return false;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifndef ftrace_graph_func
#define ftrace_graph_func ftrace_stub
#define FTRACE_OPS_GRAPH_STUB FTRACE_OPS_FL_STUB
#else
#define FTRACE_OPS_GRAPH_STUB 0
#endif
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
        ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
        int saved_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;
        return saved_ftrace_enabled;
#else
        return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
        ftrace_enabled = enabled;
#endif
}
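
/*
 * Illustrative usage sketch (not part of this header); the caller provides
 * whatever serialization keeps ftrace_enabled stable in between:
 *
 *	int saved = __ftrace_enabled_save();
 *	// run code with function tracing turned off
 *	__ftrace_enabled_restore(saved);
 */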

/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))

static __always_inline unsigned long get_lock_parent_ip(void)
{
        unsigned long addr = CALLER_ADDR0;

        if (!in_lock_functions(addr))
                return addr;
        addr = CALLER_ADDR1;
        if (!in_lock_functions(addr))
                return addr;
        return CALLER_ADDR2;
}

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
  extern void trace_preempt_on(unsigned long a0, unsigned long a1);
  extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will make code out
 * of the CALLER_ADDR, when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
#define FTRACE_CALLSITE_SECTION "__patchable_function_entries"
#else
#define FTRACE_CALLSITE_SECTION "__mcount_loc"
#endif
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ent {
        unsigned long func; /* Current function */
        int depth;
} __packed;

/*
 * Structure that defines a return function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ret {
        unsigned long func; /* Current function */
        int depth;
        /* Number of functions that overran the depth limit for current task */
        unsigned int overrun;
        unsigned long long calltime;
        unsigned long long rettime;
} __packed;

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

struct fgraph_ops {
        trace_func_graph_ent_t entryfunc;
        trace_func_graph_ret_t retfunc;
};

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
        unsigned long ret;
        unsigned long func;
        unsigned long long calltime;
#ifdef CONFIG_FUNCTION_PROFILER
        unsigned long long subtime;
#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
        unsigned long fp;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
        unsigned long *retp;
#endif
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
function_graph_enter(unsigned long ret, unsigned long func,
                     unsigned long frame_pointer, unsigned long *retp);

struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx);

unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
                                    unsigned long ret, unsigned long *retp);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer but we want it to keep being traced by the usual function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph notrace

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32

extern int register_ftrace_graph(struct fgraph_ops *ops);
extern void unregister_ftrace_graph(struct fgraph_ops *ops);
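
/*
 * Illustrative sketch (not part of this header): a minimal fgraph_ops with
 * entry/return handlers. The names sample_entry/sample_return/sample_fgraph
 * are hypothetical; return values are not checked.
 *
 *	static int sample_entry(struct ftrace_graph_ent *trace)
 *	{
 *		// return nonzero to trace this call; 0 skips hooking its return
 *		return 1;
 *	}
 *
 *	static void sample_return(struct ftrace_graph_ret *trace)
 *	{
 *		// trace->rettime - trace->calltime is the call duration
 *	}
 *
 *	static struct fgraph_ops sample_fgraph = {
 *		.entryfunc	= sample_entry,
 *		.retfunc	= sample_return,
 *	};
 *
 *	register_ftrace_graph(&sample_fgraph);
 *	...
 *	unregister_ftrace_graph(&sample_fgraph);
 */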
287b6e68 1085
18bfee32
CL
1086/**
1087 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
1088 *
1089 * ftrace_graph_stop() is called when a severe error is detected in
1090 * the function graph tracing. This function is called by the critical
1091 * paths of function graph to keep those paths from doing any more harm.
1092 */
1093DECLARE_STATIC_KEY_FALSE(kill_ftrace_graph);
1094
1095static inline bool ftrace_graph_is_dead(void)
1096{
1097 return static_branch_unlikely(&kill_ftrace_graph);
1098}
1099
14a866c5
SR
1100extern void ftrace_graph_stop(void);
1101
287b6e68
FW
1102/* The current handlers in use */
1103extern trace_func_graph_ret_t ftrace_graph_return;
1104extern trace_func_graph_ent_t ftrace_graph_entry;
caf4b323 1105
fb52607a
FW
1106extern void ftrace_graph_init_task(struct task_struct *t);
1107extern void ftrace_graph_exit_task(struct task_struct *t);
868baf07 1108extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
21a8c466 1109
380c4b14
FW
1110static inline void pause_graph_tracing(void)
1111{
1112 atomic_inc(&current->tracing_graph_pause);
1113}
1114
1115static inline void unpause_graph_tracing(void)
1116{
1117 atomic_dec(&current->tracing_graph_pause);
1118}
5ac9f622 1119#else /* !CONFIG_FUNCTION_GRAPH_TRACER */
8b96f011
FW
1120
1121#define __notrace_funcgraph
1122
fb52607a
FW
1123static inline void ftrace_graph_init_task(struct task_struct *t) { }
1124static inline void ftrace_graph_exit_task(struct task_struct *t) { }
868baf07 1125static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
21a8c466 1126
688f7089
SRV
1127/* Define as macros as fgraph_ops may not be defined */
1128#define register_ftrace_graph(ops) ({ -1; })
1129#define unregister_ftrace_graph(ops) do { } while (0)
380c4b14 1130
223918e3
JP
1131static inline unsigned long
1132ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
1133 unsigned long *retp)
1134{
1135 return ret;
1136}
1137
380c4b14
FW
1138static inline void pause_graph_tracing(void) { }
1139static inline void unpause_graph_tracing(void) { }
5ac9f622 1140#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
caf4b323 1141
ea4e2bc4 1142#ifdef CONFIG_TRACING
cecbca96
FW
1143enum ftrace_dump_mode;
1144
1145extern enum ftrace_dump_mode ftrace_dump_on_oops;
0daa2302 1146extern int tracepoint_printk;
526211bc 1147
de7edd31
SRRH
1148extern void disable_trace_on_warning(void);
1149extern int __disable_trace_on_warning;
1150
42391745 1151int tracepoint_printk_sysctl(struct ctl_table *table, int write,
32927393 1152 void *buffer, size_t *lenp, loff_t *ppos);
42391745 1153
de7edd31
SRRH
1154#else /* CONFIG_TRACING */
1155static inline void disable_trace_on_warning(void) { }
ea4e2bc4
SR
1156#endif /* CONFIG_TRACING */
1157
e7b8e675
MF
1158#ifdef CONFIG_FTRACE_SYSCALLS
1159
1160unsigned long arch_syscall_addr(int nr);
1161
1162#endif /* CONFIG_FTRACE_SYSCALLS */
1163
16444a8a 1164#endif /* _LINUX_FTRACE_H */