ftrace: Rename _ftrace_direct_multi APIs to _ftrace_direct APIs
include/linux/ftrace.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ftrace header. For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.rst
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_recursion.h>
#include <linux/trace_clock.h>
#include <linux/jump_label.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

#ifdef CONFIG_TRACING
extern void ftrace_boot_snapshot(void);
#else
static inline void ftrace_boot_snapshot(void) { }
#endif

struct ftrace_ops;
struct ftrace_regs;
struct dyn_ftrace;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does. Or at least does enough to prevent any unwelcome side effects.
 *
 * Also define the function prototype that these architectures use
 * to call the ftrace_ops_list_func().
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
#else
# define FTRACE_FORCE_LIST_FUNC 0
void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs);
#endif
extern const struct ftrace_ops ftrace_nop_ops;
extern const struct ftrace_ops ftrace_list_ops;
struct ftrace_ops *ftrace_find_unique_ops(struct dyn_ftrace *rec);
#endif /* CONFIG_FUNCTION_TRACER */

/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
void early_trace_init(void);
#else
static inline void trace_init(void) { }
static inline void early_trace_init(void) { }
#endif

struct module;
struct ftrace_hash;
struct ftrace_direct_func;

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
	defined(CONFIG_DYNAMIC_FTRACE)
const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
			  unsigned long *off, char **modname, char *sym);
#else
static inline const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
			  unsigned long *off, char **modname, char *sym)
{
	return NULL;
}
#endif

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
			   char *type, char *name,
			   char *module_name, int *exported);
#else
static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
					 char *type, char *name,
					 char *module_name, int *exported)
{
	return -1;
}
#endif

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS

struct ftrace_regs {
	struct pt_regs		regs;
};
#define arch_ftrace_get_regs(fregs) (&(fregs)->regs)

/*
 * ftrace_regs_set_instruction_pointer() is to be defined by the
 * architecture if it allows setting of the instruction pointer from the
 * ftrace_regs when HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports
 * live kernel patching.
 */
#define ftrace_regs_set_instruction_pointer(fregs, ip) do { } while (0)
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */

static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
{
	if (!fregs)
		return NULL;

	return arch_ftrace_get_regs(fregs);
}

/*
 * When true, the ftrace_regs_{get,set}_*() functions may be used on fregs.
 * Note: this can be true even when ftrace_get_regs() cannot provide a pt_regs.
 */
static __always_inline bool ftrace_regs_has_args(struct ftrace_regs *fregs)
{
	if (IS_ENABLED(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS))
		return true;

	return ftrace_get_regs(fregs) != NULL;
}

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
#define ftrace_regs_get_instruction_pointer(fregs) \
	instruction_pointer(ftrace_get_regs(fregs))
#define ftrace_regs_get_argument(fregs, n) \
	regs_get_kernel_argument(ftrace_get_regs(fregs), n)
#define ftrace_regs_get_stack_pointer(fregs) \
	kernel_stack_pointer(ftrace_get_regs(fregs))
#define ftrace_regs_return_value(fregs) \
	regs_return_value(ftrace_get_regs(fregs))
#define ftrace_regs_set_return_value(fregs, ret) \
	regs_set_return_value(ftrace_get_regs(fregs), ret)
#define ftrace_override_function_with_return(fregs) \
	override_function_with_return(ftrace_get_regs(fregs))
#define ftrace_regs_query_register_offset(name) \
	regs_query_register_offset(name)
#endif
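
/*
 * Illustrative sketch (not part of this header's API): a callback using
 * the accessors above.  sample_callback is a hypothetical name and the
 * trace_printk() is just for demonstration.
 */
#if 0	/* usage sketch only */
static void sample_callback(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* Only touch arguments when the arch actually provides them. */
	if (ftrace_regs_has_args(fregs)) {
		unsigned long arg0 = ftrace_regs_get_argument(fregs, 0);

		trace_printk("%ps: arg0=%lx\n", (void *)ip, arg0);
	}
}
#endif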

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct ftrace_regs *fregs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);

/*
 * FTRACE_OPS_FL_* bits denote the state of the ftrace_ops struct and are
 * set in the flags member.
 * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION, STUB and
 * IPMODIFY are attribute flags which can be set only before
 * registering the ftrace_ops, and cannot be modified while registered.
 * Changing those attribute flags after registering the ftrace_ops will
 * cause unexpected results.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *            and passed to the callback. If this flag is set, but the
 *            architecture does not support passing regs
 *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *            ftrace_ops will fail to register, unless the next flag
 *            is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *            handler can handle an arch that does not save regs
 *            (the handler tests if regs == NULL), then it can set
 *            this flag instead. It will not fail registering the ftrace_ops
 *            but, the regs field will be NULL if the arch does not support
 *            passing regs to the handler.
 *            Note, if this flag is set, the SAVE_REGS flag will automatically
 *            get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION - The ftrace_ops can set this to tell the ftrace infrastructure
 *            that the callback needs recursion protection. If it does
 *            not set this, then the ftrace infrastructure will assume
 *            that the callback can handle recursion on its own.
 * STUB - The ftrace_ops is just a place holder.
 * INITIALIZED - The ftrace_ops has already been initialized (first use time
 *            register_ftrace_function() is called, it will initialize the ops)
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 *            The arch specific code sets this flag when it allocated a
 *            trampoline. This lets the arch know that it can update the
 *            trampoline in case the callback function changes.
 *            The ftrace_ops trampoline can be set by the ftrace users, and
 *            in such cases the arch must not modify it. Only the arch ftrace
 *            core code should set this flag.
 * IPMODIFY - The ops can modify the IP register. This can only be set with
 *            SAVE_REGS. If another ops with this flag set is already registered
 *            for any of the functions that this ops will be registered for, then
 *            this ops will fail to register or set_filter_ip.
 * PID - Is affected by set_ftrace_pid (allows filtering on those pids)
 * RCU - Set when the ops can only be called when RCU is watching.
 * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
 * PERMANENT - Set when the ops is permanent and should not be affected by
 *             ftrace_enabled.
 * DIRECT - Used by the direct ftrace_ops helper for direct functions
 *          (internal ftrace only, should not be used by others)
 */
enum {
	FTRACE_OPS_FL_ENABLED			= BIT(0),
	FTRACE_OPS_FL_DYNAMIC			= BIT(1),
	FTRACE_OPS_FL_SAVE_REGS			= BIT(2),
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= BIT(3),
	FTRACE_OPS_FL_RECURSION			= BIT(4),
	FTRACE_OPS_FL_STUB			= BIT(5),
	FTRACE_OPS_FL_INITIALIZED		= BIT(6),
	FTRACE_OPS_FL_DELETED			= BIT(7),
	FTRACE_OPS_FL_ADDING			= BIT(8),
	FTRACE_OPS_FL_REMOVING			= BIT(9),
	FTRACE_OPS_FL_MODIFYING			= BIT(10),
	FTRACE_OPS_FL_ALLOC_TRAMP		= BIT(11),
	FTRACE_OPS_FL_IPMODIFY			= BIT(12),
	FTRACE_OPS_FL_PID			= BIT(13),
	FTRACE_OPS_FL_RCU			= BIT(14),
	FTRACE_OPS_FL_TRACE_ARRAY		= BIT(15),
	FTRACE_OPS_FL_PERMANENT			= BIT(16),
	FTRACE_OPS_FL_DIRECT			= BIT(17),
};
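
/*
 * Illustrative sketch: an ftrace_ops that wants registers when the arch
 * can supply them, and degrades gracefully when it cannot (the handler
 * tests for a NULL pt_regs, as described above).  Names are hypothetical.
 */
#if 0	/* usage sketch only */
static void regs_callback(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = ftrace_get_regs(fregs);

	if (!regs)
		return;		/* arch could not save full pt_regs */
	/* inspect regs here */
}

static struct ftrace_ops regs_probe_ops = {
	.func	= regs_callback,
	.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED |
		  FTRACE_OPS_FL_RECURSION,
};
#endif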

/*
 * FTRACE_OPS_CMD_* commands allow the ftrace core logic to request changes
 * to a ftrace_ops. Note, the requests may fail.
 *
 * ENABLE_SHARE_IPMODIFY_SELF - enable a DIRECT ops to work on the same
 *                              function as an ops with IPMODIFY. Called
 *                              when the DIRECT ops is being registered.
 *                              This is called with both direct_mutex and
 *                              ftrace_lock locked.
 *
 * ENABLE_SHARE_IPMODIFY_PEER - enable a DIRECT ops to work on the same
 *                              function as an ops with IPMODIFY. Called
 *                              when the other ops (the one with IPMODIFY)
 *                              is being registered.
 *                              This is called with direct_mutex locked.
 *
 * DISABLE_SHARE_IPMODIFY_PEER - stop a DIRECT ops from working on the same
 *                               function as an ops with IPMODIFY. Called
 *                               when the other ops (the one with IPMODIFY)
 *                               is being unregistered.
 *                               This is called with direct_mutex locked.
 */
enum ftrace_ops_cmd {
	FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF,
	FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER,
	FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER,
};

/*
 * For most ftrace_ops_cmd commands,
 * Returns:
 *        0 - Success.
 *        Negative on failure. The exact return value is dependent on the
 *        callback.
 */
typedef int (*ftrace_ops_func_t)(struct ftrace_ops *op, enum ftrace_ops_cmd cmd);

#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
	struct ftrace_hash __rcu	*notrace_hash;
	struct ftrace_hash __rcu	*filter_hash;
	struct mutex			regex_lock;
};

void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
static inline void ftrace_free_init_mem(void)
{
	ftrace_boot_snapshot();
}
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif

/*
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
 * core data, the unregistering of it will perform a scheduling on all CPUs
 * to make sure that there are no more users. Depending on the load of the
 * system that may take a bit of time.
 *
 * Any private data added must also take care not to be freed and if private
 * data is added to a ftrace_ops that is in core code, the user of the
 * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
 */
struct ftrace_ops {
	ftrace_func_t			func;
	struct ftrace_ops __rcu		*next;
	unsigned long			flags;
	void				*private;
	ftrace_func_t			saved_func;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_ops_hash		local_hash;
	struct ftrace_ops_hash		*func_hash;
	struct ftrace_ops_hash		old_hash;
	unsigned long			trampoline;
	unsigned long			trampoline_size;
	struct list_head		list;
	ftrace_ops_func_t		ops_func;
#endif
};

extern struct ftrace_ops __rcu *ftrace_ops_list;
extern struct ftrace_ops ftrace_list_end;

/*
 * Traverse the ftrace_ops_list, invoking all entries. The reason that we
 * can use rcu_dereference_raw_check() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism. The rcu_dereference_raw_check() calls are needed to handle
 * concurrent insertions into the ftrace_ops_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_check(list);		\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)				\
	while (likely(op = rcu_dereference_raw_check((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))

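/*
 * The two helpers above pair up roughly like this (sketch only; the
 * ftrace core does this from its own list walkers):
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		pr_info("ops func: %ps\n", op->func);
 *	} while_for_each_ftrace_op(op);
 */
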
/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also
 * be read_mostly. These functions do modify read_mostly variables
 * so use them sparingly. Never free an ftrace_ops or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);

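/*
 * Minimal lifecycle sketch, assuming a statically allocated ops as the
 * comment above requires (my_func/my_ops/my_init/my_exit are
 * hypothetical names):
 */
#if 0	/* usage sketch only */
static void my_func(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* called at every traced function entry */
}

static struct ftrace_ops my_ops = {
	.func	= my_func,
	.flags	= FTRACE_OPS_FL_RECURSION,
};

static int __init my_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_ops);
	/* my_ops must not be freed; see the comment above. */
}
#endif
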
extern void ftrace_stub(unsigned long a0, unsigned long a1,
			struct ftrace_ops *op, struct ftrace_regs *fregs);


int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs);
#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
static inline int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_FUNCTION_TRACER */

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
	unsigned long direct; /* for direct lookup only */
};

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
extern int ftrace_direct_func_count;
unsigned long ftrace_find_rec_direct(unsigned long ip);
int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
			     bool free_filters);
int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr);

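/*
 * Usage sketch for the direct-call API above (hypothetical names;
 * my_tramp must be an arch-suitable trampoline, typically written in
 * assembly, as in samples/ftrace/):
 */
#if 0	/* usage sketch only */
extern void my_tramp(void);

static struct ftrace_ops my_direct_ops;

static int attach_direct(unsigned long target_ip)
{
	int ret;

	ret = ftrace_set_filter_ip(&my_direct_ops, target_ip, 0, 0);
	if (ret)
		return ret;

	return register_ftrace_direct(&my_direct_ops, (unsigned long)my_tramp);
}
#endif
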
#else
struct ftrace_ops;
# define ftrace_direct_func_count 0
static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
{
	return 0;
}
static inline int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}
static inline int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
					   bool free_filters)
{
	return -ENODEV;
}
static inline int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}
static inline int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}

/*
 * This must be implemented by the architecture.
 * It is the way the ftrace direct_ops helper, when called
 * via ftrace (because there are other callbacks besides the
 * direct call), can inform the architecture's trampoline that this
 * routine has a direct caller, and what the caller is.
 *
 * For example, in x86, it returns the direct caller
 * callback function via the regs->orig_ax parameter.
 * Then in the ftrace trampoline, if this is set, it makes
 * the return from the trampoline jump to the direct caller
 * instead of going back to the function it just traced.
 */
static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
						 unsigned long addr) { }
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

#ifdef CONFIG_STACK_TRACER

extern int stack_tracer_enabled;

int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
		       size_t *lenp, loff_t *ppos);

/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);

/**
 * stack_tracer_disable - temporarily disable the stack tracer
 *
 * There are a few locations (namely in RCU) where stack tracing
 * cannot be executed. This function is used to disable stack
 * tracing during those critical sections.
 *
 * This function must be called with preemption or interrupts
 * disabled and stack_tracer_enable() must be called shortly after
 * while preemption or interrupts are still disabled.
 */
static inline void stack_tracer_disable(void)
{
	/* Preemption or interrupts must be disabled */
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_inc(disable_stack_tracer);
}

/**
 * stack_tracer_enable - re-enable the stack tracer
 *
 * After stack_tracer_disable() is called, stack_tracer_enable()
 * must be called shortly afterward.
 */
static inline void stack_tracer_enable(void)
{
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_dec(disable_stack_tracer);
}
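
/*
 * Sketch of the required pairing described in the kerneldoc above:
 *
 *	preempt_disable_notrace();
 *	stack_tracer_disable();
 *	... code that must not be stack-traced ...
 *	stack_tracer_enable();
 *	preempt_enable_notrace();
 */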
#else
static inline void stack_tracer_disable(void) { }
static inline void stack_tracer_enable(void) { }
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

void ftrace_arch_code_modify_prepare(void);
void ftrace_arch_code_modify_post_process(void);

enum ftrace_bug_type {
	FTRACE_BUG_UNKNOWN,
	FTRACE_BUG_INIT,
	FTRACE_BUG_NOP,
	FTRACE_BUG_CALL,
	FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;

/*
 * Archs can set this to point to a variable that holds the value that was
 * expected at the call site before calling ftrace_bug().
 */
extern const void *ftrace_expected;

void ftrace_bug(int err, struct dyn_ftrace *rec);

struct seq_file;

extern int ftrace_text_reserved(const void *start, const void *end);

struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);

bool is_ftrace_trampoline(unsigned long addr);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, '0-FTRACE_REF_MAX', is a counter of
 * the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS    - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *  IPMODIFY - the record allows for the IP address to be changed.
 *  DISABLED - the record is not ready to be touched yet
 *  DIRECT   - there is a direct function to call
 *  CALL_OPS - the record can use callsite-specific ops
 *  CALL_OPS_EN - the function is set up to use callsite-specific ops
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
	FTRACE_FL_ENABLED	= (1UL << 31),
	FTRACE_FL_REGS		= (1UL << 30),
	FTRACE_FL_REGS_EN	= (1UL << 29),
	FTRACE_FL_TRAMP		= (1UL << 28),
	FTRACE_FL_TRAMP_EN	= (1UL << 27),
	FTRACE_FL_IPMODIFY	= (1UL << 26),
	FTRACE_FL_DISABLED	= (1UL << 25),
	FTRACE_FL_DIRECT	= (1UL << 24),
	FTRACE_FL_DIRECT_EN	= (1UL << 23),
	FTRACE_FL_CALL_OPS	= (1UL << 22),
	FTRACE_FL_CALL_OPS_EN	= (1UL << 21),
};

#define FTRACE_REF_MAX_SHIFT	21
#define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)	((rec)->flags & FTRACE_REF_MAX)

struct dyn_ftrace {
	unsigned long		ip; /* address of mcount call-site */
	unsigned long		flags;
	struct dyn_arch_ftrace	arch;
};

int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset);
int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
			  unsigned int cnt, int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);

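/*
 * Filtering sketch: restrict a (hypothetical) my_ops to one address
 * before registering it.  remove=0 adds the ip; reset=1 clears any
 * previously set filter first:
 *
 *	ret = ftrace_set_filter_ip(&my_ops, addr, 0, 1);
 *	if (!ret)
 *		ret = register_ftrace_function(&my_ops);
 */
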
enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
	FTRACE_MAY_SLEEP		= (1 << 5),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 * IGNORE      - The function is already what we want it to be
 * MAKE_CALL   - Start tracing the function
 * MODIFY_CALL - Stop saving regs for the function
 * MAKE_NOP    - Stop tracing the function
 */
enum {
	FTRACE_UPDATE_IGNORE,
	FTRACE_UPDATE_MAKE_CALL,
	FTRACE_UPDATE_MODIFY_CALL,
	FTRACE_UPDATE_MAKE_NOP,
};

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_DO_PROBES	= (1 << 3),
	FTRACE_ITER_PROBE	= (1 << 4),
	FTRACE_ITER_MOD		= (1 << 5),
	FTRACE_ITER_ENABLED	= (1 << 6),
};

void arch_ftrace_update_code(int command);
void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
void arch_ftrace_trampoline_free(struct ftrace_ops *ops);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))


int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
		      struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
 * to be that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *	0 on success
 *	-EFAULT on error reading the location
 *	-EINVAL on a failed compare of the contents
 *	-EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);

/**
 * ftrace_need_init_nop - return whether nop call sites should be initialized
 *
 * Normally the compiler's -mnop-mcount generates suitable nops, so we don't
 * need to call ftrace_init_nop() if the code is built with that flag.
 * Architectures where this is not always the case may define their own
 * condition.
 *
 * Return must be:
 *	0	  if ftrace_init_nop() should be called
 *	Nonzero	  if ftrace_init_nop() should not be called
 */

#ifndef ftrace_need_init_nop
#define ftrace_need_init_nop() (!__is_defined(CC_USING_NOP_MCOUNT))
#endif

/**
 * ftrace_init_nop - initialize a nop call site
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should contain the contents created by
 * the compiler
 *
 * Return must be:
 *	0 on success
 *	-EFAULT on error reading the location
 *	-EINVAL on a failed compare of the contents
 *	-EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
#ifndef ftrace_init_nop
static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
}
#endif

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *	0 on success
 *	-EFAULT on error reading the location
 *	-EINVAL on a failed compare of the contents
 *	-EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

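/*
 * Hedged sketch of the read/verify/write contract the kerneldocs above
 * describe.  arch_text_write() is a hypothetical stand-in for a real
 * arch patching primitive; MCOUNT_INSN_SIZE comes from <asm/ftrace.h>
 * on arches that provide it.
 */
#if 0	/* illustrative sketch, not a real arch implementation */
static int example_patch_site(struct dyn_ftrace *rec,
			      const void *expected, const void *new_code,
			      size_t size)
{
	unsigned char cur[MCOUNT_INSN_SIZE];

	if (copy_from_kernel_nofault(cur, (void *)rec->ip, size))
		return -EFAULT;		/* could not read the call site */
	if (memcmp(cur, expected, size))
		return -EINVAL;		/* site does not hold what we expect */
	if (arch_text_write((void *)rec->ip, new_code, size))
		return -EPERM;		/* write to the call site failed */
	return 0;
}
#endif
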
#if defined(CONFIG_DYNAMIC_FTRACE_WITH_REGS) || \
	defined(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS)
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the call site record (e.g. mcount/fentry)
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * When using call ops, this is called when the associated ops change, even
 * when (addr == old_addr).
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *	0 on success
 *	-EFAULT on error reading the location
 *	-EINVAL on a failed compare of the contents
 *	-EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			      unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline int ftrace_text_reserved(const void *start, const void *end)
{
	return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
	return 0;
}

/*
 * Again users of functions that have ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of inline.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter_ips(ops, ips, cnt, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
#define ftrace_ops_set_global_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
					  size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
					   size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }

static inline bool is_ftrace_trampoline(unsigned long addr)
{
	return false;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifndef ftrace_graph_func
#define ftrace_graph_func ftrace_stub
#define FTRACE_OPS_GRAPH_STUB FTRACE_OPS_FL_STUB
#else
#define FTRACE_OPS_GRAPH_STUB 0
#endif
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/* totally disable ftrace - cannot re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}

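/*
 * Pairing sketch for the helpers above; the caller supplies the
 * synchronization noted in the comment:
 *
 *	int saved = __ftrace_enabled_save();
 *	... region that must run with ftrace disabled ...
 *	__ftrace_enabled_restore(saved);
 */
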
/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))

static inline unsigned long get_lock_parent_ip(void)
{
	unsigned long addr = CALLER_ADDR0;

	if (!in_lock_functions(addr))
		return addr;
	addr = CALLER_ADDR1;
	if (!in_lock_functions(addr))
		return addr;
	return CALLER_ADDR2;
}

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
  extern void trace_preempt_on(unsigned long a0, unsigned long a1);
  extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will make code out
 * of the CALLER_ADDR, when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
#define FTRACE_CALLSITE_SECTION	"__patchable_function_entries"
#else
#define FTRACE_CALLSITE_SECTION	"__mcount_loc"
#endif
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
} __packed;

/*
 * Structure that defines a return function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	int depth;
	/* Number of functions that overran the depth limit for current task */
	unsigned int overrun;
	unsigned long long calltime;
	unsigned long long rettime;
} __packed;

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

struct fgraph_ops {
	trace_func_graph_ent_t	entryfunc;
	trace_func_graph_ret_t	retfunc;
};

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
#ifdef CONFIG_FUNCTION_PROFILER
	unsigned long long subtime;
#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	unsigned long fp;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	unsigned long *retp;
#endif
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
function_graph_enter(unsigned long ret, unsigned long func,
		     unsigned long frame_pointer, unsigned long *retp);

struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx);

unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer but we still want it to be traced by the usual function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph		notrace

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32

extern int register_ftrace_graph(struct fgraph_ops *ops);
extern void unregister_ftrace_graph(struct fgraph_ops *ops);

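/*
 * Sketch of a graph-tracer client (hypothetical names).  Returning
 * nonzero from the entry handler asks for the matching return event.
 */
#if 0	/* usage sketch only */
static int my_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* also record this function's return */
}

static void my_graph_return(struct ftrace_graph_ret *trace)
{
	unsigned long long delta = trace->rettime - trace->calltime;

	trace_printk("%ps took %llu\n", (void *)trace->func, delta);
}

static struct fgraph_ops my_gops = {
	.entryfunc	= my_graph_entry,
	.retfunc	= my_graph_return,
};

static int __init my_graph_init(void)
{
	return register_ftrace_graph(&my_gops);
}
#endif
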
/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
DECLARE_STATIC_KEY_FALSE(kill_ftrace_graph);

static inline bool ftrace_graph_is_dead(void)
{
	return static_branch_unlikely(&kill_ftrace_graph);
}

extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
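
/*
 * Typical pairing (sketch): keep the graph tracer away from a region
 * that would only add noise:
 *
 *	pause_graph_tracing();
 *	... region not worth graph-tracing ...
 *	unpause_graph_tracing();
 */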
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

/* Define as macros as fgraph_ops may not be defined */
#define register_ftrace_graph(ops) ({ -1; })
#define unregister_ftrace_graph(ops) do { } while (0)

static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
		      unsigned long *retp)
{
	return ret;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING
enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;
extern int tracepoint_printk;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
			     void *buffer, size_t *lenp, loff_t *ppos);

#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */