/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ftrace header. For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.rst
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_recursion.h>
#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does. Or at least does enough to prevent any unwelcome side effects.
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif

/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
void early_trace_init(void);
#else
static inline void trace_init(void) { }
static inline void early_trace_init(void) { }
#endif

struct module;
struct ftrace_hash;
struct ftrace_direct_func;

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
        defined(CONFIG_DYNAMIC_FTRACE)
const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
                          unsigned long *off, char **modname, char *sym);
#else
static inline const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
                          unsigned long *off, char **modname, char *sym)
{
        return NULL;
}
#endif

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
                           char *type, char *name,
                           char *module_name, int *exported);
#else
static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
                                         char *type, char *name,
                                         char *module_name, int *exported)
{
        return -1;
}
#endif

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     void *buffer, size_t *lenp, loff_t *ppos);

struct ftrace_ops;

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS

struct ftrace_regs {
        struct pt_regs          regs;
};
#define arch_ftrace_get_regs(fregs) (&(fregs)->regs)

/*
 * ftrace_instruction_pointer_set() is to be defined by the architecture
 * if it allows setting of the instruction pointer from the ftrace_regs
 * when HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports
 * live kernel patching.
 */
#define ftrace_instruction_pointer_set(fregs, ip) do { } while (0)
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */

static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
{
        if (!fregs)
                return NULL;

        return arch_ftrace_get_regs(fregs);
}

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
                              struct ftrace_ops *op, struct ftrace_regs *fregs);
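/*
 * Example (an illustrative sketch, not part of this header's API): a
 * callback with the ftrace_func_t signature that wants pt_regs must
 * tolerate ftrace_get_regs() returning NULL, since full registers may
 * not have been saved for the call site (see
 * FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED below). The name my_callback is
 * hypothetical.
 *
 *      static void my_callback(unsigned long ip, unsigned long parent_ip,
 *                              struct ftrace_ops *op, struct ftrace_regs *fregs)
 *      {
 *              struct pt_regs *regs = ftrace_get_regs(fregs);
 *
 *              if (!regs)
 *                      return;
 *              ... use regs ...
 *      }
 */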
ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);

/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 * SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION, STUB and
 * IPMODIFY are attribute flags which can only be set before
 * registering the ftrace_ops, and can not be modified while registered.
 * Changing those attribute flags after registering ftrace_ops will
 * cause unexpected results.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *            and passed to the callback. If this flag is set, but the
 *            architecture does not support passing regs
 *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *            ftrace_ops will fail to register, unless the next flag
 *            is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *            handler can handle an arch that does not save regs
 *            (the handler tests if regs == NULL), then it can set
 *            this flag instead. It will not fail registering the ftrace_ops
 *            but, the regs field will be NULL if the arch does not support
 *            passing regs to the handler.
 *            Note, if this flag is set, the SAVE_REGS flag will automatically
 *            get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION - The ftrace_ops can set this to tell the ftrace infrastructure
 *            that the callback needs recursion protection. If it does
 *            not set this, then the ftrace infrastructure will assume
 *            that the callback can handle recursion on its own.
 * STUB   - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (first use time
 *            register_ftrace_function() is called, it will initialize the ops)
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING  - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 *            The arch specific code sets this flag when it allocated a
 *            trampoline. This lets the arch know that it can update the
 *            trampoline in case the callback function changes.
 *            The ftrace_ops trampoline can be set by the ftrace users, and
 *            in such cases the arch must not modify it. Only the arch ftrace
 *            core code should set this flag.
 * IPMODIFY - The ops can modify the IP register. This can only be set with
 *            SAVE_REGS. If another ops with this flag set is already registered
 *            for any of the functions that this ops will be registered for, then
 *            this ops will fail to register or set_filter_ip.
 * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
 * RCU     - Set when the ops can only be called when RCU is watching.
 * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
 * PERMANENT - Set when the ops is permanent and should not be affected by
 *             ftrace_enabled.
 * DIRECT - Used by the direct ftrace_ops helper for direct functions
 *            (internal ftrace only, should not be used by others)
 */
enum {
        FTRACE_OPS_FL_ENABLED                   = BIT(0),
        FTRACE_OPS_FL_DYNAMIC                   = BIT(1),
        FTRACE_OPS_FL_SAVE_REGS                 = BIT(2),
        FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED    = BIT(3),
        FTRACE_OPS_FL_RECURSION                 = BIT(4),
        FTRACE_OPS_FL_STUB                      = BIT(5),
        FTRACE_OPS_FL_INITIALIZED               = BIT(6),
        FTRACE_OPS_FL_DELETED                   = BIT(7),
        FTRACE_OPS_FL_ADDING                    = BIT(8),
        FTRACE_OPS_FL_REMOVING                  = BIT(9),
        FTRACE_OPS_FL_MODIFYING                 = BIT(10),
        FTRACE_OPS_FL_ALLOC_TRAMP               = BIT(11),
        FTRACE_OPS_FL_IPMODIFY                  = BIT(12),
        FTRACE_OPS_FL_PID                       = BIT(13),
        FTRACE_OPS_FL_RCU                       = BIT(14),
        FTRACE_OPS_FL_TRACE_ARRAY               = BIT(15),
        FTRACE_OPS_FL_PERMANENT                 = BIT(16),
        FTRACE_OPS_FL_DIRECT                    = BIT(17),
};

#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
        struct ftrace_hash __rcu        *notrace_hash;
        struct ftrace_hash __rcu        *filter_hash;
        struct mutex                    regex_lock;
};

void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif

/*
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
 * core data, the unregistering of it will perform a scheduling on all CPUs
 * to make sure that there are no more users. Depending on the load of the
 * system that may take a bit of time.
 *
 * Any private data added must also take care not to be freed and if private
 * data is added to a ftrace_ops that is in core code, the user of the
 * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
 */
struct ftrace_ops {
        ftrace_func_t                   func;
        struct ftrace_ops __rcu         *next;
        unsigned long                   flags;
        void                            *private;
        ftrace_func_t                   saved_func;
#ifdef CONFIG_DYNAMIC_FTRACE
        struct ftrace_ops_hash          local_hash;
        struct ftrace_ops_hash          *func_hash;
        struct ftrace_ops_hash          old_hash;
        unsigned long                   trampoline;
        unsigned long                   trampoline_size;
        struct list_head                list;
#endif
};

extern struct ftrace_ops __rcu *ftrace_ops_list;
extern struct ftrace_ops ftrace_list_end;

/*
 * Traverse the ftrace_ops_list, invoking all entries. The reason that we
 * can use rcu_dereference_raw_check() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism. The rcu_dereference_raw_check() calls are needed to handle
 * concurrent insertions into the ftrace_ops_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)                 \
        op = rcu_dereference_raw_check(list);           \
        do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)                    \
        while (likely(op = rcu_dereference_raw_check((op)->next)) &&   \
               unlikely((op) != &ftrace_list_end))
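/*
 * A minimal usage sketch (illustrative only) of the iteration pair above,
 * matching how the ftrace core walks the list. The values ip, parent_ip
 * and fregs stand for whatever the enclosing context provides.
 *
 *      struct ftrace_ops *op;
 *
 *      do_for_each_ftrace_op(op, ftrace_ops_list) {
 *              op->func(ip, parent_ip, op, fregs);
 *      } while_for_each_ftrace_op(op);
 *
 * Note the unusual shape: do_for_each_ftrace_op() opens a do { } block
 * and while_for_each_ftrace_op() supplies its terminating condition.
 */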
/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
        FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
        FTRACE_TYPE_RETURN,     /* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also
 * be read_mostly. These functions do modify read_mostly variables
 * so use them sparingly. Never free an ftrace_ops or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
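/*
 * Example (an illustrative sketch, not from this header): registering a
 * callback on every traced function. The names trace_callback and
 * trace_ops are hypothetical.
 *
 *      static void notrace trace_callback(unsigned long ip,
 *                                         unsigned long parent_ip,
 *                                         struct ftrace_ops *op,
 *                                         struct ftrace_regs *fregs)
 *      {
 *              ...
 *      }
 *
 *      static struct ftrace_ops trace_ops __read_mostly = {
 *              .func   = trace_callback,
 *              .flags  = FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
 *      };
 *
 *      register_ftrace_function(&trace_ops);
 */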
extern void ftrace_stub(unsigned long a0, unsigned long a1,
                        struct ftrace_ops *op, struct ftrace_regs *fregs);

#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif /* CONFIG_FUNCTION_TRACER */

struct ftrace_func_entry {
        struct hlist_node hlist;
        unsigned long ip;
        unsigned long direct; /* for direct lookup only */
};

struct dyn_ftrace;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
extern int ftrace_direct_func_count;
int register_ftrace_direct(unsigned long ip, unsigned long addr);
int unregister_ftrace_direct(unsigned long ip, unsigned long addr);
int modify_ftrace_direct(unsigned long ip, unsigned long old_addr, unsigned long new_addr);
struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr);
int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
                                struct dyn_ftrace *rec,
                                unsigned long old_addr,
                                unsigned long new_addr);
unsigned long ftrace_find_rec_direct(unsigned long ip);
#else
# define ftrace_direct_func_count 0
static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
{
        return -ENOTSUPP;
}
static inline int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
{
        return -ENOTSUPP;
}
static inline int modify_ftrace_direct(unsigned long ip,
                                       unsigned long old_addr, unsigned long new_addr)
{
        return -ENOTSUPP;
}
static inline struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
{
        return NULL;
}
static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
                                              struct dyn_ftrace *rec,
                                              unsigned long old_addr,
                                              unsigned long new_addr)
{
        return -ENODEV;
}
static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
{
        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
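/*
 * Example (an illustrative sketch, modeled on the samples/ftrace/ direct
 * call samples): attach a custom assembly trampoline directly to one
 * function's fentry/mcount site. my_tramp is a hypothetical trampoline
 * symbol.
 *
 *      int ret;
 *
 *      ret = register_ftrace_direct((unsigned long)wake_up_process,
 *                                   (unsigned long)my_tramp);
 *      ...
 *      unregister_ftrace_direct((unsigned long)wake_up_process,
 *                               (unsigned long)my_tramp);
 */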
#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
 * This must be implemented by the architecture.
 * It is the way the ftrace direct_ops helper, when called
 * via ftrace (because there are other callbacks besides the
 * direct call), can inform the architecture's trampoline that this
 * routine has a direct caller, and what the caller is.
 *
 * For example, in x86, it returns the direct caller
 * callback function via the regs->orig_ax parameter.
 * Then in the ftrace trampoline, if this is set, it makes
 * the return from the trampoline jump to the direct caller
 * instead of going back to the function it just traced.
 */
static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
                                                 unsigned long addr) { }
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

#ifdef CONFIG_STACK_TRACER

extern int stack_tracer_enabled;

int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
                       size_t *lenp, loff_t *ppos);

/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);

/**
 * stack_tracer_disable - temporarily disable the stack tracer
 *
 * There are a few locations (namely in RCU) where stack tracing
 * cannot be executed. This function is used to disable stack
 * tracing during those critical sections.
 *
 * This function must be called with preemption or interrupts
 * disabled and stack_tracer_enable() must be called shortly after
 * while preemption or interrupts are still disabled.
 */
static inline void stack_tracer_disable(void)
{
        /* Preemption or interrupts must be disabled */
        if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
                WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
        this_cpu_inc(disable_stack_tracer);
}

/**
 * stack_tracer_enable - re-enable the stack tracer
 *
 * After stack_tracer_disable() is called, stack_tracer_enable()
 * must be called shortly afterward.
 */
static inline void stack_tracer_enable(void)
{
        if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
                WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
        this_cpu_dec(disable_stack_tracer);
}
#else
static inline void stack_tracer_disable(void) { }
static inline void stack_tracer_enable(void) { }
#endif
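/*
 * Usage sketch (illustrative only): wrap a section that must not be
 * stack-traced, satisfying the precondition by disabling preemption
 * around the pair.
 *
 *      preempt_disable_notrace();
 *      stack_tracer_disable();
 *      ... section the stack tracer must skip ...
 *      stack_tracer_enable();
 *      preempt_enable_notrace();
 */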
#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

enum ftrace_bug_type {
        FTRACE_BUG_UNKNOWN,
        FTRACE_BUG_INIT,
        FTRACE_BUG_NOP,
        FTRACE_BUG_CALL,
        FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;

/*
 * Archs can set this to point to a variable that holds the value that was
 * expected at the call site before calling ftrace_bug().
 */
extern const void *ftrace_expected;

void ftrace_bug(int err, struct dyn_ftrace *rec);

struct seq_file;

extern int ftrace_text_reserved(const void *start, const void *end);

struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);

bool is_ftrace_trampoline(unsigned long addr);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, '0-FTRACE_REF_MAX', is a counter of
 * the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS    - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *  IPMODIFY - the record allows for the IP address to be changed.
 *  DISABLED - the record is not ready to be touched yet
 *  DIRECT   - there is a direct function to call
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
        FTRACE_FL_ENABLED       = (1UL << 31),
        FTRACE_FL_REGS          = (1UL << 30),
        FTRACE_FL_REGS_EN       = (1UL << 29),
        FTRACE_FL_TRAMP         = (1UL << 28),
        FTRACE_FL_TRAMP_EN      = (1UL << 27),
        FTRACE_FL_IPMODIFY      = (1UL << 26),
        FTRACE_FL_DISABLED      = (1UL << 25),
        FTRACE_FL_DIRECT        = (1UL << 24),
        FTRACE_FL_DIRECT_EN     = (1UL << 23),
};

#define FTRACE_REF_MAX_SHIFT    23
#define FTRACE_REF_MAX          ((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)   ((rec)->flags & FTRACE_REF_MAX)
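/*
 * For example (illustrative), given a struct dyn_ftrace *rec, the two
 * halves of rec->flags are read like this:
 *
 *      if (ftrace_rec_count(rec) && (rec->flags & FTRACE_FL_ENABLED))
 *              ... at least one callback is registered and the call
 *                  site is currently being traced ...
 */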
struct dyn_ftrace {
        unsigned long           ip; /* address of mcount call-site */
        unsigned long           flags;
        struct dyn_arch_ftrace  arch;
};

int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
                         int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
                      int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
                       int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
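/*
 * Example (an illustrative sketch): restricting the hypothetical
 * trace_ops from the earlier sketch to a single function before
 * registering it. reset=1 clears any previously set filter.
 *
 *      unsigned char func[] = "wake_up_process";
 *
 *      ftrace_set_filter(&trace_ops, func, sizeof(func) - 1, 1);
 *      register_ftrace_function(&trace_ops);
 */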
enum {
        FTRACE_UPDATE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_START_FUNC_RET           = (1 << 3),
        FTRACE_STOP_FUNC_RET            = (1 << 4),
        FTRACE_MAY_SLEEP                = (1 << 5),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE      - The function is already what we want it to be
 *  MAKE_CALL   - Start tracing the function
 *  MODIFY_CALL - Change the call site to use a different trampoline
 *                (e.g. to start or stop saving regs)
 *  MAKE_NOP    - Stop tracing the function
 */
enum {
        FTRACE_UPDATE_IGNORE,
        FTRACE_UPDATE_MAKE_CALL,
        FTRACE_UPDATE_MODIFY_CALL,
        FTRACE_UPDATE_MAKE_NOP,
};

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_NOTRACE     = (1 << 1),
        FTRACE_ITER_PRINTALL    = (1 << 2),
        FTRACE_ITER_DO_PROBES   = (1 << 3),
        FTRACE_ITER_PROBE       = (1 << 4),
        FTRACE_ITER_MOD         = (1 << 5),
        FTRACE_ITER_ENABLED     = (1 << 6),
};

void arch_ftrace_update_code(int command);
void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
void arch_ftrace_trampoline_free(struct ftrace_ops *ops);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)               \
        for (iter = ftrace_rec_iter_start();    \
             iter;                              \
             iter = ftrace_rec_iter_next(iter))
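/*
 * Usage sketch (illustrative), e.g. from an arch's code-patching loop:
 *
 *      struct ftrace_rec_iter *iter;
 *      struct dyn_ftrace *rec;
 *
 *      for_ftrace_rec_iter(iter) {
 *              rec = ftrace_rec_iter_record(iter);
 *              ... patch the call site at rec->ip ...
 *      }
 */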
int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
                      struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
                            size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
                             size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
 * to be that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
                           struct dyn_ftrace *rec, unsigned long addr);

/**
 * ftrace_init_nop - initialize a nop call site
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should contain the contents created by
 * the compiler
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
#ifndef ftrace_init_nop
static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
        return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
}
#endif

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the call site record (e.g. mcount/fentry)
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                              unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                                     unsigned long addr)
{
        return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline int ftrace_text_reserved(const void *start, const void *end)
{
        return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
        return 0;
}

/*
 * Again, users of functions that have ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of inline.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
#define ftrace_ops_set_global_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
                                          size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
                                           size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }

static inline bool is_ftrace_trampoline(unsigned long addr)
{
        return false;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
        ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
        int saved_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;
        return saved_ftrace_enabled;
#else
        return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
        ftrace_enabled = enabled;
#endif
}
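/*
 * Usage sketch (illustrative): callers pair the two around a region that
 * must run with ftrace disabled, providing their own serialization as
 * noted above.
 *
 *      int saved = __ftrace_enabled_save();
 *      ... region that must not be traced ...
 *      __ftrace_enabled_restore(saved);
 */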
/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))

static inline unsigned long get_lock_parent_ip(void)
{
        unsigned long addr = CALLER_ADDR0;

        if (!in_lock_functions(addr))
                return addr;
        addr = CALLER_ADDR1;
        if (!in_lock_functions(addr))
                return addr;
        return CALLER_ADDR2;
}

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will make code out
 * of the CALLER_ADDR, when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
#define FTRACE_CALLSITE_SECTION "__patchable_function_entries"
#else
#define FTRACE_CALLSITE_SECTION "__mcount_loc"
#endif
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ent {
        unsigned long func; /* Current function */
        int depth;
} __packed;

/*
 * Structure that defines a return function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ret {
        unsigned long func; /* Current function */
        int depth;
        /* Number of functions that overran the depth limit for current task */
        unsigned int overrun;
        unsigned long long calltime;
        unsigned long long rettime;
} __packed;

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

struct fgraph_ops {
        trace_func_graph_ent_t          entryfunc;
        trace_func_graph_ret_t          retfunc;
};

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
        unsigned long ret;
        unsigned long func;
        unsigned long long calltime;
#ifdef CONFIG_FUNCTION_PROFILER
        unsigned long long subtime;
#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
        unsigned long fp;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
        unsigned long *retp;
#endif
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
function_graph_enter(unsigned long ret, unsigned long func,
                     unsigned long frame_pointer, unsigned long *retp);

struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx);

unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
                                    unsigned long ret, unsigned long *retp);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer, but we want it to still be traced by the normal function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph             notrace

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32

extern int register_ftrace_graph(struct fgraph_ops *ops);
extern void unregister_ftrace_graph(struct fgraph_ops *ops);
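/*
 * Example (an illustrative sketch, not from this header): hooking
 * function entry and return with the function graph infrastructure.
 * The names my_entry, my_return and my_fgraph_ops are hypothetical.
 *
 *      static int my_entry(struct ftrace_graph_ent *trace)
 *      {
 *              return 1;       <- nonzero means: trace this function
 *      }
 *
 *      static void my_return(struct ftrace_graph_ret *trace)
 *      {
 *              ... trace->rettime - trace->calltime is the duration ...
 *      }
 *
 *      static struct fgraph_ops my_fgraph_ops = {
 *              .entryfunc      = my_entry,
 *              .retfunc        = my_return,
 *      };
 *
 *      register_ftrace_graph(&my_fgraph_ops);
 */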
extern bool ftrace_graph_is_dead(void);
extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline void pause_graph_tracing(void)
{
        atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
        atomic_dec(&current->tracing_graph_pause);
}
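/*
 * Usage sketch (illustrative): keep the graph tracer away from a code
 * region in the current task. The pause count is an atomic counter,
 * so pairs may nest.
 *
 *      pause_graph_tracing();
 *      ... region the graph tracer must skip ...
 *      unpause_graph_tracing();
 */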
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

/* Define as macros as fgraph_ops may not be defined */
#define register_ftrace_graph(ops) ({ -1; })
#define unregister_ftrace_graph(ops) do { } while (0)

static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
                      unsigned long *retp)
{
        return ret;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
        TSK_TRACE_FL_TRACE_BIT  = 0,
        TSK_TRACE_FL_GRAPH_BIT  = 1,
};
enum {
        TSK_TRACE_FL_TRACE      = 1 << TSK_TRACE_FL_TRACE_BIT,
        TSK_TRACE_FL_GRAPH      = 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
        set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
        clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
        return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
        set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
        clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
        return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;
extern int tracepoint_printk;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
                             void *buffer, size_t *lenp, loff_t *ppos);

#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */