/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ftrace header. For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.rst
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_recursion.h>
#include <linux/trace_clock.h>
#include <linux/jump_label.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

#ifdef CONFIG_TRACING
extern void ftrace_boot_snapshot(void);
#else
static inline void ftrace_boot_snapshot(void) { }
#endif

struct ftrace_ops;
struct ftrace_regs;
struct dyn_ftrace;

char *arch_ftrace_match_adjust(char *str, const char *search);

#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FREGS
unsigned long ftrace_return_to_handler(struct ftrace_regs *fregs);
#else
unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
#endif

#ifdef CONFIG_FUNCTION_TRACER
/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does. Or at least does enough to prevent any unwelcome side effects.
 *
 * Also define the function prototype that these architectures use
 * to call the ftrace_ops_list_func().
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
#else
# define FTRACE_FORCE_LIST_FUNC 0
void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs);
#endif
extern const struct ftrace_ops ftrace_nop_ops;
extern const struct ftrace_ops ftrace_list_ops;
struct ftrace_ops *ftrace_find_unique_ops(struct dyn_ftrace *rec);
#endif /* CONFIG_FUNCTION_TRACER */

/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
void early_trace_init(void);
#else
static inline void trace_init(void) { }
static inline void early_trace_init(void) { }
#endif

struct module;
struct ftrace_hash;

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
	defined(CONFIG_DYNAMIC_FTRACE)
int
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
			  unsigned long *off, char **modname, char *sym);
#else
static inline int
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
			  unsigned long *off, char **modname, char *sym)
{
	return 0;
}
#endif

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
			   char *type, char *name,
			   char *module_name, int *exported);
#else
static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
					 char *type, char *name,
					 char *module_name, int *exported)
{
	return -1;
}
#endif

#ifdef CONFIG_FUNCTION_TRACER

#include <linux/ftrace_regs.h>

extern int ftrace_enabled;

/**
 * ftrace_regs - ftrace partial/optimal register set
 *
 * ftrace_regs represents a group of registers which is used at the
 * function entry and exit. There are three types of registers.
 *
 * - Registers for passing the parameters to the callee, including the stack
 *   pointer. (e.g. rcx, rdx, rdi, rsi, r8, r9 and rsp on x86_64)
 * - Registers for passing the return values to the caller.
 *   (e.g. rax and rdx on x86_64)
 * - Registers for hooking the function call and return, including the
 *   frame pointer (the frame pointer is architecture/config dependent)
 *   (e.g. rip, rbp and rsp for x86_64)
 *
 * Also, architecture dependent fields can be used for internal processing.
 * (e.g. orig_ax on x86_64)
 *
 * Basically, ftrace_regs stores the registers related to the context.
 * On function entry, registers for the function parameters and for hooking
 * the function call are stored, and on function exit, registers for the
 * function return value and frame pointers are stored.
 *
 * Which registers are restored from the ftrace_regs also depends on the
 * context.
 * On function entry, those registers will be restored except for
 * the stack pointer, so that the user can change the function parameters
 * and instruction pointer (e.g. live patching.)
 * On function exit, only the registers used for the return values
 * are restored.
 *
 * NOTE: user *must not* access regs directly, only do it via APIs, because
 * the members can change depending on the architecture.
 * This is why the structure is empty here, so that nothing accesses
 * the ftrace_regs directly.
 */
struct ftrace_regs {
	/* Nothing to see here, use the accessor functions! */
};

#define ftrace_regs_size()	sizeof(struct __arch_ftrace_regs)

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
/*
 * Architectures that define HAVE_DYNAMIC_FTRACE_WITH_ARGS must define their own
 * arch_ftrace_get_regs() where it only returns pt_regs *if* it is fully
 * populated. It should return NULL otherwise.
 */
static inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs)
{
	return &arch_ftrace_regs(fregs)->regs;
}

/*
 * ftrace_regs_set_instruction_pointer() is to be defined by the architecture
 * if it allows setting the instruction pointer from the ftrace_regs when
 * HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports live kernel patching.
 */
#define ftrace_regs_set_instruction_pointer(fregs, ip) do { } while (0)
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */

#ifdef CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS

static_assert(sizeof(struct pt_regs) == ftrace_regs_size());

#endif /* CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS */

static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
{
	if (!fregs)
		return NULL;

	return arch_ftrace_get_regs(fregs);
}

192 | ||
b9b55c89 MHG |
193 | #if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) || \ |
194 | defined(CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS) | |
195 | ||
196 | static __always_inline struct pt_regs * | |
197 | ftrace_partial_regs(struct ftrace_regs *fregs, struct pt_regs *regs) | |
198 | { | |
199 | /* | |
200 | * If CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS=y, ftrace_regs memory | |
201 | * layout is including pt_regs. So always returns that address. | |
202 | * Since arch_ftrace_get_regs() will check some members and may return | |
203 | * NULL, we can not use it. | |
204 | */ | |
205 | return &arch_ftrace_regs(fregs)->regs; | |
206 | } | |
207 | ||
208 | #endif /* !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS || CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS */ | |
#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS

/*
 * Architectures should define an arch dependent pt_regs fill that is
 * compatible with perf_arch_fetch_caller_regs() but based on ftrace_regs.
 * This requires
 *   - user_mode(_regs) returns false (always kernel mode).
 *   - the _regs can be used for a stack trace.
 */
#ifndef arch_ftrace_fill_perf_regs
/* As with perf_arch_fetch_caller_regs(), do nothing by default */
#define arch_ftrace_fill_perf_regs(fregs, _regs) do {} while (0)
#endif

static __always_inline struct pt_regs *
ftrace_fill_perf_regs(struct ftrace_regs *fregs, struct pt_regs *regs)
{
	arch_ftrace_fill_perf_regs(fregs, regs);
	return regs;
}

#else /* !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */

static __always_inline struct pt_regs *
ftrace_fill_perf_regs(struct ftrace_regs *fregs, struct pt_regs *regs)
{
	return &arch_ftrace_regs(fregs)->regs;
}

#endif

/*
 * When true, the ftrace_regs_{get,set}_*() functions may be used on fregs.
 * Note: this can be true even when ftrace_get_regs() cannot provide a pt_regs.
 */
static __always_inline bool ftrace_regs_has_args(struct ftrace_regs *fregs)
{
	if (IS_ENABLED(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS))
		return true;

	return ftrace_get_regs(fregs) != NULL;
}
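
/*
 * For example, a callback might use the arch accessor helpers from
 * <linux/ftrace_regs.h> only after this check. An illustrative sketch
 * (my_callback is a made-up name, not an API defined here):
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		if (ftrace_regs_has_args(fregs)) {
 *			unsigned long arg0 = ftrace_regs_get_argument(fregs, 0);
 *			unsigned long pc = ftrace_regs_get_instruction_pointer(fregs);
 *			...
 *		}
 *	}
 */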

#ifdef CONFIG_HAVE_REGS_AND_STACK_ACCESS_API
static __always_inline unsigned long
ftrace_regs_get_kernel_stack_nth(struct ftrace_regs *fregs, unsigned int nth)
{
	unsigned long *stackp;

	stackp = (unsigned long *)ftrace_regs_get_stack_pointer(fregs);
	if (((unsigned long)(stackp + nth) & ~(THREAD_SIZE - 1)) ==
	    ((unsigned long)stackp & ~(THREAD_SIZE - 1)))
		return *(stackp + nth);

	return 0;
}
#else /* !CONFIG_HAVE_REGS_AND_STACK_ACCESS_API */
#define ftrace_regs_get_kernel_stack_nth(fregs, nth)	(0L)
#endif /* CONFIG_HAVE_REGS_AND_STACK_ACCESS_API */
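
/*
 * For example, an entry handler could fetch a stack-passed value this way
 * (an illustrative sketch; the bounds check above returns 0 when the slot
 * is outside the current THREAD_SIZE-aligned kernel stack):
 *
 *	unsigned long val = ftrace_regs_get_kernel_stack_nth(fregs, 2);
 */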

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct ftrace_regs *fregs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);

/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION, STUB and
 * IPMODIFY are a kind of attribute flags which can be set only before
 * registering the ftrace_ops, and can not be modified while registered.
 * Changing those attribute flags after registering ftrace_ops will
 * cause unexpected results.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *            and passed to the callback. If this flag is set, but the
 *            architecture does not support passing regs
 *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *            ftrace_ops will fail to register, unless the next flag
 *            is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *            handler can handle an arch that does not save regs
 *            (the handler tests if regs == NULL), then it can set
 *            this flag instead. It will not fail registering the ftrace_ops
 *            but, the regs field will be NULL if the arch does not support
 *            passing regs to the handler.
 *            Note, if this flag is set, the SAVE_REGS flag will automatically
 *            get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION - The ftrace_ops can set this to tell the ftrace infrastructure
 *            that the callback needs recursion protection. If it does
 *            not set this, then the ftrace infrastructure will assume
 *            that the callback can handle recursion on its own.
 * STUB - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 *            register_ftrace_function() is called, it will initialize the ops)
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 *            The arch specific code sets this flag when it allocated a
 *            trampoline. This lets the arch know that it can update the
 *            trampoline in case the callback function changes.
 *            The ftrace_ops trampoline can be set by the ftrace users, and
 *            in such cases the arch must not modify it. Only the arch ftrace
 *            core code should set this flag.
 * IPMODIFY - The ops can modify the IP register. This can only be set with
 *            SAVE_REGS. If another ops with this flag set is already registered
 *            for any of the functions that this ops will be registered for, then
 *            this ops will fail to register or set_filter_ip.
 * PID - Is affected by set_ftrace_pid (allows filtering on those pids)
 * RCU - Set when the ops can only be called when RCU is watching.
 * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
 * PERMANENT - Set when the ops is permanent and should not be affected by
 *             ftrace_enabled.
 * DIRECT - Used by the direct ftrace_ops helper for direct functions
 *            (internal ftrace only, should not be used by others)
 * SUBOP - Is controlled by another op in field managed.
 * GRAPH - Is a component of the fgraph_ops structure
 */
enum {
	FTRACE_OPS_FL_ENABLED			= BIT(0),
	FTRACE_OPS_FL_DYNAMIC			= BIT(1),
	FTRACE_OPS_FL_SAVE_REGS			= BIT(2),
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= BIT(3),
	FTRACE_OPS_FL_RECURSION			= BIT(4),
	FTRACE_OPS_FL_STUB			= BIT(5),
	FTRACE_OPS_FL_INITIALIZED		= BIT(6),
	FTRACE_OPS_FL_DELETED			= BIT(7),
	FTRACE_OPS_FL_ADDING			= BIT(8),
	FTRACE_OPS_FL_REMOVING			= BIT(9),
	FTRACE_OPS_FL_MODIFYING			= BIT(10),
	FTRACE_OPS_FL_ALLOC_TRAMP		= BIT(11),
	FTRACE_OPS_FL_IPMODIFY			= BIT(12),
	FTRACE_OPS_FL_PID			= BIT(13),
	FTRACE_OPS_FL_RCU			= BIT(14),
	FTRACE_OPS_FL_TRACE_ARRAY		= BIT(15),
	FTRACE_OPS_FL_PERMANENT			= BIT(16),
	FTRACE_OPS_FL_DIRECT			= BIT(17),
	FTRACE_OPS_FL_SUBOP			= BIT(18),
	FTRACE_OPS_FL_GRAPH			= BIT(19),
};
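
/*
 * The attribute flags are typically OR'd into ops->flags before
 * registration, e.g. (an illustrative sketch, not a definition from this
 * header; my_callback and my_ops are made-up names):
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED |
 *			  FTRACE_OPS_FL_RECURSION,
 *	};
 */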

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
#define FTRACE_OPS_FL_SAVE_ARGS			FTRACE_OPS_FL_SAVE_REGS
#else
#define FTRACE_OPS_FL_SAVE_ARGS			0
#endif

/*
 * FTRACE_OPS_CMD_* commands allow the ftrace core logic to request changes
 * to a ftrace_ops. Note, the requests may fail.
 *
 * ENABLE_SHARE_IPMODIFY_SELF - enable a DIRECT ops to work on the same
 *                              function as an ops with IPMODIFY. Called
 *                              when the DIRECT ops is being registered.
 *                              This is called with both direct_mutex and
 *                              ftrace_lock locked.
 *
 * ENABLE_SHARE_IPMODIFY_PEER - enable a DIRECT ops to work on the same
 *                              function as an ops with IPMODIFY. Called
 *                              when the other ops (the one with IPMODIFY)
 *                              is being registered.
 *                              This is called with direct_mutex locked.
 *
 * DISABLE_SHARE_IPMODIFY_PEER - disable a DIRECT ops from working on the same
 *                               function as an ops with IPMODIFY. Called
 *                               when the other ops (the one with IPMODIFY)
 *                               is being unregistered.
 *                               This is called with direct_mutex locked.
 */
enum ftrace_ops_cmd {
	FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF,
	FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER,
	FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER,
};

/*
 * For most ftrace_ops_cmd commands,
 * Returns:
 *        0 - Success.
 *        Negative on failure. The return value is dependent on the
 *        callback.
 */
typedef int (*ftrace_ops_func_t)(struct ftrace_ops *op, enum ftrace_ops_cmd cmd);

#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
	struct ftrace_hash __rcu	*notrace_hash;
	struct ftrace_hash __rcu	*filter_hash;
	struct mutex			regex_lock;
};

void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
static inline void ftrace_free_init_mem(void)
{
	ftrace_boot_snapshot();
}
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif

/*
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
 * core data, the unregistering of it will perform a scheduling on all CPUs
 * to make sure that there are no more users. Depending on the load of the
 * system that may take a bit of time.
 *
 * Any private data added must also take care not to be freed and if private
 * data is added to a ftrace_ops that is in core code, the user of the
 * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
 */
struct ftrace_ops {
	ftrace_func_t			func;
	struct ftrace_ops __rcu		*next;
	unsigned long			flags;
	void				*private;
	ftrace_func_t			saved_func;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_ops_hash		local_hash;
	struct ftrace_ops_hash		*func_hash;
	struct ftrace_ops_hash		old_hash;
	unsigned long			trampoline;
	unsigned long			trampoline_size;
	struct list_head		list;
	struct list_head		subop_list;
	ftrace_ops_func_t		ops_func;
	struct ftrace_ops		*managed;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	unsigned long			direct_call;
#endif
#endif
};

extern struct ftrace_ops __rcu *ftrace_ops_list;
extern struct ftrace_ops ftrace_list_end;

/*
 * Traverse the ftrace_ops_list, invoking all entries. The reason that we
 * can use rcu_dereference_raw_check() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism. The rcu_dereference_raw_check() calls are needed to handle
 * concurrent insertions into the ftrace_ops_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_check(list);		\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)				\
	while (likely(op = rcu_dereference_raw_check((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))
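
/*
 * The pair is used like a do/while loop, e.g. (an illustrative sketch of
 * how the core iterates the list):
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		...	(inspect op)
 *	} while_for_each_ftrace_op(op);
 */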

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also
 * be read_mostly. These functions do modify read_mostly variables
 * so use them sparingly. Never free an ftrace_op or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
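
/*
 * A minimal user looks like the sketch below (illustrative only;
 * my_callback and my_ops are made-up names, and error checking is
 * omitted):
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		...	(called for every traced function)
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */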

extern void ftrace_stub(unsigned long a0, unsigned long a1,
			struct ftrace_ops *op, struct ftrace_regs *fregs);

int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs);
#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
static inline int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_FUNCTION_TRACER */

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
	unsigned long direct; /* for direct lookup only */
};

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
unsigned long ftrace_find_rec_direct(unsigned long ip);
int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
			     bool free_filters);
int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr);

void ftrace_stub_direct_tramp(void);
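
/*
 * A direct caller is typically attached like this (an illustrative sketch
 * along the lines of samples/ftrace/ftrace-direct.c; my_tramp is a made-up
 * name for an arch-specific assembly trampoline, and error checking is
 * omitted):
 *
 *	static struct ftrace_ops direct;
 *
 *	ftrace_set_filter_ip(&direct, (unsigned long)wake_up_process, 0, 0);
 *	register_ftrace_direct(&direct, (unsigned long)my_tramp);
 *	...
 *	unregister_ftrace_direct(&direct, (unsigned long)my_tramp, true);
 */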

#else
struct ftrace_ops;
static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
{
	return 0;
}
static inline int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}
static inline int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
					   bool free_filters)
{
	return -ENODEV;
}
static inline int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}
static inline int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}

/*
 * This must be implemented by the architecture.
 * It is the way the ftrace direct_ops helper, when called
 * via ftrace (because there are other callbacks besides the
 * direct call), can inform the architecture's trampoline that this
 * routine has a direct caller, and what the caller is.
 *
 * For example, in x86, it returns the direct caller
 * callback function via the regs->orig_ax parameter.
 * Then in the ftrace trampoline, if this is set, it makes
 * the return from the trampoline jump to the direct caller
 * instead of going back to the function it just traced.
 */
static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
						 unsigned long addr) { }
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

#ifdef CONFIG_STACK_TRACER

int stack_trace_sysctl(const struct ctl_table *table, int write, void *buffer,
		       size_t *lenp, loff_t *ppos);

/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);

/**
 * stack_tracer_disable - temporarily disable the stack tracer
 *
 * There are a few locations (namely in RCU) where stack tracing
 * cannot be executed. This function is used to disable stack
 * tracing during those critical sections.
 *
 * This function must be called with preemption or interrupts
 * disabled and stack_tracer_enable() must be called shortly after
 * while preemption or interrupts are still disabled.
 */
static inline void stack_tracer_disable(void)
{
	/* Preemption or interrupts must be disabled */
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_inc(disable_stack_tracer);
}

/**
 * stack_tracer_enable - re-enable the stack tracer
 *
 * After stack_tracer_disable() is called, stack_tracer_enable()
 * must be called shortly afterward.
 */
static inline void stack_tracer_enable(void)
{
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_dec(disable_stack_tracer);
}
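
/*
 * The expected calling pattern is, for example (an illustrative sketch):
 *
 *	preempt_disable_notrace();
 *	stack_tracer_disable();
 *	...	(code that must not be stack traced)
 *	stack_tracer_enable();
 *	preempt_enable_notrace();
 */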
#else
static inline void stack_tracer_disable(void) { }
static inline void stack_tracer_enable(void) { }
#endif

enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
	FTRACE_MAY_SLEEP		= (1 << 5),
};

/* Arches can override ftrace_get_symaddr() to convert fentry_ip to symaddr. */
#ifndef ftrace_get_symaddr
/**
 * ftrace_get_symaddr - return the symbol address from fentry_ip
 * @fentry_ip: the address of ftrace location
 *
 * Get the symbol address from @fentry_ip (fast path). If there is no fast
 * search path, this returns 0.
 * Users may need to use the kallsyms API to find the symbol address.
 */
#define ftrace_get_symaddr(fentry_ip) (0)
#endif

void ftrace_sync_ipi(void *data);

#ifdef CONFIG_DYNAMIC_FTRACE

void ftrace_arch_code_modify_prepare(void);
void ftrace_arch_code_modify_post_process(void);

enum ftrace_bug_type {
	FTRACE_BUG_UNKNOWN,
	FTRACE_BUG_INIT,
	FTRACE_BUG_NOP,
	FTRACE_BUG_CALL,
	FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;

/*
 * Archs can set this to point to a variable that holds the value that was
 * expected at the call site before calling ftrace_bug().
 */
extern const void *ftrace_expected;

void ftrace_bug(int err, struct dyn_ftrace *rec);

struct seq_file;

extern int ftrace_text_reserved(const void *start, const void *end);

struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);

bool is_ftrace_trampoline(unsigned long addr);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, which is '0-FTRACE_REF_MAX', is a counter of
 * the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS    - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *  IPMODIFY - the record allows for the IP address to be changed.
 *  DISABLED - the record is not ready to be touched yet
 *  DIRECT   - there is a direct function to call
 *  CALL_OPS - the record can use callsite-specific ops
 *  CALL_OPS_EN - the function is set up to use callsite-specific ops
 *  TOUCHED  - A callback was added since boot up
 *  MODIFIED - The function had IPMODIFY or DIRECT attached to it
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
	FTRACE_FL_ENABLED	= (1UL << 31),
	FTRACE_FL_REGS		= (1UL << 30),
	FTRACE_FL_REGS_EN	= (1UL << 29),
	FTRACE_FL_TRAMP		= (1UL << 28),
	FTRACE_FL_TRAMP_EN	= (1UL << 27),
	FTRACE_FL_IPMODIFY	= (1UL << 26),
	FTRACE_FL_DISABLED	= (1UL << 25),
	FTRACE_FL_DIRECT	= (1UL << 24),
	FTRACE_FL_DIRECT_EN	= (1UL << 23),
	FTRACE_FL_CALL_OPS	= (1UL << 22),
	FTRACE_FL_CALL_OPS_EN	= (1UL << 21),
	FTRACE_FL_TOUCHED	= (1UL << 20),
	FTRACE_FL_MODIFIED	= (1UL << 19),
};

#define FTRACE_REF_MAX_SHIFT	19
#define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)	((rec)->flags & FTRACE_REF_MAX)
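
/*
 * For example, given a record, the two halves of flags are read as
 * (an illustrative sketch):
 *
 *	unsigned long users = ftrace_rec_count(rec);
 *	bool traced = rec->flags & FTRACE_FL_ENABLED;
 */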

struct dyn_ftrace {
	unsigned long		ip; /* address of mcount call-site */
	unsigned long		flags;
	struct dyn_arch_ftrace	arch;
};

int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset);
int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
			  unsigned int cnt, int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
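
/*
 * Filters are normally set up before the ops is registered, e.g.
 * (an illustrative sketch reusing the made-up my_ops from above;
 * error checking omitted):
 *
 *	ftrace_set_filter(&my_ops, "vfs_read", strlen("vfs_read"), 1);
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 *	ftrace_free_filter(&my_ops);
 */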

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *   IGNORE      - The function is already what we want it to be
 *   MAKE_CALL   - Start tracing the function
 *   MODIFY_CALL - Stop saving regs for the function
 *   MAKE_NOP    - Stop tracing the function
 */
enum {
	FTRACE_UPDATE_IGNORE,
	FTRACE_UPDATE_MAKE_CALL,
	FTRACE_UPDATE_MODIFY_CALL,
	FTRACE_UPDATE_MAKE_NOP,
};

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_DO_PROBES	= (1 << 3),
	FTRACE_ITER_PROBE	= (1 << 4),
	FTRACE_ITER_MOD		= (1 << 5),
	FTRACE_ITER_ENABLED	= (1 << 6),
	FTRACE_ITER_TOUCHED	= (1 << 7),
	FTRACE_ITER_ADDRS	= (1 << 8),
};

void arch_ftrace_update_code(int command);
void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
void arch_ftrace_trampoline_free(struct ftrace_ops *ops);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))
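
/*
 * For example, arch code can walk every dyn_ftrace record this way
 * (an illustrative sketch; the walk is performed under ftrace_lock):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		...	(inspect or update rec)
 *	}
 */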

int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
		      struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then it can define FTRACE_GRAPH_TRAMP_ADDR
 * to be that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *   0 on success
 *   -EFAULT on error reading the location
 *   -EINVAL on a failed compare of the contents
 *   -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);

/**
 * ftrace_need_init_nop - return whether nop call sites should be initialized
 *
 * Normally the compiler's -mnop-mcount generates suitable nops, so we don't
 * need to call ftrace_init_nop() if the code is built with that flag.
 * Architectures where this is not always the case may define their own
 * condition.
 *
 * Return must be:
 *   0       if ftrace_init_nop() should be called
 *   Nonzero if ftrace_init_nop() should not be called
 */

#ifndef ftrace_need_init_nop
#define ftrace_need_init_nop() (!__is_defined(CC_USING_NOP_MCOUNT))
#endif

/**
 * ftrace_init_nop - initialize a nop call site
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should contain the contents created by
 * the compiler
 *
 * Return must be:
 *   0 on success
 *   -EFAULT on error reading the location
 *   -EINVAL on a failed compare of the contents
 *   -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
#ifndef ftrace_init_nop
static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
}
#endif

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *   0 on success
 *   -EFAULT on error reading the location
 *   -EINVAL on a failed compare of the contents
 *   -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

#if defined(CONFIG_DYNAMIC_FTRACE_WITH_REGS) || \
	defined(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) || \
	defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS)
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the call site record (e.g. mcount/fentry)
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * When using call ops, this is called when the associated ops change, even
 * when (addr == old_addr).
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *   0 on success
 *   -EFAULT on error reading the location
 *   -EINVAL on a failed compare of the contents
 *   -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			      unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	return -EINVAL;
}
#endif

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline int ftrace_text_reserved(const void *start, const void *end)
{
	return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
	return 0;
}

/*
 * Again, users of functions that have ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of inline.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter_ips(ops, ips, cnt, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
#define ftrace_ops_set_global_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
					  size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
					   size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }

static inline bool is_ftrace_trampoline(unsigned long addr)
{
	return false;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifndef ftrace_graph_func
#define ftrace_graph_func ftrace_stub
#define FTRACE_OPS_GRAPH_STUB FTRACE_OPS_FL_STUB
#else
#define FTRACE_OPS_GRAPH_STUB 0
#endif
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}
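
/*
 * The save/restore pair is used like this (an illustrative sketch;
 * the caller provides the synchronization noted above):
 *
 *	int saved = __ftrace_enabled_save();
 *	...	(region where function tracing must stay off)
 *	__ftrace_enabled_restore(saved);
 */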

/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))

static __always_inline unsigned long get_lock_parent_ip(void)
{
	unsigned long addr = CALLER_ADDR0;

	if (!in_lock_functions(addr))
		return addr;
	addr = CALLER_ADDR1;
	if (!in_lock_functions(addr))
		return addr;
	return CALLER_ADDR2;
}

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will make code out
 * of the CALLER_ADDR, when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
#define FTRACE_CALLSITE_SECTION	"__patchable_function_entries"
#else
#define FTRACE_CALLSITE_SECTION	"__mcount_loc"
#endif
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
} __packed;

/*
 * Structure that defines an entry function trace with retaddr.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct fgraph_retaddr_ent {
	unsigned long func; /* Current function */
	int depth;
	unsigned long retaddr; /* Return address */
} __packed;

/*
 * Structure that defines a return function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
	unsigned long retval;
#endif
	int depth;
	/* Number of functions that overran the depth limit for current task */
	unsigned int overrun;
} __packed;

struct fgraph_ops;

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *,
				       struct fgraph_ops *,
				       struct ftrace_regs *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *,
				      struct fgraph_ops *,
				      struct ftrace_regs *); /* entry */

extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace,
				   struct fgraph_ops *gops,
				   struct ftrace_regs *fregs);
bool ftrace_pids_enabled(struct ftrace_ops *ops);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

struct fgraph_ops {
	trace_func_graph_ent_t		entryfunc;
	trace_func_graph_ret_t		retfunc;
	struct ftrace_ops		ops; /* for the hash lists */
	void				*private;
	trace_func_graph_ent_t		saved_func;
	int				idx;
};

void *fgraph_reserve_data(int idx, int size_bytes);
void *fgraph_retrieve_data(int idx, int *size_bytes);
void *fgraph_retrieve_parent_data(int idx, int *size_bytes, int depth);

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	unsigned long fp;
#endif
	unsigned long *retp;
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
function_graph_enter_regs(unsigned long ret, unsigned long func,
			  unsigned long frame_pointer, unsigned long *retp,
			  struct ftrace_regs *fregs);

static inline int function_graph_enter(unsigned long ret, unsigned long func,
				       unsigned long fp, unsigned long *retp)
{
	return function_graph_enter_regs(ret, func, fp, retp, NULL);
}

struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int skip);
unsigned long ftrace_graph_top_ret_addr(struct task_struct *task);

unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp);
unsigned long *fgraph_get_task_var(struct fgraph_ops *gops);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer but we still want it to be traced by the usual function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph		notrace

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32

extern int register_ftrace_graph(struct fgraph_ops *ops);
extern void unregister_ftrace_graph(struct fgraph_ops *ops);
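
/*
 * A graph tracing user supplies an entry and a return handler, e.g.
 * (an illustrative sketch; my_entry, my_return and my_gops are made-up
 * names, and error checking is omitted):
 *
 *	static int my_entry(struct ftrace_graph_ent *trace,
 *			    struct fgraph_ops *gops, struct ftrace_regs *fregs)
 *	{
 *		return 1;	(non-zero means: also trace this return)
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace,
 *			      struct fgraph_ops *gops, struct ftrace_regs *fregs)
 *	{
 *		...
 *	}
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_entry,
 *		.retfunc	= my_return,
 *	};
 *
 *	register_ftrace_graph(&my_gops);
 *	...
 *	unregister_ftrace_graph(&my_gops);
 */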

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
DECLARE_STATIC_KEY_FALSE(kill_ftrace_graph);

static inline bool ftrace_graph_is_dead(void)
{
	return static_branch_unlikely(&kill_ftrace_graph);
}

extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

/* Used by assembly, but to quiet sparse warnings */
extern struct ftrace_ops *function_trace_op;

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

/* Define as macros as fgraph_ops may not be defined */
#define register_ftrace_graph(ops) ({ -1; })
#define unregister_ftrace_graph(ops) do { } while (0)

static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
		      unsigned long *retp)
{
	return ret;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING
enum ftrace_dump_mode;

extern int ftrace_dump_on_oops_enabled(void);

extern void disable_trace_on_warning(void);

#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */