// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/sched/task.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>

#include <trace/events/sched.h>

#include <asm/sections.h>
#include <asm/setup.h>

#include "ftrace_internal.h"
#include "trace_output.h"
#include "trace_stat.h"

/* Flags that do not get reset */
#define FTRACE_NOCLEAR_FLAGS	(FTRACE_FL_DISABLED | FTRACE_FL_TOUCHED | \
				 FTRACE_FL_MODIFIED)

#define FTRACE_INVALID_FUNCTION		"__ftrace_invalid_address__"

#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})

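/*
 * Being statement expressions, the two macros above evaluate to the
 * tested condition, so a caller can test and shut ftrace down in one
 * step. A minimal usage sketch (hypothetical caller):
 *
 *	if (FTRACE_WARN_ON(!rec))
 *		return -EINVAL;	 (ftrace_kill() has already run here)
 */
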
/* hash bits for specific function selection */
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), \
	.subop_list		= LIST_HEAD_INIT(opsname.subop_list),
#else
#define INIT_OPS_HASH(opsname)
#endif

enum {
	FTRACE_MODIFY_ENABLE_FL		= (1 << 0),
	FTRACE_MODIFY_MAY_SLEEP_FL	= (1 << 1),
};

struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int __maybe_unused last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

bool ftrace_pids_enabled(struct ftrace_ops *ops)
{
	struct trace_array *tr;

	if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
		return false;

	tr = ops->private;

	return tr->function_pids != NULL || tr->function_no_pids != NULL;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

DEFINE_MUTEX(ftrace_lock);

struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = (struct ftrace_ops __rcu *)&ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
struct ftrace_ops global_ops;

/* Defined by vmlinux.lds.h; see the comment above arch_ftrace_ops_list_func for details */
void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
/*
 * Stub used to invoke the list ops without requiring a separate trampoline.
 */
const struct ftrace_ops ftrace_list_ops = {
	.func	= ftrace_ops_list_func,
	.flags	= FTRACE_OPS_FL_STUB,
};

static void ftrace_ops_nop_func(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op,
				struct ftrace_regs *fregs)
{
	/* do nothing */
}

/*
 * Stub used when a call site is disabled. May be called transiently by threads
 * which have made it into ftrace_caller but haven't yet recovered the ops at
 * the point the call site is disabled.
 */
const struct ftrace_ops ftrace_nop_ops = {
	.func	= ftrace_ops_nop_func,
	.flags	= FTRACE_OPS_FL_STUB,
};
#endif

static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		INIT_LIST_HEAD(&ops->subop_list);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

/* Call this function when a callback filters on set_ftrace_pid */
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	int pid;

	if (tr) {
		pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
		if (pid == FTRACE_PID_IGNORE)
			return;
		if (pid != FTRACE_PID_TRACE &&
		    pid != current->pid)
			return;
	}

	op->saved_func(ip, parent_ip, op, fregs);
}

void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
	/*
	 * If this is a dynamic or RCU ops, or we force list func,
	 * then it needs to call the list anyway.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
	    FTRACE_FORCE_LIST_FUNC)
		return ftrace_ops_list_func;

	return ftrace_ops_get_func(ops);
}

static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
						lockdep_is_held(&ftrace_lock));

	/* If there's no ftrace_ops registered, just call the stub function */
	if (set_function_trace_op == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (rcu_dereference_protected(ftrace_ops_list->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		func = ftrace_ops_get_list_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	synchronize_rcu_tasks_rude();
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}

static void add_ftrace_ops(struct ftrace_ops __rcu **list,
			   struct ftrace_ops *ops)
{
	rcu_assign_pointer(ops->next, *list);

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
			     struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (rcu_dereference_protected(*list,
			lockdep_is_held(&ftrace_lock)) == ops &&
	    rcu_dereference_protected(ops->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		rcu_assign_pointer(*list, &ftrace_list_end);
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

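/*
 * Note the pointer-to-pointer walk in remove_ftrace_ops() above: @p
 * points at a link itself (first &*list, then &(*p)->next of each
 * element), so unlinking needs no "previous element" bookkeeping and
 * head and middle removals collapse into the one assignment
 * *p = (*p)->next.
 */
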
static void ftrace_update_trampoline(struct ftrace_ops *ops);

int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif
	if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
		return -EBUSY;

	if (!is_kernel_core_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	add_ftrace_ops(&ftrace_ops_list, ops);

	/* Always save the function, and reset at unregistering */
	ops->saved_func = ops->func;

	if (ftrace_pids_enabled(ops))
		ops->func = ftrace_pid_func;

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	ops->func = ops->saved_func;

	return 0;
}

static void ftrace_update_pid_func(void)
{
	struct ftrace_ops *op;

	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			op->func = ftrace_pids_enabled(op) ?
				ftrace_pid_func : op->saved_func;
			ftrace_update_trampoline(op);
		}
	} while_for_each_ftrace_op(op);

	fgraph_update_pid_func();

	update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

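/*
 * Rough sizing, assuming a 64-bit build with 4K pages and the graph
 * tracer on: struct ftrace_profile is 48 bytes (16 for the hlist node,
 * 8 each for ip, counter, time and time_squared) and the page header
 * is 16 bytes, so PROFILES_PER_PAGE works out to (4096 - 16) / 48 = 85
 * records per page.
 */
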
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(const void *p1, const void *p2)
{
	const struct ftrace_profile *a = p1;
	const struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* not function graph: compares against hits */
static int function_stat_cmp(const void *p1, const void *p2)
{
	const struct ftrace_profile *a = p1;
	const struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "  Function                               "
		 "Hit    Time            Avg             s^2\n"
		    "  --------                               "
		 "---    ----            ---             ---\n");
#else
	seq_puts(m, "  Function                               Hit\n"
		    "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
	unsigned long long stddev_denom;
#endif
	guard(mutex)(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0))
		return -EBUSY;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	avg = div64_ul(rec->time, rec->counter);
	if (tracing_thresh && (avg < tracing_thresh))
		return 0;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "    ");

	/*
	 * Variance formula:
	 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
	 * Maybe Welford's method is better here?
	 * Divide only by 1000 for ns^2 -> us^2 conversion.
	 * trace_print_graph_duration will divide by 1000 again.
	 */
	stddev = 0;
	stddev_denom = rec->counter * (rec->counter - 1) * 1000;
	if (stddev_denom) {
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;
		stddev = div64_ul(stddev, stddev_denom);
	}

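	/*
	 * Worked example of the variance above (illustrative numbers):
	 * two hits of 3000ns and 1000ns give counter = 2, time = 4000 and
	 * time_squared = 10,000,000. Then n * \Sum (x_i)^2 - (\Sum x_i)^2
	 * = 2 * 10,000,000 - 16,000,000 = 4,000,000, and dividing by
	 * n * (n-1) = 2 gives s^2 = 2,000,000 ns^2 = 2 us^2 (the factor
	 * of 1000 here and in trace_print_graph_duration is ns^2 -> us^2).
	 */
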
	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');

	return 0;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

static int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;

	if (!ftrace_profile_enabled)
		return;

	guard(preempt_notrace)();

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		return;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			return;
	}

	rec->counter++;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static bool fgraph_graph_time = true;

void ftrace_graph_graph_time_control(bool enable)
{
	fgraph_graph_time = enable;
}

struct profile_fgraph_data {
	unsigned long long		calltime;
	unsigned long long		subtime;
	unsigned long long		sleeptime;
};

static int profile_graph_entry(struct ftrace_graph_ent *trace,
			       struct fgraph_ops *gops,
			       struct ftrace_regs *fregs)
{
	struct profile_fgraph_data *profile_data;

	function_profile_call(trace->func, 0, NULL, NULL);

	/* If function graph is shutting down, ret_stack can be NULL */
	if (!current->ret_stack)
		return 0;

	profile_data = fgraph_reserve_data(gops->idx, sizeof(*profile_data));
	if (!profile_data)
		return 0;

	profile_data->subtime = 0;
	profile_data->sleeptime = current->ftrace_sleeptime;
	profile_data->calltime = trace_clock_local();

	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace,
				 struct fgraph_ops *gops,
				 struct ftrace_regs *fregs)
{
	struct profile_fgraph_data *profile_data;
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	unsigned long long rettime = trace_clock_local();
	struct ftrace_profile *rec;
	int size;

	guard(preempt_notrace)();

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		return;

	profile_data = fgraph_retrieve_data(gops->idx, &size);

	/* If the calltime was zero'd ignore it */
	if (!profile_data || !profile_data->calltime)
		return;

	calltime = rettime - profile_data->calltime;

	if (!fgraph_sleep_time) {
		if (current->ftrace_sleeptime)
			calltime -= current->ftrace_sleeptime - profile_data->sleeptime;
	}

	if (!fgraph_graph_time) {
		struct profile_fgraph_data *parent_data;

		/* Append this call time to the parent time to subtract */
		parent_data = fgraph_retrieve_parent_data(gops->idx, &size, 1);
		if (parent_data)
			parent_data->subtime += calltime;

		if (profile_data->subtime && profile_data->subtime < calltime)
			calltime -= profile_data->subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}
}

static struct fgraph_ops fprofiler_ops = {
	.entryfunc = &profile_graph_entry,
	.retfunc = &profile_graph_return,
};

static int register_ftrace_profiler(void)
{
	ftrace_ops_set_global_filter(&fprofiler_ops.ops);
	return register_ftrace_graph(&fprofiler_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph(&fprofiler_ops);
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
};

static int register_ftrace_profiler(void)
{
	ftrace_ops_set_global_filter(&ftrace_profile_ops);
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	guard(mutex)(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0)
				return ret;

			ret = register_ftrace_profiler();
			if (ret < 0)
				return ret;
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_rcu.
			 */
			unregister_ftrace_profiler();
		}
	}

	*ppos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

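/*
 * The fops above back the "function_profile_enabled" file created in
 * ftrace_profile_tracefs() below. From userspace (tracefs is typically
 * mounted at /sys/kernel/tracing) the profiler is driven with e.g.:
 *
 *	echo 1 > /sys/kernel/tracing/function_profile_enabled
 *	cat /sys/kernel/tracing/trace_stat/function0
 */
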
/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		name = kasprintf(GFP_KERNEL, "function%d", cpu);
		if (!name) {
			/*
			 * The files created are permanent; if something
			 * happens, we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	trace_create_file("function_profile_enabled",
			  TRACE_MODE_WRITE, d_tracer, NULL,
			  &ftrace_profile_fops);
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

struct ftrace_func_probe {
	struct ftrace_probe_ops	*probe_ops;
	struct ftrace_ops	ops;
	struct trace_array	*tr;
	struct list_head	list;
	void			*data;
	int			ref;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_INITIALIZED |
					  FTRACE_OPS_FL_PID,
};

/*
 * Used by the stack unwinder to know about dynamic ftrace trampolines.
 */
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
{
	struct ftrace_ops *op = NULL;

	/*
	 * Some of the ops may be dynamically allocated;
	 * they are freed after a synchronize_rcu().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				preempt_enable_notrace();
				return op;
			}
	} while_for_each_ftrace_op(op);
	preempt_enable_notrace();

	return NULL;
}

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
	return ftrace_ops_trampoline(addr) != NULL;
}

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			order;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static __always_inline unsigned long
ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
{
	if (hash->size_bits > 0)
		return hash_long(ip, hash->size_bits);

	return 0;
}

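/*
 * With size_bits == 0 (as in the shared EMPTY_HASH, which has a single
 * bucket) the key is always 0; otherwise hash_long() folds the ip down
 * to size_bits bits, e.g. FTRACE_HASH_DEFAULT_BITS == 10 yields bucket
 * indexes 0..1023.
 */
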
/* Only use this function if ftrace_hash_empty() has already been tested */
static __always_inline struct ftrace_func_entry *
__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	key = ftrace_hash_key(hash, ip);
	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

/**
 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
 * @hash: The hash to look at
 * @ip: The instruction pointer to test
 *
 * Search a given @hash to see if a given instruction pointer (@ip)
 * exists in it.
 *
 * Returns: the entry that holds the @ip if found. NULL otherwise.
 */
struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	if (ftrace_hash_empty(hash))
		return NULL;

	return __ftrace_lookup_ip(hash, ip);
}

static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	key = ftrace_hash_key(hash, entry->ip);
	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static struct ftrace_func_entry *
add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return entry;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del_rcu(&entry->hlist);
	hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
{
	list_del(&ftrace_mod->list);
	kfree(ftrace_mod->module);
	kfree(ftrace_mod->func);
	kfree(ftrace_mod);
}

static void clear_ftrace_mod_list(struct list_head *head)
{
	struct ftrace_mod_load *p, *n;

	/* stack tracer isn't supported yet */
	if (!head)
		return;

	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(p, n, head, list)
		free_ftrace_mod(p);
	mutex_unlock(&ftrace_lock);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
}

/**
 * ftrace_free_filter - remove all filters for an ftrace_ops
 * @ops: the ops to remove the filters from
 */
void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return;
	free_ftrace_hash(ops->func_hash->filter_hash);
	free_ftrace_hash(ops->func_hash->notrace_hash);
	ops->func_hash->filter_hash = EMPTY_HASH;
	ops->func_hash->notrace_hash = EMPTY_HASH;
}
EXPORT_SYMBOL_GPL(ftrace_free_filter);

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}

/* Used to save filters on functions for modules not loaded yet */
static int ftrace_add_mod(struct trace_array *tr,
			  const char *func, const char *module,
			  int enable)
{
	struct ftrace_mod_load *ftrace_mod;
	struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;

	ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
	if (!ftrace_mod)
		return -ENOMEM;

	INIT_LIST_HEAD(&ftrace_mod->list);
	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
	ftrace_mod->enable = enable;

	if (!ftrace_mod->func || !ftrace_mod->module)
		goto out_free;

	list_add(&ftrace_mod->list, mod_head);

	return 0;

 out_free:
	free_ftrace_mod(ftrace_mod);

	return -ENOMEM;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	int size;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	if (hash)
		new_hash->flags = hash->flags;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			if (add_hash_entry(new_hash, entry->ip) == NULL)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops);
static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash);

/*
 * Allocate a new hash and remove entries from @src and move them to the new hash.
 * On success, the @src hash will be empty and should be freed.
 */
static struct ftrace_hash *__move_hash(struct ftrace_hash *src, int size)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	struct hlist_head *hhd;
	struct hlist_node *tn;
	int bits = 0;
	int i;

	/*
	 * Use around half the size (max bit of it), but
	 * a minimum of 2 is fine (as size of 0 or 1 both give 1 for bits).
	 */
	bits = fls(size / 2);

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return NULL;

	new_hash->flags = src->flags;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}
	return new_hash;
}

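/*
 * Sizing example for __move_hash(): a source hash holding 100 entries
 * asks for fls(100 / 2) = fls(50) = 6 bits, i.e. 64 buckets, roughly
 * two entries per bucket, and anything larger is clamped to
 * FTRACE_HASH_MAX_BITS (12 bits, 4096 buckets).
 */
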
/* Move the @src entries to a newly allocated hash */
static struct ftrace_hash *
__ftrace_hash_move(struct ftrace_hash *src)
{
	int size = src->count;

	/*
	 * If the new source is empty, just return the empty_hash.
	 */
	if (ftrace_hash_empty(src))
		return EMPTY_HASH;

	return __move_hash(src, size);
}

/**
 * ftrace_hash_move - move a new hash to a filter and do updates
 * @ops: The ops with the hash that @dst points to
 * @enable: True if for the filter hash, false for the notrace hash
 * @dst: Points to the @ops hash that should be updated
 * @src: The hash to update @dst with
 *
 * This is called when an ftrace_ops hash is being updated and the
 * kernel needs to reflect this. Note, this only updates the kernel
 * function callbacks if the @ops is enabled (not to be confused with
 * @enable above). If the @ops is enabled, its hash determines what
 * callbacks get called. This function gets called when the @ops hash
 * is updated and it requires new callbacks.
 *
 * On success the elements of @src are moved to @dst, and @dst is updated
 * properly, as well as the functions determined by the @ops hashes
 * are now calling the @ops callback function.
 *
 * Regardless of return value, @src should be freed with free_ftrace_hash().
 */
static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_hash *new_hash;
	int ret;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	new_hash = __ftrace_hash_move(src);
	if (!new_hash)
		return -ENOMEM;

	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when filter_hash updating */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops);

	return 0;
}

fef5aeee SRRH |
1512 | static bool hash_contains_ip(unsigned long ip, |
1513 | struct ftrace_ops_hash *hash) | |
1514 | { | |
1515 | /* | |
1516 | * The function record is a match if it exists in the filter | |
fdda88d3 | 1517 | * hash and not in the notrace hash. Note, an empty hash is |
fef5aeee SRRH |
1518 | * considered a match for the filter hash, but an empty |
1519 | * notrace hash is considered not in the notrace hash. | |
1520 | */ | |
1521 | return (ftrace_hash_empty(hash->filter_hash) || | |
2b2c279c | 1522 | __ftrace_lookup_ip(hash->filter_hash, ip)) && |
fef5aeee | 1523 | (ftrace_hash_empty(hash->notrace_hash) || |
2b2c279c | 1524 | !__ftrace_lookup_ip(hash->notrace_hash, ip)); |
fef5aeee SRRH |
1525 | } |
1526 | ||
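/*
 * For example (illustrative values): with filter_hash = { schedule } and
 * an empty notrace_hash, only schedule's ip matches; with an empty
 * filter_hash and notrace_hash = { schedule }, every ip except
 * schedule's matches.
 */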
b848914c SR |
1527 | /* |
1528 | * Test the hashes for this ops to see if we want to call | |
1529 | * the ops->func or not. | |
1530 | * | |
1531 | * It's a match if the ip is in the ops->filter_hash or | |
1532 | * the filter_hash does not exist or is empty, | |
1533 | * AND | |
1534 | * the ip is not in the ops->notrace_hash. | |
cdbe61bf SR |
1535 | * |
1536 | * This needs to be called with preemption disabled as | |
74401729 | 1537 | * the hashes are freed with call_rcu(). |
b848914c | 1538 | */ |
3306fc4a | 1539 | int |
195a8afc | 1540 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) |
b848914c | 1541 | { |
fef5aeee | 1542 | struct ftrace_ops_hash hash; |
b848914c SR |
1543 | int ret; |
1544 | ||
195a8afc SRRH |
1545 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS |
1546 | /* | |
1547 | * There's a small race when adding ops: the ftrace handler | |
1548 | * that wants regs may be called without them. We cannot | |
1549 | * allow that handler to be called if regs is NULL. | |
1550 | */ | |
1551 | if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS)) | |
1552 | return 0; | |
1553 | #endif | |
1554 | ||
f86f4180 CZ |
1555 | rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash); |
1556 | rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash); | |
b848914c | 1557 | |
fef5aeee | 1558 | if (hash_contains_ip(ip, &hash)) |
b848914c SR |
1559 | ret = 1; |
1560 | else | |
1561 | ret = 0; | |
b848914c SR |
1562 | |
1563 | return ret; | |
1564 | } | |
1565 | ||
493762fc SR |
1566 | /* |
1567 | * This is a double (nested) for loop. Do not use 'break' to break out | |
1568 | * of the loop; you must use a goto. | |
1569 | */ | |
1570 | #define do_for_each_ftrace_rec(pg, rec) \ | |
1571 | for (pg = ftrace_pages_start; pg; pg = pg->next) { \ | |
1572 | int _____i; \ | |
1573 | for (_____i = 0; _____i < pg->index; _____i++) { \ | |
1574 | rec = &pg->records[_____i]; | |
1575 | ||
1576 | #define while_for_each_ftrace_rec() \ | |
1577 | } \ | |
1578 | } | |
1579 | ||
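/*
 * Example usage (sketch; 'target_ip' is a hypothetical address being
 * searched for). A 'break' would only leave the inner loop, so exiting
 * the walk early requires a goto:
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == target_ip)
 *			goto found;
 *	} while_for_each_ftrace_rec();
 *	return NULL;
 * found:
 *	return rec;
 */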
5855fead SR |
1580 | |
1581 | static int ftrace_cmp_recs(const void *a, const void *b) | |
1582 | { | |
a650e02a SR |
1583 | const struct dyn_ftrace *key = a; |
1584 | const struct dyn_ftrace *rec = b; | |
5855fead | 1585 | |
a650e02a | 1586 | if (key->flags < rec->ip) |
5855fead | 1587 | return -1; |
a650e02a SR |
1588 | if (key->ip >= rec->ip + MCOUNT_INSN_SIZE) |
1589 | return 1; | |
5855fead SR |
1590 | return 0; |
1591 | } | |
1592 | ||
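/*
 * Note: the bsearch key packs an address range into a single dyn_ftrace:
 * key.ip holds the start of the range and key.flags (an unsigned long)
 * is overloaded to hold the end, as set up in lookup_rec() below.
 */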
7e16f581 SRV |
1593 | static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end) |
1594 | { | |
1595 | struct ftrace_page *pg; | |
1596 | struct dyn_ftrace *rec = NULL; | |
1597 | struct dyn_ftrace key; | |
1598 | ||
1599 | key.ip = start; | |
1600 | key.flags = end; /* overload flags, as it is unsigned long */ | |
1601 | ||
1602 | for (pg = ftrace_pages_start; pg; pg = pg->next) { | |
ee92fa44 CZ |
1603 | if (pg->index == 0 || |
1604 | end < pg->records[0].ip || | |
7e16f581 SRV |
1605 | start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) |
1606 | continue; | |
1607 | rec = bsearch(&key, pg->records, pg->index, | |
1608 | sizeof(struct dyn_ftrace), | |
1609 | ftrace_cmp_recs); | |
d9815bff AS |
1610 | if (rec) |
1611 | break; | |
7e16f581 SRV |
1612 | } |
1613 | return rec; | |
1614 | } | |
1615 | ||
04cf31a7 ME |
1616 | /** |
1617 | * ftrace_location_range - return the first address of a traced location | |
1618 | * if it touches the given ip range | |
1619 | * @start: start of range to search. | |
1620 | * @end: end of range to search (inclusive). @end points to the last byte | |
1621 | * to check. | |
1622 | * | |
d1530413 | 1623 | * Returns: rec->ip if the related ftrace location is at least partly within |
04cf31a7 ME |
1624 | * the given address range. That is, the first address of the instruction |
1625 | * that is either a NOP or call to the function tracer. It checks the ftrace | |
1626 | * internal tables to determine if the address belongs or not. | |
1627 | */ | |
1628 | unsigned long ftrace_location_range(unsigned long start, unsigned long end) | |
c88fd863 | 1629 | { |
c88fd863 | 1630 | struct dyn_ftrace *rec; |
e60b613d | 1631 | unsigned long ip = 0; |
5855fead | 1632 | |
e60b613d | 1633 | rcu_read_lock(); |
7e16f581 SRV |
1634 | rec = lookup_rec(start, end); |
1635 | if (rec) | |
e60b613d ZY |
1636 | ip = rec->ip; |
1637 | rcu_read_unlock(); | |
c88fd863 | 1638 | |
e60b613d | 1639 | return ip; |
c88fd863 SR |
1640 | } |
1641 | ||
a650e02a | 1642 | /** |
aebfd125 | 1643 | * ftrace_location - return the ftrace location |
a650e02a SR |
1644 | * @ip: the instruction pointer to check |
1645 | * | |
d1530413 RD |
1646 | * Returns: |
1647 | * * If @ip matches the ftrace location, return @ip. | |
1648 | * * If @ip matches sym+0, return sym's ftrace location. | |
1649 | * * Otherwise, return 0. | |
a650e02a | 1650 | */ |
f0cf973a | 1651 | unsigned long ftrace_location(unsigned long ip) |
a650e02a | 1652 | { |
e60b613d | 1653 | unsigned long loc; |
aebfd125 PZ |
1654 | unsigned long offset; |
1655 | unsigned long size; | |
1656 | ||
e60b613d ZY |
1657 | loc = ftrace_location_range(ip, ip); |
1658 | if (!loc) { | |
aebfd125 | 1659 | if (!kallsyms_lookup_size_offset(ip, &size, &offset)) |
77e53cb2 | 1660 | return 0; |
aebfd125 PZ |
1661 | |
1662 | /* map sym+0 to __fentry__ */ | |
1663 | if (!offset) | |
e60b613d | 1664 | loc = ftrace_location_range(ip, ip + size - 1); |
aebfd125 | 1665 | } |
e60b613d | 1666 | return loc; |
a650e02a SR |
1667 | } |
1668 | ||
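/*
 * For example (sketch; use_ftrace_based_mechanism() is a hypothetical
 * helper), code that patches kernel text, such as kprobes, can use this
 * to detect that an address is an ftrace-managed call site that must be
 * modified through the ftrace API instead:
 *
 *	if (ftrace_location(addr))
 *		use_ftrace_based_mechanism();
 */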
1669 | /** | |
1670 | * ftrace_text_reserved - return true if range contains an ftrace location | |
1671 | * @start: start of range to search | |
1672 | * @end: end of range to search (inclusive). @end points to the last byte to check. | |
1673 | * | |
d1530413 | 1674 | * Returns: 1 if the range from @start to @end contains an ftrace location. |
a650e02a SR |
1675 | * That is, the instruction that is either a NOP or call to |
1676 | * the function tracer. It checks the ftrace internal tables to | |
1677 | * determine if the address belongs or not. | |
1678 | */ | |
d88471cb | 1679 | int ftrace_text_reserved(const void *start, const void *end) |
a650e02a | 1680 | { |
f0cf973a SR |
1681 | unsigned long ret; |
1682 | ||
1683 | ret = ftrace_location_range((unsigned long)start, | |
1684 | (unsigned long)end); | |
1685 | ||
1686 | return (int)!!ret; | |
a650e02a SR |
1687 | } |
1688 | ||
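/*
 * For example (sketch), kprobes refuses to plant a probe on a reserved
 * range with a check along these lines:
 *
 *	if (ftrace_text_reserved(p->addr, p->addr))
 *		return -EINVAL;
 */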
4fbb48cb SRRH |
1689 | /* Test if any ops registered to this rec needs regs */ |
1690 | static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec) | |
1691 | { | |
1692 | struct ftrace_ops *ops; | |
1693 | bool keep_regs = false; | |
1694 | ||
1695 | for (ops = ftrace_ops_list; | |
1696 | ops != &ftrace_list_end; ops = ops->next) { | |
1697 | /* pass rec in as regs to have non-NULL val */ | |
1698 | if (ftrace_ops_test(ops, rec->ip, rec)) { | |
1699 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) { | |
1700 | keep_regs = true; | |
1701 | break; | |
1702 | } | |
1703 | } | |
1704 | } | |
1705 | ||
1706 | return keep_regs; | |
1707 | } | |
1708 | ||
a124692b CJ |
1709 | static struct ftrace_ops * |
1710 | ftrace_find_tramp_ops_any(struct dyn_ftrace *rec); | |
1711 | static struct ftrace_ops * | |
4c75b0ff NR |
1712 | ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude); |
1713 | static struct ftrace_ops * | |
a124692b CJ |
1714 | ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops); |
1715 | ||
cf04f2d5 SRG |
1716 | static bool skip_record(struct dyn_ftrace *rec) |
1717 | { | |
1718 | /* | |
1719 | * At boot up, weak functions are set to disable. Function tracing | |
1720 | * can be enabled before they are, and they still need to be disabled now. | |
1721 | * Skip a disabled record, unless it is also marked as already | |
1722 | * enabled (this is needed to keep the accounting working). | |
1723 | */ | |
1724 | return rec->flags & FTRACE_FL_DISABLED && | |
1725 | !(rec->flags & FTRACE_FL_ENABLED); | |
1726 | } | |
1727 | ||
07bbe083 SRG |
1728 | /* |
1729 | * This is the main engine of the ftrace updates to the dyn_ftrace records. |
1730 | * | |
1731 | * It will iterate through all the available ftrace functions | |
1732 | * (the ones that ftrace can have callbacks to) and set the flags | |
1733 | * in the associated dyn_ftrace records. | |
1734 | * | |
1735 | * @inc: If true, the functions associated to @ops are added to | |
1736 | * the dyn_ftrace records, otherwise they are removed. | |
1737 | */ | |
84b6d3e6 | 1738 | static bool __ftrace_hash_rec_update(struct ftrace_ops *ops, |
ed926f9b SR |
1739 | bool inc) |
1740 | { | |
1741 | struct ftrace_hash *hash; | |
07bbe083 | 1742 | struct ftrace_hash *notrace_hash; |
ed926f9b SR |
1743 | struct ftrace_page *pg; |
1744 | struct dyn_ftrace *rec; | |
84b6d3e6 | 1745 | bool update = false; |
ed926f9b | 1746 | int count = 0; |
8c08f0d5 | 1747 | int all = false; |
ed926f9b SR |
1748 | |
1749 | /* Only update if the ops has been registered */ | |
1750 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) | |
84b6d3e6 | 1751 | return false; |
ed926f9b SR |
1752 | |
1753 | /* | |
ed926f9b SR |
1754 | * If the count is zero, we update all records. |
1755 | * Otherwise we just update the items in the hash. | |
ed926f9b | 1756 | */ |
07bbe083 SRG |
1757 | hash = ops->func_hash->filter_hash; |
1758 | notrace_hash = ops->func_hash->notrace_hash; | |
1759 | if (ftrace_hash_empty(hash)) | |
1760 | all = true; | |
ed926f9b SR |
1761 | |
1762 | do_for_each_ftrace_rec(pg, rec) { | |
07bbe083 | 1763 | int in_notrace_hash = 0; |
ed926f9b SR |
1764 | int in_hash = 0; |
1765 | int match = 0; | |
1766 | ||
cf04f2d5 | 1767 | if (skip_record(rec)) |
b7ffffbb SRRH |
1768 | continue; |
1769 | ||
ed926f9b SR |
1770 | if (all) { |
1771 | /* | |
1772 | * Only the filter_hash affects all records. | |
1773 | * Update if the record is not in the notrace hash. | |
1774 | */ | |
07bbe083 | 1775 | if (!notrace_hash || !ftrace_lookup_ip(notrace_hash, rec->ip)) |
ed926f9b SR |
1776 | match = 1; |
1777 | } else { | |
06a51d93 | 1778 | in_hash = !!ftrace_lookup_ip(hash, rec->ip); |
07bbe083 | 1779 | in_notrace_hash = !!ftrace_lookup_ip(notrace_hash, rec->ip); |
ed926f9b SR |
1780 | |
1781 | /* | |
07bbe083 SRG |
1782 | * We want to match all functions that are in the filter hash but | |
1783 | * not in the notrace hash. | |
ed926f9b | 1784 | */ |
07bbe083 | 1785 | if (in_hash && !in_notrace_hash) |
ed926f9b SR |
1786 | match = 1; |
1787 | } | |
1788 | if (!match) | |
1789 | continue; | |
1790 | ||
1791 | if (inc) { | |
1792 | rec->flags++; | |
0376bde1 | 1793 | if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX)) |
84b6d3e6 | 1794 | return false; |
79922b80 | 1795 | |
763e34e7 SRV |
1796 | if (ops->flags & FTRACE_OPS_FL_DIRECT) |
1797 | rec->flags |= FTRACE_FL_DIRECT; | |
1798 | ||
79922b80 SRRH |
1799 | /* |
1800 | * If there's only a single callback registered to a | |
1801 | * function, and the ops has a trampoline registered | |
1802 | * for it, then we can call it directly. | |
1803 | */ | |
fef5aeee | 1804 | if (ftrace_rec_count(rec) == 1 && ops->trampoline) |
79922b80 | 1805 | rec->flags |= FTRACE_FL_TRAMP; |
fef5aeee | 1806 | else |
79922b80 SRRH |
1807 | /* |
1808 | * If we are adding another function callback | |
1809 | * to this function, and the previous had a | |
bce0b6c5 SRRH |
1810 | * custom trampoline in use, then we need to go |
1811 | * back to the default trampoline. | |
79922b80 | 1812 | */ |
fef5aeee | 1813 | rec->flags &= ~FTRACE_FL_TRAMP; |
79922b80 | 1814 | |
08f6fba5 SR |
1815 | /* |
1816 | * If any ops wants regs saved for this function | |
1817 | * then all ops will get saved regs. | |
1818 | */ | |
1819 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) | |
1820 | rec->flags |= FTRACE_FL_REGS; | |
ed926f9b | 1821 | } else { |
0376bde1 | 1822 | if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0)) |
84b6d3e6 | 1823 | return false; |
ed926f9b | 1824 | rec->flags--; |
79922b80 | 1825 | |
763e34e7 SRV |
1826 | /* |
1827 | * Only the internal direct_ops should have the | |
1828 | * DIRECT flag set. Thus, if it is removing a | |
1829 | * function, then that function should no longer | |
1830 | * be direct. | |
1831 | */ | |
1832 | if (ops->flags & FTRACE_OPS_FL_DIRECT) | |
1833 | rec->flags &= ~FTRACE_FL_DIRECT; | |
1834 | ||
4fbb48cb SRRH |
1835 | /* |
1836 | * If the rec had REGS enabled and the ops that is | |
1837 | * being removed had REGS set, then see if there is | |
1838 | * still any ops for this record that wants regs. | |
1839 | * If not, we can stop recording them. | |
1840 | */ | |
0376bde1 | 1841 | if (ftrace_rec_count(rec) > 0 && |
4fbb48cb SRRH |
1842 | rec->flags & FTRACE_FL_REGS && |
1843 | ops->flags & FTRACE_OPS_FL_SAVE_REGS) { | |
1844 | if (!test_rec_ops_needs_regs(rec)) | |
1845 | rec->flags &= ~FTRACE_FL_REGS; | |
1846 | } | |
79922b80 | 1847 | |
fef5aeee | 1848 | /* |
a124692b CJ |
1849 | * The TRAMP needs to be set only if rec count |
1850 | * is decremented to one, and the ops that is | |
1851 | * left has a trampoline. As TRAMP can only be | |
1852 | * enabled if there is only a single ops attached | |
1853 | * to it. | |
fef5aeee | 1854 | */ |
a124692b | 1855 | if (ftrace_rec_count(rec) == 1 && |
4c75b0ff | 1856 | ftrace_find_tramp_ops_any_other(rec, ops)) |
a124692b CJ |
1857 | rec->flags |= FTRACE_FL_TRAMP; |
1858 | else | |
1859 | rec->flags &= ~FTRACE_FL_TRAMP; | |
fef5aeee | 1860 | |
79922b80 SRRH |
1861 | /* |
1862 | * flags will be cleared in ftrace_check_record() | |
1863 | * if rec count is zero. | |
1864 | */ | |
ed926f9b | 1865 | } |
cbad0fb2 MR |
1866 | |
1867 | /* | |
1868 | * If the rec has a single associated ops, and ops->func can be | |
1869 | * called directly, allow the call site to call via the ops. | |
1870 | */ | |
1871 | if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) && | |
1872 | ftrace_rec_count(rec) == 1 && | |
1873 | ftrace_ops_get_func(ops) == ops->func) | |
1874 | rec->flags |= FTRACE_FL_CALL_OPS; | |
1875 | else | |
1876 | rec->flags &= ~FTRACE_FL_CALL_OPS; | |
1877 | ||
ed926f9b | 1878 | count++; |
84b6d3e6 JO |
1879 | |
1880 | /* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */ | |
7375dca1 | 1881 | update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE; |
84b6d3e6 | 1882 | |
ed926f9b SR |
1883 | /* Shortcut, if we handled all records, we are done. */ |
1884 | if (!all && count == hash->count) | |
84b6d3e6 | 1885 | return update; |
ed926f9b | 1886 | } while_for_each_ftrace_rec(); |
84b6d3e6 JO |
1887 | |
1888 | return update; | |
ed926f9b SR |
1889 | } |
1890 | ||
da73f6d4 SRG |
1891 | /* |
1892 | * This is called when an ops is removed from tracing. It will decrement | |
1893 | * the counters of the dyn_ftrace records for all the functions that | |
1894 | * the @ops is attached to. | |
1895 | */ | |
07bbe083 | 1896 | static bool ftrace_hash_rec_disable(struct ftrace_ops *ops) |
ed926f9b | 1897 | { |
da73f6d4 | 1898 | return __ftrace_hash_rec_update(ops, false); |
ed926f9b SR |
1899 | } |
1900 | ||
da73f6d4 SRG |
1901 | /* |
1902 | * This is called when an ops is added to tracing. It will increment | |
1903 | * the counters of the dyn_ftrace records for all the functions that | |
1904 | * the @ops is attached to. | |
1905 | */ | |
07bbe083 | 1906 | static bool ftrace_hash_rec_enable(struct ftrace_ops *ops) |
ed926f9b | 1907 | { |
da73f6d4 | 1908 | return __ftrace_hash_rec_update(ops, true); |
ed926f9b SR |
1909 | } |
1910 | ||
1a88c071 SRG |
1911 | /* |
1912 | * This function will update what functions @ops traces when its filter | |
1913 | * changes. | |
1914 | * | |
1915 | * The @inc states if the @ops callbacks are going to be added or removed. | |
1916 | * When one of the @ops hashes is updated to a "new_hash", the dyn_ftrace | |
1917 | * records are updated via: | |
1918 | * | |
1919 | * ftrace_hash_rec_disable_modify(ops); | |
1920 | * ops->hash = new_hash | |
1921 | * ftrace_hash_rec_enable_modify(ops); | |
1922 | * | |
1923 | * Where the @ops is removed from all the records it is tracing using | |
1924 | * its old hash. The @ops hash is updated to the new hash, and then | |
1925 | * the @ops is added back to the records so that it is tracing all | |
1926 | * the new functions. | |
1927 | */ | |
1928 | static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops, bool inc) | |
84261912 SRRH |
1929 | { |
1930 | struct ftrace_ops *op; | |
1931 | ||
07bbe083 | 1932 | __ftrace_hash_rec_update(ops, inc); |
84261912 SRRH |
1933 | |
1934 | if (ops->func_hash != &global_ops.local_hash) | |
1935 | return; | |
1936 | ||
1937 | /* | |
1938 | * If the ops shares the global_ops hash, then we need to update | |
1939 | * all ops that are enabled and use this hash. | |
1940 | */ | |
1941 | do_for_each_ftrace_op(op, ftrace_ops_list) { | |
1942 | /* Already done */ | |
1943 | if (op == ops) | |
1944 | continue; | |
1945 | if (op->func_hash == &global_ops.local_hash) | |
07bbe083 | 1946 | __ftrace_hash_rec_update(op, inc); |
84261912 SRRH |
1947 | } while_for_each_ftrace_op(op); |
1948 | } | |
1949 | ||
07bbe083 | 1950 | static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops) |
84261912 | 1951 | { |
1a88c071 | 1952 | ftrace_hash_rec_update_modify(ops, false); |
84261912 SRRH |
1953 | } |
1954 | ||
07bbe083 | 1955 | static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops) |
84261912 | 1956 | { |
1a88c071 | 1957 | ftrace_hash_rec_update_modify(ops, true); |
84261912 SRRH |
1958 | } |
1959 | ||
f8b8be8a MH |
1960 | /* |
1961 | * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK | |
1962 | * or there is nothing to update, -EBUSY if it detects a conflict of the flag | |
1963 | * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs. | |
1964 | * Note that old_hash and new_hash have the following meanings: | |
1965 | * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected) | |
1966 | * - If the hash is EMPTY_HASH, it hits nothing | |
1967 | * - Anything else hits the recs which match the hash entries. | |
53cd885b SL |
1968 | * |
1969 | * DIRECT ops does not have IPMODIFY flag, but we still need to check it | |
1970 | * against functions with FTRACE_FL_IPMODIFY. If there is any overlap, call | |
1971 | * ops_func(SHARE_IPMODIFY_SELF) to make sure current ops can share with | |
1972 | * IPMODIFY. If ops_func(SHARE_IPMODIFY_SELF) returns non-zero, propagate | |
1973 | * the return value to the caller and eventually to the owner of the DIRECT | |
1974 | * ops. | |
f8b8be8a MH |
1975 | */ |
1976 | static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops, | |
1977 | struct ftrace_hash *old_hash, | |
1978 | struct ftrace_hash *new_hash) | |
1979 | { | |
1980 | struct ftrace_page *pg; | |
1981 | struct dyn_ftrace *rec, *end = NULL; | |
1982 | int in_old, in_new; | |
53cd885b | 1983 | bool is_ipmodify, is_direct; |
f8b8be8a MH |
1984 | |
1985 | /* Only update if the ops has been registered */ | |
1986 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) | |
1987 | return 0; | |
1988 | ||
53cd885b SL |
1989 | is_ipmodify = ops->flags & FTRACE_OPS_FL_IPMODIFY; |
1990 | is_direct = ops->flags & FTRACE_OPS_FL_DIRECT; | |
1991 | ||
1992 | /* neither IPMODIFY nor DIRECT, skip */ | |
1993 | if (!is_ipmodify && !is_direct) | |
1994 | return 0; | |
1995 | ||
1996 | if (WARN_ON_ONCE(is_ipmodify && is_direct)) | |
f8b8be8a MH |
1997 | return 0; |
1998 | ||
1999 | /* | |
53cd885b SL |
2000 | * Since IPMODIFY and DIRECT are very address-sensitive | |
2001 | * actions, we do not allow ftrace_ops to set all functions to a new | |
2002 | * hash. | |
f8b8be8a MH |
2003 | */ |
2004 | if (!new_hash || !old_hash) | |
2005 | return -EINVAL; | |
2006 | ||
2007 | /* Update rec->flags */ | |
2008 | do_for_each_ftrace_rec(pg, rec) { | |
546fece4 SRRH |
2009 | |
2010 | if (rec->flags & FTRACE_FL_DISABLED) | |
2011 | continue; | |
2012 | ||
f8b8be8a MH |
2013 | /* We need to update only differences of filter_hash */ |
2014 | in_old = !!ftrace_lookup_ip(old_hash, rec->ip); | |
2015 | in_new = !!ftrace_lookup_ip(new_hash, rec->ip); | |
2016 | if (in_old == in_new) | |
2017 | continue; | |
2018 | ||
2019 | if (in_new) { | |
53cd885b SL |
2020 | if (rec->flags & FTRACE_FL_IPMODIFY) { |
2021 | int ret; | |
2022 | ||
2023 | /* Cannot have two ipmodify on same rec */ | |
2024 | if (is_ipmodify) | |
2025 | goto rollback; | |
2026 | ||
2027 | FTRACE_WARN_ON(rec->flags & FTRACE_FL_DIRECT); | |
2028 | ||
2029 | /* | |
2030 | * Another ops with IPMODIFY is already | |
2031 | * attached. We are now attaching a direct | |
2032 | * ops. Run SHARE_IPMODIFY_SELF, to check | |
2033 | * whether sharing is supported. | |
2034 | */ | |
2035 | if (!ops->ops_func) | |
2036 | return -EBUSY; | |
2037 | ret = ops->ops_func(ops, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF); | |
2038 | if (ret) | |
2039 | return ret; | |
2040 | } else if (is_ipmodify) { | |
2041 | rec->flags |= FTRACE_FL_IPMODIFY; | |
2042 | } | |
2043 | } else if (is_ipmodify) { | |
f8b8be8a | 2044 | rec->flags &= ~FTRACE_FL_IPMODIFY; |
53cd885b | 2045 | } |
f8b8be8a MH |
2046 | } while_for_each_ftrace_rec(); |
2047 | ||
2048 | return 0; | |
2049 | ||
2050 | rollback: | |
2051 | end = rec; | |
2052 | ||
2053 | /* Roll back what we did above */ | |
2054 | do_for_each_ftrace_rec(pg, rec) { | |
546fece4 SRRH |
2055 | |
2056 | if (rec->flags & FTRACE_FL_DISABLED) | |
2057 | continue; | |
2058 | ||
f8b8be8a | 2059 | if (rec == end) |
77e53cb2 | 2060 | return -EBUSY; |
f8b8be8a MH |
2061 | |
2062 | in_old = !!ftrace_lookup_ip(old_hash, rec->ip); | |
2063 | in_new = !!ftrace_lookup_ip(new_hash, rec->ip); | |
2064 | if (in_old == in_new) | |
2065 | continue; | |
2066 | ||
2067 | if (in_new) | |
2068 | rec->flags &= ~FTRACE_FL_IPMODIFY; | |
2069 | else | |
2070 | rec->flags |= FTRACE_FL_IPMODIFY; | |
2071 | } while_for_each_ftrace_rec(); | |
2072 | ||
f8b8be8a MH |
2073 | return -EBUSY; |
2074 | } | |
2075 | ||
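/*
 * In the three wrappers below, an empty filter hash means the ops traces
 * every function, so it is normalized to NULL, i.e. the "hits all recs"
 * case described above.
 */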
2076 | static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops) | |
2077 | { | |
2078 | struct ftrace_hash *hash = ops->func_hash->filter_hash; | |
2079 | ||
2080 | if (ftrace_hash_empty(hash)) | |
2081 | hash = NULL; | |
2082 | ||
2083 | return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash); | |
2084 | } | |
2085 | ||
2086 | /* Disabling always succeeds */ | |
2087 | static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops) | |
2088 | { | |
2089 | struct ftrace_hash *hash = ops->func_hash->filter_hash; | |
2090 | ||
2091 | if (ftrace_hash_empty(hash)) | |
2092 | hash = NULL; | |
2093 | ||
2094 | __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH); | |
2095 | } | |
2096 | ||
2097 | static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops, | |
2098 | struct ftrace_hash *new_hash) | |
2099 | { | |
2100 | struct ftrace_hash *old_hash = ops->func_hash->filter_hash; | |
2101 | ||
2102 | if (ftrace_hash_empty(old_hash)) | |
2103 | old_hash = NULL; | |
2104 | ||
2105 | if (ftrace_hash_empty(new_hash)) | |
2106 | new_hash = NULL; | |
2107 | ||
2108 | return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash); | |
2109 | } | |
2110 | ||
b05086c7 | 2111 | static void print_ip_ins(const char *fmt, const unsigned char *p) |
b17e8a37 | 2112 | { |
6c14133d | 2113 | char ins[MCOUNT_INSN_SIZE]; |
b17e8a37 | 2114 | |
6c14133d SRV |
2115 | if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) { |
2116 | printk(KERN_CONT "%s[FAULT] %px\n", fmt, p); | |
2117 | return; | |
2118 | } | |
2119 | ||
b17e8a37 | 2120 | printk(KERN_CONT "%s", fmt); |
30f7d1ca | 2121 | pr_cont("%*phC", MCOUNT_INSN_SIZE, ins); |
b17e8a37 SR |
2122 | } |
2123 | ||
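/*
 * The bytes are printed as colon-separated hex. For example (sketch, on
 * x86-64 where MCOUNT_INSN_SIZE is 5 and the site holds a 5-byte NOP):
 *
 *	actual:   0f:1f:44:00:00
 */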
02a392a0 | 2124 | enum ftrace_bug_type ftrace_bug_type; |
b05086c7 | 2125 | const void *ftrace_expected; |
02a392a0 SRRH |
2126 | |
2127 | static void print_bug_type(void) | |
2128 | { | |
2129 | switch (ftrace_bug_type) { | |
2130 | case FTRACE_BUG_UNKNOWN: | |
2131 | break; | |
2132 | case FTRACE_BUG_INIT: | |
2133 | pr_info("Initializing ftrace call sites\n"); | |
2134 | break; | |
2135 | case FTRACE_BUG_NOP: | |
2136 | pr_info("Setting ftrace call site to NOP\n"); | |
2137 | break; | |
2138 | case FTRACE_BUG_CALL: | |
2139 | pr_info("Setting ftrace call site to call ftrace function\n"); | |
2140 | break; | |
2141 | case FTRACE_BUG_UPDATE: | |
2142 | pr_info("Updating ftrace call site to call a different ftrace function\n"); | |
2143 | break; | |
2144 | } | |
2145 | } | |
2146 | ||
c88fd863 SR |
2147 | /** |
2148 | * ftrace_bug - report and shutdown function tracer | |
2149 | * @failed: The failed type (EFAULT, EINVAL, EPERM) | |
4fd3279b | 2150 | * @rec: The record that failed |
c88fd863 SR |
2151 | * |
2152 | * The arch code that enables or disables the function tracing | |
2153 | * can call ftrace_bug() when it has detected a problem in | |
2154 | * modifying the code. @failed should be one of either: | |
2155 | * EFAULT - if the problem happens on reading the @ip address | |
2156 | * EINVAL - if what is read at @ip is not what was expected | |
9efb85c5 | 2157 | * EPERM - if the problem happens on writing to the @ip address |
c88fd863 | 2158 | */ |
4fd3279b | 2159 | void ftrace_bug(int failed, struct dyn_ftrace *rec) |
b17e8a37 | 2160 | { |
4fd3279b SRRH |
2161 | unsigned long ip = rec ? rec->ip : 0; |
2162 | ||
c143b775 CJ |
2163 | pr_info("------------[ ftrace bug ]------------\n"); |
2164 | ||
b17e8a37 SR |
2165 | switch (failed) { |
2166 | case -EFAULT: | |
b17e8a37 | 2167 | pr_info("ftrace faulted on modifying "); |
2062a4e8 | 2168 | print_ip_sym(KERN_INFO, ip); |
b17e8a37 SR |
2169 | break; |
2170 | case -EINVAL: | |
b17e8a37 | 2171 | pr_info("ftrace failed to modify "); |
2062a4e8 | 2172 | print_ip_sym(KERN_INFO, ip); |
b05086c7 | 2173 | print_ip_ins(" actual: ", (unsigned char *)ip); |
4fd3279b | 2174 | pr_cont("\n"); |
b05086c7 SRRH |
2175 | if (ftrace_expected) { |
2176 | print_ip_ins(" expected: ", ftrace_expected); | |
2177 | pr_cont("\n"); | |
2178 | } | |
b17e8a37 SR |
2179 | break; |
2180 | case -EPERM: | |
b17e8a37 | 2181 | pr_info("ftrace faulted on writing "); |
2062a4e8 | 2182 | print_ip_sym(KERN_INFO, ip); |
b17e8a37 SR |
2183 | break; |
2184 | default: | |
b17e8a37 | 2185 | pr_info("ftrace faulted on unknown error "); |
2062a4e8 | 2186 | print_ip_sym(KERN_INFO, ip); |
b17e8a37 | 2187 | } |
02a392a0 | 2188 | print_bug_type(); |
4fd3279b SRRH |
2189 | if (rec) { |
2190 | struct ftrace_ops *ops = NULL; | |
2191 | ||
2192 | pr_info("ftrace record flags: %lx\n", rec->flags); | |
cbad0fb2 MR |
2193 | pr_cont(" (%ld)%s%s", ftrace_rec_count(rec), |
2194 | rec->flags & FTRACE_FL_REGS ? " R" : " ", | |
2195 | rec->flags & FTRACE_FL_CALL_OPS ? " O" : " "); | |
4fd3279b SRRH |
2196 | if (rec->flags & FTRACE_FL_TRAMP_EN) { |
2197 | ops = ftrace_find_tramp_ops_any(rec); | |
39daa7b9 SRRH |
2198 | if (ops) { |
2199 | do { | |
2200 | pr_cont("\ttramp: %pS (%pS)", | |
2201 | (void *)ops->trampoline, | |
2202 | (void *)ops->func); | |
2203 | ops = ftrace_find_tramp_ops_next(rec, ops); | |
2204 | } while (ops); | |
2205 | } else | |
4fd3279b SRRH |
2206 | pr_cont("\ttramp: ERROR!"); |
2207 | ||
2208 | } | |
2209 | ip = ftrace_get_addr_curr(rec); | |
39daa7b9 | 2210 | pr_cont("\n expected tramp: %lx\n", ip); |
4fd3279b | 2211 | } |
c143b775 CJ |
2212 | |
2213 | FTRACE_WARN_ON_ONCE(1); | |
b17e8a37 SR |
2214 | } |
2215 | ||
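/*
 * Typical usage (sketch), matching the pattern in ftrace_replace_code()
 * below:
 *
 *	failed = __ftrace_replace_code(rec, enable);
 *	if (failed)
 *		ftrace_bug(failed, rec);
 */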
7375dca1 | 2216 | static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update) |
5072c59f | 2217 | { |
64fbcd16 | 2218 | unsigned long flag = 0UL; |
e7d3737e | 2219 | |
02a392a0 SRRH |
2220 | ftrace_bug_type = FTRACE_BUG_UNKNOWN; |
2221 | ||
cf04f2d5 | 2222 | if (skip_record(rec)) |
b7ffffbb SRRH |
2223 | return FTRACE_UPDATE_IGNORE; |
2224 | ||
982c350b | 2225 | /* |
30fb6aa7 | 2226 | * If we are updating calls: |
982c350b | 2227 | * |
ed926f9b SR |
2228 | * If the record has a ref count, then we need to enable it |
2229 | * because someone is using it. | |
982c350b | 2230 | * |
ed926f9b SR |
2231 | * Otherwise we make sure it's disabled. | |
2232 | * | |
30fb6aa7 | 2233 | * If we are disabling calls, then disable all records that |
ed926f9b | 2234 | * are enabled. |
982c350b | 2235 | */ |
0376bde1 | 2236 | if (enable && ftrace_rec_count(rec)) |
ed926f9b | 2237 | flag = FTRACE_FL_ENABLED; |
982c350b | 2238 | |
08f6fba5 | 2239 | /* |
79922b80 SRRH |
2240 | * If enabling and the REGS flag does not match the REGS_EN, or |
2241 | * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore | |
2242 | * this record. Set flags to fail the compare against ENABLED. | |
763e34e7 | 2243 | * Same for direct calls. |
08f6fba5 | 2244 | */ |
79922b80 | 2245 | if (flag) { |
763e34e7 | 2246 | if (!(rec->flags & FTRACE_FL_REGS) != |
79922b80 SRRH |
2247 | !(rec->flags & FTRACE_FL_REGS_EN)) |
2248 | flag |= FTRACE_FL_REGS; | |
2249 | ||
763e34e7 | 2250 | if (!(rec->flags & FTRACE_FL_TRAMP) != |
79922b80 SRRH |
2251 | !(rec->flags & FTRACE_FL_TRAMP_EN)) |
2252 | flag |= FTRACE_FL_TRAMP; | |
763e34e7 SRV |
2253 | |
2254 | /* | |
2255 | * Direct calls are special, as count matters. | |
2256 | * We must test the record for direct when the | |
2257 | * DIRECT and DIRECT_EN flags do not match, but only | |
2258 | * if the count is 1. That's because, if the | |
2259 | * count is something other than one, we do not | |
2260 | * want the direct enabled (it will be done via the | |
2261 | * direct helper). But if DIRECT_EN is set, and | |
2262 | * the count is not one, we need to clear it. | |
cbad0fb2 | 2263 | * |
763e34e7 SRV |
2264 | */ |
2265 | if (ftrace_rec_count(rec) == 1) { | |
2266 | if (!(rec->flags & FTRACE_FL_DIRECT) != | |
2267 | !(rec->flags & FTRACE_FL_DIRECT_EN)) | |
2268 | flag |= FTRACE_FL_DIRECT; | |
2269 | } else if (rec->flags & FTRACE_FL_DIRECT_EN) { | |
2270 | flag |= FTRACE_FL_DIRECT; | |
2271 | } | |
cbad0fb2 MR |
2272 | |
2273 | /* | |
2274 | * Ops calls are special, as count matters. | |
2275 | * As with direct calls, they must only be enabled when count | |
2276 | * is one, otherwise they'll be handled via the list ops. | |
2277 | */ | |
2278 | if (ftrace_rec_count(rec) == 1) { | |
2279 | if (!(rec->flags & FTRACE_FL_CALL_OPS) != | |
2280 | !(rec->flags & FTRACE_FL_CALL_OPS_EN)) | |
2281 | flag |= FTRACE_FL_CALL_OPS; | |
2282 | } else if (rec->flags & FTRACE_FL_CALL_OPS_EN) { | |
2283 | flag |= FTRACE_FL_CALL_OPS; | |
2284 | } | |
79922b80 | 2285 | } |
08f6fba5 | 2286 | |
64fbcd16 XG |
2287 | /* If the state of this record hasn't changed, then do nothing */ |
2288 | if ((rec->flags & FTRACE_FL_ENABLED) == flag) | |
c88fd863 | 2289 | return FTRACE_UPDATE_IGNORE; |
982c350b | 2290 | |
64fbcd16 | 2291 | if (flag) { |
08f6fba5 SR |
2292 | /* Save off if rec is being enabled (for return value) */ |
2293 | flag ^= rec->flags & FTRACE_FL_ENABLED; | |
2294 | ||
2295 | if (update) { | |
e11b521a | 2296 | rec->flags |= FTRACE_FL_ENABLED | FTRACE_FL_TOUCHED; |
08f6fba5 SR |
2297 | if (flag & FTRACE_FL_REGS) { |
2298 | if (rec->flags & FTRACE_FL_REGS) | |
2299 | rec->flags |= FTRACE_FL_REGS_EN; | |
2300 | else | |
2301 | rec->flags &= ~FTRACE_FL_REGS_EN; | |
2302 | } | |
79922b80 SRRH |
2303 | if (flag & FTRACE_FL_TRAMP) { |
2304 | if (rec->flags & FTRACE_FL_TRAMP) | |
2305 | rec->flags |= FTRACE_FL_TRAMP_EN; | |
2306 | else | |
2307 | rec->flags &= ~FTRACE_FL_TRAMP_EN; | |
2308 | } | |
d19ad077 | 2309 | |
6ce2c04f SRG |
2310 | /* Keep track of anything that modifies the function */ |
2311 | if (rec->flags & (FTRACE_FL_DIRECT | FTRACE_FL_IPMODIFY)) | |
2312 | rec->flags |= FTRACE_FL_MODIFIED; | |
2313 | ||
763e34e7 SRV |
2314 | if (flag & FTRACE_FL_DIRECT) { |
2315 | /* | |
2316 | * If there's only one user (direct_ops helper) | |
2317 | * then we can call the direct function | |
2318 | * directly (no ftrace trampoline). | |
2319 | */ | |
2320 | if (ftrace_rec_count(rec) == 1) { | |
2321 | if (rec->flags & FTRACE_FL_DIRECT) | |
2322 | rec->flags |= FTRACE_FL_DIRECT_EN; | |
2323 | else | |
2324 | rec->flags &= ~FTRACE_FL_DIRECT_EN; | |
2325 | } else { | |
2326 | /* | |
2327 | * Can only call directly if there's | |
2328 | * only one callback to the function. | |
2329 | */ | |
2330 | rec->flags &= ~FTRACE_FL_DIRECT_EN; | |
2331 | } | |
2332 | } | |
cbad0fb2 MR |
2333 | |
2334 | if (flag & FTRACE_FL_CALL_OPS) { | |
2335 | if (ftrace_rec_count(rec) == 1) { | |
2336 | if (rec->flags & FTRACE_FL_CALL_OPS) | |
2337 | rec->flags |= FTRACE_FL_CALL_OPS_EN; | |
2338 | else | |
2339 | rec->flags &= ~FTRACE_FL_CALL_OPS_EN; | |
2340 | } else { | |
2341 | /* | |
2342 | * Can only call directly if there's | |
2343 | * only one set of associated ops. | |
2344 | */ | |
2345 | rec->flags &= ~FTRACE_FL_CALL_OPS_EN; | |
2346 | } | |
2347 | } | |
08f6fba5 SR |
2348 | } |
2349 | ||
2350 | /* | |
2351 | * If this record is being updated from a nop, then | |
2352 | * return UPDATE_MAKE_CALL. | |
08f6fba5 SR |
2353 | * Otherwise, |
2354 | * return UPDATE_MODIFY_CALL to tell the caller to convert | |
f1b2f2bd | 2355 | * from the save regs to a non-save regs function, or |
79922b80 | 2356 | * vice versa, or from a trampoline call. |
08f6fba5 | 2357 | */ |
02a392a0 SRRH |
2358 | if (flag & FTRACE_FL_ENABLED) { |
2359 | ftrace_bug_type = FTRACE_BUG_CALL; | |
08f6fba5 | 2360 | return FTRACE_UPDATE_MAKE_CALL; |
02a392a0 | 2361 | } |
f1b2f2bd | 2362 | |
02a392a0 | 2363 | ftrace_bug_type = FTRACE_BUG_UPDATE; |
f1b2f2bd | 2364 | return FTRACE_UPDATE_MODIFY_CALL; |
c88fd863 SR |
2365 | } |
2366 | ||
08f6fba5 SR |
2367 | if (update) { |
2368 | /* If there's no more users, clear all flags */ | |
0376bde1 | 2369 | if (!ftrace_rec_count(rec)) |
e11b521a | 2370 | rec->flags &= FTRACE_NOCLEAR_FLAGS; |
08f6fba5 | 2371 | else |
b24d443b SRRH |
2372 | /* |
2373 | * Just disable the record, but keep the ops TRAMP | |
2374 | * and REGS states. The _EN flags must be disabled though. | |
2375 | */ | |
2376 | rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN | | |
cbad0fb2 MR |
2377 | FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN | |
2378 | FTRACE_FL_CALL_OPS_EN); | |
08f6fba5 | 2379 | } |
c88fd863 | 2380 | |
02a392a0 | 2381 | ftrace_bug_type = FTRACE_BUG_NOP; |
c88fd863 SR |
2382 | return FTRACE_UPDATE_MAKE_NOP; |
2383 | } | |
2384 | ||
2385 | /** | |
6130722f | 2386 | * ftrace_update_record - set a record that now is tracing or not |
c88fd863 | 2387 | * @rec: the record to update |
7375dca1 | 2388 | * @enable: set to true if the record is tracing, false to force disable |
c88fd863 SR |
2389 | * |
2390 | * The records that represent all functions that can be traced need | |
2391 | * to be updated when tracing has been enabled. | |
2392 | */ | |
7375dca1 | 2393 | int ftrace_update_record(struct dyn_ftrace *rec, bool enable) |
c88fd863 | 2394 | { |
7375dca1 | 2395 | return ftrace_check_record(rec, enable, true); |
c88fd863 SR |
2396 | } |
2397 | ||
2398 | /** | |
6130722f | 2399 | * ftrace_test_record - check if the record has been enabled or not |
c88fd863 | 2400 | * @rec: the record to test |
7375dca1 | 2401 | * @enable: set to true to check if enabled, false if it is disabled |
c88fd863 SR |
2402 | * |
2403 | * The arch code may need to test if a record is already set to | |
2404 | * tracing to determine how to modify the function code that it | |
2405 | * represents. | |
2406 | */ | |
7375dca1 | 2407 | int ftrace_test_record(struct dyn_ftrace *rec, bool enable) |
c88fd863 | 2408 | { |
7375dca1 | 2409 | return ftrace_check_record(rec, enable, false); |
c88fd863 SR |
2410 | } |
2411 | ||
5fecaa04 SRRH |
2412 | static struct ftrace_ops * |
2413 | ftrace_find_tramp_ops_any(struct dyn_ftrace *rec) | |
2414 | { | |
2415 | struct ftrace_ops *op; | |
fef5aeee | 2416 | unsigned long ip = rec->ip; |
5fecaa04 SRRH |
2417 | |
2418 | do_for_each_ftrace_op(op, ftrace_ops_list) { | |
2419 | ||
2420 | if (!op->trampoline) | |
2421 | continue; | |
2422 | ||
fef5aeee | 2423 | if (hash_contains_ip(ip, op->func_hash)) |
5fecaa04 SRRH |
2424 | return op; |
2425 | } while_for_each_ftrace_op(op); | |
2426 | ||
2427 | return NULL; | |
2428 | } | |
2429 | ||
4c75b0ff NR |
2430 | static struct ftrace_ops * |
2431 | ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude) | |
2432 | { | |
2433 | struct ftrace_ops *op; | |
2434 | unsigned long ip = rec->ip; | |
2435 | ||
2436 | do_for_each_ftrace_op(op, ftrace_ops_list) { | |
2437 | ||
2438 | if (op == op_exclude || !op->trampoline) | |
2439 | continue; | |
2440 | ||
2441 | if (hash_contains_ip(ip, op->func_hash)) | |
2442 | return op; | |
2443 | } while_for_each_ftrace_op(op); | |
2444 | ||
2445 | return NULL; | |
2446 | } | |
2447 | ||
39daa7b9 SRRH |
2448 | static struct ftrace_ops * |
2449 | ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, | |
2450 | struct ftrace_ops *op) | |
2451 | { | |
2452 | unsigned long ip = rec->ip; | |
2453 | ||
2454 | while_for_each_ftrace_op(op) { | |
2455 | ||
2456 | if (!op->trampoline) | |
2457 | continue; | |
2458 | ||
2459 | if (hash_contains_ip(ip, op->func_hash)) | |
2460 | return op; | |
026bb845 | 2461 | } |
39daa7b9 SRRH |
2462 | |
2463 | return NULL; | |
2464 | } | |
2465 | ||
79922b80 SRRH |
2466 | static struct ftrace_ops * |
2467 | ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec) | |
2468 | { | |
2469 | struct ftrace_ops *op; | |
fef5aeee | 2470 | unsigned long ip = rec->ip; |
79922b80 | 2471 | |
fef5aeee SRRH |
2472 | /* |
2473 | * Need to check removed ops first. | |
2474 | * If they are being removed, and this rec has a tramp, | |
2475 | * and this rec is in the ops list, then it would be the | |
2476 | * one with the tramp. | |
2477 | */ | |
2478 | if (removed_ops) { | |
2479 | if (hash_contains_ip(ip, &removed_ops->old_hash)) | |
79922b80 SRRH |
2480 | return removed_ops; |
2481 | } | |
2482 | ||
fef5aeee SRRH |
2483 | /* |
2484 | * Need to find the current trampoline for a rec. | |
2485 | * Now, a trampoline is only attached to a rec if there | |
2486 | * was a single 'ops' attached to it. But this can be called | |
2487 | * when we are adding another op to the rec or removing the | |
2488 | * current one. Thus, if the op is being added, we can | |
2489 | * ignore it because it hasn't attached itself to the rec | |
4fc40904 SRRH |
2490 | * yet. |
2491 | * | |
2492 | * If an ops is being modified (hooking to different functions) | |
2493 | * then we don't care about the new functions that are being | |
2494 | * added, just the old ones (that are probably being removed). | |
2495 | * | |
2496 | * If we are adding an ops to a function that already is using | |
2497 | * a trampoline, it needs to be removed (trampolines are only | |
2498 | * for single ops connected), then an ops that is not being | |
2499 | * modified also needs to be checked. | |
fef5aeee | 2500 | */ |
79922b80 | 2501 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
fef5aeee SRRH |
2502 | |
2503 | if (!op->trampoline) | |
2504 | continue; | |
2505 | ||
2506 | /* | |
2507 | * If the ops is being added, it hasn't gotten to | |
2508 | * the point to be removed from this tree yet. | |
2509 | */ | |
2510 | if (op->flags & FTRACE_OPS_FL_ADDING) | |
79922b80 SRRH |
2511 | continue; |
2512 | ||
4fc40904 | 2513 | |
fef5aeee | 2514 | /* |
4fc40904 SRRH |
2515 | * If the ops is being modified and is in the old |
2516 | * hash, then it is probably being removed from this | |
2517 | * function. | |
fef5aeee | 2518 | */ |
fef5aeee SRRH |
2519 | if ((op->flags & FTRACE_OPS_FL_MODIFYING) && |
2520 | hash_contains_ip(ip, &op->old_hash)) | |
79922b80 | 2521 | return op; |
4fc40904 SRRH |
2522 | /* |
2523 | * If the ops is not being added or modified, and it's | |
2524 | * in its normal filter hash, then this must be the one | |
2525 | * we want! | |
2526 | */ | |
2527 | if (!(op->flags & FTRACE_OPS_FL_MODIFYING) && | |
2528 | hash_contains_ip(ip, op->func_hash)) | |
2529 | return op; | |
79922b80 SRRH |
2530 | |
2531 | } while_for_each_ftrace_op(op); | |
2532 | ||
2533 | return NULL; | |
2534 | } | |
2535 | ||
2536 | static struct ftrace_ops * | |
2537 | ftrace_find_tramp_ops_new(struct dyn_ftrace *rec) | |
2538 | { | |
2539 | struct ftrace_ops *op; | |
fef5aeee | 2540 | unsigned long ip = rec->ip; |
79922b80 SRRH |
2541 | |
2542 | do_for_each_ftrace_op(op, ftrace_ops_list) { | |
2543 | /* pass rec in as regs to have non-NULL val */ | |
fef5aeee | 2544 | if (hash_contains_ip(ip, op->func_hash)) |
79922b80 SRRH |
2545 | return op; |
2546 | } while_for_each_ftrace_op(op); | |
2547 | ||
2548 | return NULL; | |
2549 | } | |
2550 | ||
cbad0fb2 MR |
2551 | struct ftrace_ops * |
2552 | ftrace_find_unique_ops(struct dyn_ftrace *rec) | |
2553 | { | |
2554 | struct ftrace_ops *op, *found = NULL; | |
2555 | unsigned long ip = rec->ip; | |
2556 | ||
2557 | do_for_each_ftrace_op(op, ftrace_ops_list) { | |
2558 | ||
2559 | if (hash_contains_ip(ip, op->func_hash)) { | |
2560 | if (found) | |
2561 | return NULL; | |
2562 | found = op; | |
2563 | } | |
2564 | ||
2565 | } while_for_each_ftrace_op(op); | |
2566 | ||
2567 | return found; | |
2568 | } | |
2569 | ||
763e34e7 SRV |
2570 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS |
2571 | /* Protected by rcu_tasks for reading, and direct_mutex for writing */ | |
d05cb470 | 2572 | static struct ftrace_hash __rcu *direct_functions = EMPTY_HASH; |
763e34e7 SRV |
2573 | static DEFINE_MUTEX(direct_mutex); |
2574 | ||
2575 | /* | |
2576 | * Search the direct_functions hash to see if the given instruction pointer | |
2577 | * has a direct caller attached to it. | |
2578 | */ | |
ff205766 | 2579 | unsigned long ftrace_find_rec_direct(unsigned long ip) |
763e34e7 SRV |
2580 | { |
2581 | struct ftrace_func_entry *entry; | |
2582 | ||
2583 | entry = __ftrace_lookup_ip(direct_functions, ip); | |
2584 | if (!entry) | |
2585 | return 0; | |
2586 | ||
2587 | return entry->direct; | |
2588 | } | |
2589 | ||
2590 | static void call_direct_funcs(unsigned long ip, unsigned long pip, | |
d19ad077 | 2591 | struct ftrace_ops *ops, struct ftrace_regs *fregs) |
763e34e7 | 2592 | { |
dbaccb61 | 2593 | unsigned long addr = READ_ONCE(ops->direct_call); |
763e34e7 | 2594 | |
763e34e7 SRV |
2595 | if (!addr) |
2596 | return; | |
2597 | ||
9705bc70 | 2598 | arch_ftrace_set_direct_caller(fregs, addr); |
763e34e7 | 2599 | } |
763e34e7 SRV |
2600 | #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ |
2601 | ||
7413af1f SRRH |
2602 | /** |
2603 | * ftrace_get_addr_new - Get the call address to set to | |
2604 | * @rec: The ftrace record descriptor | |
2605 | * | |
2606 | * If the record has the FTRACE_FL_REGS set, that means that it | |
2607 | * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS | |
5c8c206e | 2608 | * is not set, then it wants to convert to the normal callback. |
7413af1f | 2609 | * |
d1530413 | 2610 | * Returns: the address of the trampoline to set to |
7413af1f SRRH |
2611 | */ |
2612 | unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec) | |
2613 | { | |
79922b80 | 2614 | struct ftrace_ops *ops; |
763e34e7 SRV |
2615 | unsigned long addr; |
2616 | ||
2617 | if ((rec->flags & FTRACE_FL_DIRECT) && | |
2618 | (ftrace_rec_count(rec) == 1)) { | |
ff205766 | 2619 | addr = ftrace_find_rec_direct(rec->ip); |
763e34e7 SRV |
2620 | if (addr) |
2621 | return addr; | |
2622 | WARN_ON_ONCE(1); | |
2623 | } | |
79922b80 SRRH |
2624 | |
2625 | /* Trampolines take precedence over regs */ | |
2626 | if (rec->flags & FTRACE_FL_TRAMP) { | |
2627 | ops = ftrace_find_tramp_ops_new(rec); | |
2628 | if (FTRACE_WARN_ON(!ops || !ops->trampoline)) { | |
bce0b6c5 SRRH |
2629 | pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n", |
2630 | (void *)rec->ip, (void *)rec->ip, rec->flags); | |
79922b80 SRRH |
2631 | /* Ftrace is shutting down, return anything */ |
2632 | return (unsigned long)FTRACE_ADDR; | |
2633 | } | |
2634 | return ops->trampoline; | |
2635 | } | |
2636 | ||
7413af1f SRRH |
2637 | if (rec->flags & FTRACE_FL_REGS) |
2638 | return (unsigned long)FTRACE_REGS_ADDR; | |
2639 | else | |
2640 | return (unsigned long)FTRACE_ADDR; | |
2641 | } | |
2642 | ||
2643 | /** | |
2644 | * ftrace_get_addr_curr - Get the call address that is already there | |
2645 | * @rec: The ftrace record descriptor | |
2646 | * | |
2647 | * The FTRACE_FL_REGS_EN is set when the record already points to | |
2648 | * a function that saves all the regs. Basically the '_EN' version | |
2649 | * represents the current state of the function. | |
2650 | * | |
d1530413 | 2651 | * Returns: the address of the trampoline that is currently being called |
7413af1f SRRH |
2652 | */ |
2653 | unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec) | |
2654 | { | |
79922b80 | 2655 | struct ftrace_ops *ops; |
763e34e7 SRV |
2656 | unsigned long addr; |
2657 | ||
2658 | /* Direct calls take precedence over trampolines */ | |
2659 | if (rec->flags & FTRACE_FL_DIRECT_EN) { | |
ff205766 | 2660 | addr = ftrace_find_rec_direct(rec->ip); |
763e34e7 SRV |
2661 | if (addr) |
2662 | return addr; | |
2663 | WARN_ON_ONCE(1); | |
2664 | } | |
79922b80 SRRH |
2665 | |
2666 | /* Trampolines take precedence over regs */ | |
2667 | if (rec->flags & FTRACE_FL_TRAMP_EN) { | |
2668 | ops = ftrace_find_tramp_ops_curr(rec); | |
2669 | if (FTRACE_WARN_ON(!ops)) { | |
a395d6a7 JP |
2670 | pr_warn("Bad trampoline accounting at: %p (%pS)\n", |
2671 | (void *)rec->ip, (void *)rec->ip); | |
79922b80 SRRH |
2672 | /* Ftrace is shutting down, return anything */ |
2673 | return (unsigned long)FTRACE_ADDR; | |
2674 | } | |
2675 | return ops->trampoline; | |
2676 | } | |
2677 | ||
7413af1f SRRH |
2678 | if (rec->flags & FTRACE_FL_REGS_EN) |
2679 | return (unsigned long)FTRACE_REGS_ADDR; | |
2680 | else | |
2681 | return (unsigned long)FTRACE_ADDR; | |
2682 | } | |
2683 | ||
c88fd863 | 2684 | static int |
7375dca1 | 2685 | __ftrace_replace_code(struct dyn_ftrace *rec, bool enable) |
c88fd863 | 2686 | { |
08f6fba5 | 2687 | unsigned long ftrace_old_addr; |
c88fd863 SR |
2688 | unsigned long ftrace_addr; |
2689 | int ret; | |
2690 | ||
7c0868e0 | 2691 | ftrace_addr = ftrace_get_addr_new(rec); |
c88fd863 | 2692 | |
7c0868e0 SRRH |
2693 | /* This needs to be done before we call ftrace_update_record */ |
2694 | ftrace_old_addr = ftrace_get_addr_curr(rec); | |
2695 | ||
2696 | ret = ftrace_update_record(rec, enable); | |
08f6fba5 | 2697 | |
02a392a0 SRRH |
2698 | ftrace_bug_type = FTRACE_BUG_UNKNOWN; |
2699 | ||
c88fd863 SR |
2700 | switch (ret) { |
2701 | case FTRACE_UPDATE_IGNORE: | |
2702 | return 0; | |
2703 | ||
2704 | case FTRACE_UPDATE_MAKE_CALL: | |
02a392a0 | 2705 | ftrace_bug_type = FTRACE_BUG_CALL; |
64fbcd16 | 2706 | return ftrace_make_call(rec, ftrace_addr); |
c88fd863 SR |
2707 | |
2708 | case FTRACE_UPDATE_MAKE_NOP: | |
02a392a0 | 2709 | ftrace_bug_type = FTRACE_BUG_NOP; |
39b5552c | 2710 | return ftrace_make_nop(NULL, rec, ftrace_old_addr); |
08f6fba5 | 2711 | |
08f6fba5 | 2712 | case FTRACE_UPDATE_MODIFY_CALL: |
02a392a0 | 2713 | ftrace_bug_type = FTRACE_BUG_UPDATE; |
08f6fba5 | 2714 | return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); |
5072c59f SR |
2715 | } |
2716 | ||
9efb85c5 | 2717 | return -1; /* unknown ftrace bug */ |
5072c59f SR |
2718 | } |
2719 | ||
a0572f68 | 2720 | void __weak ftrace_replace_code(int mod_flags) |
3c1720f0 | 2721 | { |
3c1720f0 SR |
2722 | struct dyn_ftrace *rec; |
2723 | struct ftrace_page *pg; | |
7375dca1 | 2724 | bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL; |
a0572f68 | 2725 | int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL; |
6a24a244 | 2726 | int failed; |
3c1720f0 | 2727 | |
45a4a237 SR |
2728 | if (unlikely(ftrace_disabled)) |
2729 | return; | |
2730 | ||
265c831c | 2731 | do_for_each_ftrace_rec(pg, rec) { |
546fece4 | 2732 | |
cf04f2d5 | 2733 | if (skip_record(rec)) |
546fece4 SRRH |
2734 | continue; |
2735 | ||
e4f5d544 | 2736 | failed = __ftrace_replace_code(rec, enable); |
fa9d13cf | 2737 | if (failed) { |
4fd3279b | 2738 | ftrace_bug(failed, rec); |
3279ba37 SR |
2739 | /* Stop processing */ |
2740 | return; | |
3c1720f0 | 2741 | } |
a0572f68 SRV |
2742 | if (schedulable) |
2743 | cond_resched(); | |
265c831c | 2744 | } while_for_each_ftrace_rec(); |
3c1720f0 SR |
2745 | } |
2746 | ||
c88fd863 SR |
2747 | struct ftrace_rec_iter { |
2748 | struct ftrace_page *pg; | |
2749 | int index; | |
2750 | }; | |
2751 | ||
2752 | /** | |
6130722f | 2753 | * ftrace_rec_iter_start - start up iterating over traced functions |
c88fd863 | 2754 | * |
d1530413 | 2755 | * Returns: an iterator handle that is used to iterate over all |
c88fd863 SR |
2756 | * the records that represent address locations where functions |
2757 | * are traced. | |
2758 | * | |
2759 | * May return NULL if no records are available. | |
2760 | */ | |
2761 | struct ftrace_rec_iter *ftrace_rec_iter_start(void) | |
2762 | { | |
2763 | /* | |
2764 | * We only use a single iterator. | |
2765 | * Protected by the ftrace_lock mutex. | |
2766 | */ | |
2767 | static struct ftrace_rec_iter ftrace_rec_iter; | |
2768 | struct ftrace_rec_iter *iter = &ftrace_rec_iter; | |
2769 | ||
2770 | iter->pg = ftrace_pages_start; | |
2771 | iter->index = 0; | |
2772 | ||
2773 | /* Could have empty pages */ | |
2774 | while (iter->pg && !iter->pg->index) | |
2775 | iter->pg = iter->pg->next; | |
2776 | ||
2777 | if (!iter->pg) | |
2778 | return NULL; | |
2779 | ||
2780 | return iter; | |
2781 | } | |
2782 | ||
2783 | /** | |
6130722f | 2784 | * ftrace_rec_iter_next - get the next record to process. |
c88fd863 SR |
2785 | * @iter: The handle to the iterator. |
2786 | * | |
d1530413 | 2787 | * Returns: the next iterator after the given iterator @iter. |
c88fd863 SR |
2788 | */ |
2789 | struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter) | |
2790 | { | |
2791 | iter->index++; | |
2792 | ||
2793 | if (iter->index >= iter->pg->index) { | |
2794 | iter->pg = iter->pg->next; | |
2795 | iter->index = 0; | |
2796 | ||
2797 | /* Could have empty pages */ | |
2798 | while (iter->pg && !iter->pg->index) | |
2799 | iter->pg = iter->pg->next; | |
2800 | } | |
2801 | ||
2802 | if (!iter->pg) | |
2803 | return NULL; | |
2804 | ||
2805 | return iter; | |
2806 | } | |
2807 | ||
2808 | /** | |
6130722f | 2809 | * ftrace_rec_iter_record - get the record at the iterator location |
c88fd863 SR |
2810 | * @iter: The current iterator location |
2811 | * | |
d1530413 | 2812 | * Returns: the record that the current @iter is at. |
c88fd863 SR |
2813 | */ |
2814 | struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter) | |
2815 | { | |
2816 | return &iter->pg->records[iter->index]; | |
2817 | } | |
2818 | ||
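/*
 * Typical use by arch code (sketch), e.g. from an arch_ftrace_update_code()
 * implementation that patches each call site itself:
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		... patch the call site at rec->ip ...
 *	}
 */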
492a7ea5 | 2819 | static int |
fbf6c73c | 2820 | ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec) |
3c1720f0 | 2821 | { |
593eb8a2 | 2822 | int ret; |
3c1720f0 | 2823 | |
45a4a237 SR |
2824 | if (unlikely(ftrace_disabled)) |
2825 | return 0; | |
2826 | ||
fbf6c73c | 2827 | ret = ftrace_init_nop(mod, rec); |
593eb8a2 | 2828 | if (ret) { |
02a392a0 | 2829 | ftrace_bug_type = FTRACE_BUG_INIT; |
4fd3279b | 2830 | ftrace_bug(ret, rec); |
492a7ea5 | 2831 | return 0; |
37ad5084 | 2832 | } |
492a7ea5 | 2833 | return 1; |
3c1720f0 SR |
2834 | } |
2835 | ||
000ab691 SR |
2836 | /* |
2837 | * archs can override this function if they must do something | |
2838 | * before the modifying code is performed. | |
2839 | */ | |
3a2bfec0 | 2840 | void __weak ftrace_arch_code_modify_prepare(void) |
000ab691 | 2841 | { |
000ab691 SR |
2842 | } |
2843 | ||
2844 | /* | |
2845 | * archs can override this function if they must do something | |
2846 | * after the modifying code is performed. | |
2847 | */ | |
3a2bfec0 | 2848 | void __weak ftrace_arch_code_modify_post_process(void) |
000ab691 | 2849 | { |
000ab691 SR |
2850 | } |
2851 | ||
bd604f3d SRG |
2852 | static int update_ftrace_func(ftrace_func_t func) |
2853 | { | |
2854 | static ftrace_func_t save_func; | |
2855 | ||
2856 | /* Avoid updating if it hasn't changed */ | |
2857 | if (func == save_func) | |
2858 | return 0; | |
2859 | ||
2860 | save_func = func; | |
2861 | ||
2862 | return ftrace_update_ftrace_func(func); | |
2863 | } | |
2864 | ||
8ed3e2cf | 2865 | void ftrace_modify_all_code(int command) |
3d083395 | 2866 | { |
59338f75 | 2867 | int update = command & FTRACE_UPDATE_TRACE_FUNC; |
a0572f68 | 2868 | int mod_flags = 0; |
cd21067f | 2869 | int err = 0; |
59338f75 | 2870 | |
a0572f68 SRV |
2871 | if (command & FTRACE_MAY_SLEEP) |
2872 | mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL; | |
2873 | ||
59338f75 SRRH |
2874 | /* |
2875 | * If the ftrace_caller calls a ftrace_ops func directly, | |
2876 | * we need to make sure that it only traces functions it | |
2877 | * expects to trace. When doing the switch of functions, | |
2878 | * we need to update to the ftrace_ops_list_func first | |
2879 | * before the transition between old and new calls is set, | |
2880 | * as the ftrace_ops_list_func will check the ops hashes | |
2881 | * to make sure the ops have the right functions | |
2882 | * traced. | |
2883 | */ | |
cd21067f | 2884 | if (update) { |
bd604f3d | 2885 | err = update_ftrace_func(ftrace_ops_list_func); |
cd21067f PM |
2886 | if (FTRACE_WARN_ON(err)) |
2887 | return; | |
2888 | } | |
59338f75 | 2889 | |
8ed3e2cf | 2890 | if (command & FTRACE_UPDATE_CALLS) |
a0572f68 | 2891 | ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL); |
8ed3e2cf | 2892 | else if (command & FTRACE_DISABLE_CALLS) |
a0572f68 | 2893 | ftrace_replace_code(mod_flags); |
d61f82d0 | 2894 | |
405e1d83 SRRH |
2895 | if (update && ftrace_trace_function != ftrace_ops_list_func) { |
2896 | function_trace_op = set_function_trace_op; | |
2897 | smp_wmb(); | |
2898 | /* If irqs are disabled, we are in stop machine */ | |
2899 | if (!irqs_disabled()) | |
2900 | smp_call_function(ftrace_sync_ipi, NULL, 1); | |
bd604f3d | 2901 | err = update_ftrace_func(ftrace_trace_function); |
cd21067f PM |
2902 | if (FTRACE_WARN_ON(err)) |
2903 | return; | |
405e1d83 | 2904 | } |
d61f82d0 | 2905 | |
8ed3e2cf | 2906 | if (command & FTRACE_START_FUNC_RET) |
cd21067f | 2907 | err = ftrace_enable_ftrace_graph_caller(); |
8ed3e2cf | 2908 | else if (command & FTRACE_STOP_FUNC_RET) |
cd21067f PM |
2909 | err = ftrace_disable_ftrace_graph_caller(); |
2910 | FTRACE_WARN_ON(err); | |
8ed3e2cf SR |
2911 | } |
2912 | ||
2913 | static int __ftrace_modify_code(void *data) | |
2914 | { | |
2915 | int *command = data; | |
2916 | ||
2917 | ftrace_modify_all_code(*command); | |
5a45cfe1 | 2918 | |
d61f82d0 | 2919 | return 0; |
3d083395 SR |
2920 | } |
2921 | ||
c88fd863 | 2922 | /** |
6130722f | 2923 | * ftrace_run_stop_machine - go back to the stop machine method |
c88fd863 SR |
2924 | * @command: The command to tell ftrace what to do |
2925 | * | |
2926 | * If an arch needs to fall back to the stop machine method, then | |
2927 | * it can call this function. | |
2928 | */ | |
2929 | void ftrace_run_stop_machine(int command) | |
2930 | { | |
2931 | stop_machine(__ftrace_modify_code, &command, NULL); | |
2932 | } | |
2933 | ||
2934 | /** | |
6130722f | 2935 | * arch_ftrace_update_code - modify the code to trace or not trace |
c88fd863 SR |
2936 | * @command: The command that needs to be done |
2937 | * | |
2938 | * Archs can override this function if it does not need to | |
2939 | * run stop_machine() to modify code. | |
2940 | */ | |
2941 | void __weak arch_ftrace_update_code(int command) | |
2942 | { | |
2943 | ftrace_run_stop_machine(command); | |
2944 | } | |
2945 | ||
e309b41d | 2946 | static void ftrace_run_update_code(int command) |
3d083395 | 2947 | { |
3a2bfec0 | 2948 | ftrace_arch_code_modify_prepare(); |
000ab691 | 2949 | |
c88fd863 SR |
2950 | /* |
2951 | * By default we use stop_machine() to modify the code. | |
2952 | * But archs can do whatever they want as long as it | |
2953 | * is safe. The stop_machine() is the safest, but also | |
2954 | * produces the most overhead. | |
2955 | */ | |
2956 | arch_ftrace_update_code(command); | |
2957 | ||
3a2bfec0 | 2958 | ftrace_arch_code_modify_post_process(); |
3d083395 SR |
2959 | } |
2960 | ||
8252ecf3 | 2961 | static void ftrace_run_modify_code(struct ftrace_ops *ops, int command, |
7485058e | 2962 | struct ftrace_ops_hash *old_hash) |
e1effa01 SRRH |
2963 | { |
2964 | ops->flags |= FTRACE_OPS_FL_MODIFYING; | |
7485058e SRRH |
2965 | ops->old_hash.filter_hash = old_hash->filter_hash; |
2966 | ops->old_hash.notrace_hash = old_hash->notrace_hash; | |
e1effa01 | 2967 | ftrace_run_update_code(command); |
8252ecf3 | 2968 | ops->old_hash.filter_hash = NULL; |
7485058e | 2969 | ops->old_hash.notrace_hash = NULL; |
e1effa01 SRRH |
2970 | ops->flags &= ~FTRACE_OPS_FL_MODIFYING; |
2971 | } | |
2972 | ||
d61f82d0 | 2973 | static ftrace_func_t saved_ftrace_func; |
60a7ecf4 | 2974 | static int ftrace_start_up; |
df4fc315 | 2975 | |
12cce594 SRRH |
2976 | void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops) |
2977 | { | |
2978 | } | |
2979 | ||
fc0ea795 AH |
2980 | /* List of ftrace_ops that have allocated trampolines */ |
2981 | static LIST_HEAD(ftrace_ops_trampoline_list); | |
2982 | ||
2983 | static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops) | |
2984 | { | |
2985 | lockdep_assert_held(&ftrace_lock); | |
2986 | list_add_rcu(&ops->list, &ftrace_ops_trampoline_list); | |
2987 | } | |
2988 | ||
2989 | static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops) | |
2990 | { | |
2991 | lockdep_assert_held(&ftrace_lock); | |
2992 | list_del_rcu(&ops->list); | |
478ece95 | 2993 | synchronize_rcu(); |
fc0ea795 AH |
2994 | } |
2995 | ||
2996 | /* | |
2997 | * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols | |
2998 | * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is | |
2999 | * not a module. | |
3000 | */ | |
3001 | #define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace" | |
3002 | #define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline" | |
3003 | ||
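/*
 * An allocated trampoline then appears in /proc/kallsyms along these
 * lines (address and symbol type illustrative):
 *
 *	ffffffffc0002000 t ftrace_trampoline	[__builtin__ftrace]
 */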
3004 | static void ftrace_trampoline_free(struct ftrace_ops *ops) | |
3005 | { | |
3006 | if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) && | |
dd9ddf46 | 3007 | ops->trampoline) { |
548e1f6c AH |
3008 | /* |
3009 | * Record the text poke event before the ksymbol unregister | |
3010 | * event. | |
3011 | */ | |
3012 | perf_event_text_poke((void *)ops->trampoline, | |
3013 | (void *)ops->trampoline, | |
3014 | ops->trampoline_size, NULL, 0); | |
dd9ddf46 AH |
3015 | perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, |
3016 | ops->trampoline, ops->trampoline_size, | |
3017 | true, FTRACE_TRAMPOLINE_SYM); | |
3018 | /* Remove from kallsyms after the perf events */ | |
fc0ea795 | 3019 | ftrace_remove_trampoline_from_kallsyms(ops); |
dd9ddf46 | 3020 | } |
fc0ea795 AH |
3021 | |
3022 | arch_ftrace_trampoline_free(ops); | |
3023 | } | |
3024 | ||
df4fc315 SR |
3025 | static void ftrace_startup_enable(int command) |
3026 | { | |
3027 | if (saved_ftrace_func != ftrace_trace_function) { | |
3028 | saved_ftrace_func = ftrace_trace_function; | |
3029 | command |= FTRACE_UPDATE_TRACE_FUNC; | |
3030 | } | |
3031 | ||
3032 | if (!command || !ftrace_enabled) | |
3033 | return; | |
3034 | ||
3035 | ftrace_run_update_code(command); | |
3036 | } | |
d61f82d0 | 3037 | |
e1effa01 SRRH |
3038 | static void ftrace_startup_all(int command) |
3039 | { | |
3040 | update_all_ops = true; | |
3041 | ftrace_startup_enable(command); | |
3042 | update_all_ops = false; | |
3043 | } | |
3044 | ||
3306fc4a | 3045 | int ftrace_startup(struct ftrace_ops *ops, int command) |
3d083395 | 3046 | { |
8a56d776 | 3047 | int ret; |
b848914c | 3048 | |
4eebcc81 | 3049 | if (unlikely(ftrace_disabled)) |
a1cd6173 | 3050 | return -ENODEV; |
4eebcc81 | 3051 | |
8a56d776 SRRH |
3052 | ret = __register_ftrace_function(ops); |
3053 | if (ret) | |
3054 | return ret; | |
3055 | ||
60a7ecf4 | 3056 | ftrace_start_up++; |
d61f82d0 | 3057 | |
e1effa01 SRRH |
3058 | /* |
3059 | * Note that ftrace probes use this to start up | |
3060 | * and modify functions it will probe. But we still | |
3061 | * set the ADDING flag for modification, as probes | |
3062 | * do not have trampolines. If they add them in the | |
3063 | * future, then the probes will need to distinguish | |
3064 | * between adding and updating probes. | |
3065 | */ | |
3066 | ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING; | |
66209a5b | 3067 | |
f8b8be8a MH |
3068 | ret = ftrace_hash_ipmodify_enable(ops); |
3069 | if (ret < 0) { | |
3070 | /* Rollback registration process */ | |
3071 | __unregister_ftrace_function(ops); | |
3072 | ftrace_start_up--; | |
3073 | ops->flags &= ~FTRACE_OPS_FL_ENABLED; | |
d5e47505 MB |
3074 | if (ops->flags & FTRACE_OPS_FL_DYNAMIC) |
3075 | ftrace_trampoline_free(ops); | |
f8b8be8a MH |
3076 | return ret; |
3077 | } | |
3078 | ||
07bbe083 | 3079 | if (ftrace_hash_rec_enable(ops)) |
7f50d06b | 3080 | command |= FTRACE_UPDATE_CALLS; |
ed926f9b | 3081 | |
df4fc315 | 3082 | ftrace_startup_enable(command); |
a1cd6173 | 3083 | |
c3b0f72e YJ |
3084 | /* |
3085 | * If ftrace is in an undefined state, we just remove the ops from the list | |
3086 | * to prevent a NULL pointer dereference, instead of totally rolling it back | |
3087 | * and freeing the trampoline, because those actions could cause further damage. | |
3088 | */ | |
3089 | if (unlikely(ftrace_disabled)) { | |
3090 | __unregister_ftrace_function(ops); | |
3091 | return -ENODEV; | |
3092 | } | |
3093 | ||
e1effa01 SRRH |
3094 | ops->flags &= ~FTRACE_OPS_FL_ADDING; |
3095 | ||
a1cd6173 | 3096 | return 0; |
3d083395 SR |
3097 | } |
3098 | ||
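/*
 * Illustrative usage (a hypothetical caller, not code from this file):
 * ftrace_startup() is normally reached via register_ftrace_function().
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		// called at the start of every traced function
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);  // ends up in ftrace_startup(&my_ops, 0)
 */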
3306fc4a | 3099 | int ftrace_shutdown(struct ftrace_ops *ops, int command) |
3d083395 | 3100 | { |
8a56d776 | 3101 | int ret; |
b848914c | 3102 | |
4eebcc81 | 3103 | if (unlikely(ftrace_disabled)) |
8a56d776 SRRH |
3104 | return -ENODEV; |
3105 | ||
3106 | ret = __unregister_ftrace_function(ops); | |
3107 | if (ret) | |
3108 | return ret; | |
4eebcc81 | 3109 | |
60a7ecf4 | 3110 | ftrace_start_up--; |
9ea1a153 FW |
3111 | /* |
3112 | * Just warn in case of an imbalance; no need to kill ftrace, it's not | |
3113 | * critical, but the ftrace_call callers may never be nopped again after | |
3114 | * further ftrace uses. | |
3115 | */ | |
3116 | WARN_ON_ONCE(ftrace_start_up < 0); | |
3117 | ||
f8b8be8a MH |
3118 | /* Disabling ipmodify never fails */ |
3119 | ftrace_hash_ipmodify_disable(ops); | |
ed926f9b | 3120 | |
07bbe083 | 3121 | if (ftrace_hash_rec_disable(ops)) |
7f50d06b | 3122 | command |= FTRACE_UPDATE_CALLS; |
b848914c | 3123 | |
7f50d06b | 3124 | ops->flags &= ~FTRACE_OPS_FL_ENABLED; |
3d083395 | 3125 | |
d61f82d0 SR |
3126 | if (saved_ftrace_func != ftrace_trace_function) { |
3127 | saved_ftrace_func = ftrace_trace_function; | |
3128 | command |= FTRACE_UPDATE_TRACE_FUNC; | |
3129 | } | |
3d083395 | 3130 | |
0e792b89 LH |
3131 | if (!command || !ftrace_enabled) |
3132 | goto out; | |
d61f82d0 | 3133 | |
79922b80 SRRH |
3134 | /* |
3135 | * If the ops uses a trampoline, then it needs to be | |
3136 | * tested first on update. | |
3137 | */ | |
e1effa01 | 3138 | ops->flags |= FTRACE_OPS_FL_REMOVING; |
79922b80 SRRH |
3139 | removed_ops = ops; |
3140 | ||
fef5aeee SRRH |
3141 | /* The trampoline logic checks the old hashes */ |
3142 | ops->old_hash.filter_hash = ops->func_hash->filter_hash; | |
3143 | ops->old_hash.notrace_hash = ops->func_hash->notrace_hash; | |
3144 | ||
d61f82d0 | 3145 | ftrace_run_update_code(command); |
a4c35ed2 | 3146 | |
84bde62c SRRH |
3147 | /* |
3148 | * If there are no more ops registered with ftrace, run a | |
3149 | * sanity check to make sure all rec flags are cleared. | |
3150 | */ | |
f86f4180 CZ |
3151 | if (rcu_dereference_protected(ftrace_ops_list, |
3152 | lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) { | |
84bde62c SRRH |
3153 | struct ftrace_page *pg; |
3154 | struct dyn_ftrace *rec; | |
3155 | ||
3156 | do_for_each_ftrace_rec(pg, rec) { | |
e11b521a | 3157 | if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_NOCLEAR_FLAGS)) |
84bde62c SRRH |
3158 | pr_warn(" %pS flags:%lx\n", |
3159 | (void *)rec->ip, rec->flags); | |
3160 | } while_for_each_ftrace_rec(); | |
3161 | } | |
3162 | ||
fef5aeee SRRH |
3163 | ops->old_hash.filter_hash = NULL; |
3164 | ops->old_hash.notrace_hash = NULL; | |
3165 | ||
3166 | removed_ops = NULL; | |
e1effa01 | 3167 | ops->flags &= ~FTRACE_OPS_FL_REMOVING; |
79922b80 | 3168 | |
0e792b89 | 3169 | out: |
a4c35ed2 SRRH |
3170 | /* |
3171 | * Dynamic ops may be freed; we must make sure that all | |
3172 | * callers are done before leaving this function. | |
a4c35ed2 | 3173 | */ |
b3a88803 | 3174 | if (ops->flags & FTRACE_OPS_FL_DYNAMIC) { |
0598e4f0 SRV |
3175 | /* |
3176 | * We need to do a hard force of sched synchronization. | |
3177 | * This is because we use preempt_disable() to do RCU, but | |
3178 | * the function tracers can be called where RCU is not watching | |
3179 | * (like before user_exit()). We cannot rely on the RCU | |
3180 | * infrastructure to do the synchronization, thus we must do it | |
3181 | * ourselves. | |
3182 | */ | |
e5a971d7 | 3183 | synchronize_rcu_tasks_rude(); |
a4c35ed2 | 3184 | |
0598e4f0 | 3185 | /* |
fdda88d3 | 3186 | * When the kernel is preemptible, tasks can be preempted |
0598e4f0 SRV |
3187 | * while on an ftrace trampoline. Just scheduling a task on | |
3188 | * a CPU is not good enough to flush them. Calling | |
f2cc020d | 3189 | * synchronize_rcu_tasks() will wait for those tasks to |
0598e4f0 SRV |
3190 | * execute and either schedule voluntarily or enter user space. |
3191 | */ | |
64ec8b6a | 3192 | synchronize_rcu_tasks(); |
0598e4f0 | 3193 | |
fc0ea795 | 3194 | ftrace_trampoline_free(ops); |
a4c35ed2 SRRH |
3195 | } |
3196 | ||
8a56d776 | 3197 | return 0; |
3d083395 SR |
3198 | } |
3199 | ||
5fccc755 SRG |
3200 | /* Simply make a copy of @src and return it */ |
3201 | static struct ftrace_hash *copy_hash(struct ftrace_hash *src) | |
3202 | { | |
3203 | if (ftrace_hash_empty(src)) | |
3204 | return EMPTY_HASH; | |
3205 | ||
3206 | return alloc_and_copy_ftrace_hash(src->size_bits, src); | |
3207 | } | |
3208 | ||
3209 | /* | |
3210 | * Append @new_hash entries to @hash: | |
3211 | * | |
3212 | * If @hash is the EMPTY_HASH then it traces all functions and nothing | |
3213 | * needs to be done. | |
3214 | * | |
3215 | * If @new_hash is the EMPTY_HASH, then make *hash the EMPTY_HASH so | |
3216 | * that it traces everything. | |
3217 | * | |
3218 | * Otherwise, go through all of @new_hash and add to @hash anything | |
3219 | * that @hash doesn't already have. | |
3220 | * | |
3221 | * Only the filter_hash updates use the append_hash() function; | |
3222 | * the notrace_hash updates do not. | |
3223 | */ | |
38b14061 SR |
3224 | static int append_hash(struct ftrace_hash **hash, struct ftrace_hash *new_hash, |
3225 | int size_bits) | |
5fccc755 SRG |
3226 | { |
3227 | struct ftrace_func_entry *entry; | |
3228 | int size; | |
3229 | int i; | |
3230 | ||
38b14061 SR |
3231 | if (*hash) { |
3232 | /* An empty hash does everything */ | |
3233 | if (ftrace_hash_empty(*hash)) | |
3234 | return 0; | |
3235 | } else { | |
3236 | *hash = alloc_ftrace_hash(size_bits); | |
3237 | if (!*hash) | |
3238 | return -ENOMEM; | |
3239 | } | |
5fccc755 SRG |
3240 | |
3241 | /* If new_hash has everything, make hash have everything */ | |
3242 | if (ftrace_hash_empty(new_hash)) { | |
3243 | free_ftrace_hash(*hash); | |
3244 | *hash = EMPTY_HASH; | |
3245 | return 0; | |
3246 | } | |
3247 | ||
3248 | size = 1 << new_hash->size_bits; | |
3249 | for (i = 0; i < size; i++) { | |
3250 | hlist_for_each_entry(entry, &new_hash->buckets[i], hlist) { | |
3251 | /* Only add if not already in hash */ | |
3252 | if (!__ftrace_lookup_ip(*hash, entry->ip) && | |
3253 | add_hash_entry(*hash, entry->ip) == NULL) | |
3254 | return -ENOMEM; | |
3255 | } | |
3256 | } | |
3257 | return 0; | |
3258 | } | |
3259 | ||
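/*
 * Worked example (illustrative names): if *hash is {foo, bar} and
 * @new_hash is {bar, baz}, append_hash() leaves *hash as {foo, bar, baz}.
 * If either hash is EMPTY_HASH ("trace everything"), the result is
 * EMPTY_HASH, since tracing everything absorbs any finite filter.
 */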
0ae6b8ce SR |
3260 | /* |
3261 | * Remove functions from @hash that are in @notrace_hash | |
3262 | */ | |
3263 | static void remove_hash(struct ftrace_hash *hash, struct ftrace_hash *notrace_hash) | |
3264 | { | |
3265 | struct ftrace_func_entry *entry; | |
3266 | struct hlist_node *tmp; | |
3267 | int size; | |
3268 | int i; | |
3269 | ||
3270 | /* If the notrace hash is empty, there's nothing to do */ | |
3271 | if (ftrace_hash_empty(notrace_hash)) | |
3272 | return; | |
3273 | ||
3274 | size = 1 << hash->size_bits; | |
3275 | for (i = 0; i < size; i++) { | |
3276 | hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) { | |
3277 | if (!__ftrace_lookup_ip(notrace_hash, entry->ip)) | |
3278 | continue; | |
3279 | remove_hash_entry(hash, entry); | |
3280 | kfree(entry); | |
3281 | } | |
3282 | } | |
3283 | } | |
3284 | ||
5fccc755 SRG |
3285 | /* |
3286 | * Add to @hash only those that are in both @new_hash1 and @new_hash2 | |
3287 | * | |
3288 | * Only the notrace_hash updates use the intersect_hash() function; | |
3289 | * the filter_hash updates do not. | |
3290 | */ | |
3291 | static int intersect_hash(struct ftrace_hash **hash, struct ftrace_hash *new_hash1, | |
3292 | struct ftrace_hash *new_hash2) | |
3293 | { | |
3294 | struct ftrace_func_entry *entry; | |
3295 | int size; | |
3296 | int i; | |
3297 | ||
3298 | /* | |
3299 | * If new_hash1 or new_hash2 is the EMPTY_HASH, then make the hash | |
3300 | * empty as well, since an empty notrace hash means none are notraced. | |
3301 | */ | |
3302 | if (ftrace_hash_empty(new_hash1) || ftrace_hash_empty(new_hash2)) { | |
3303 | free_ftrace_hash(*hash); | |
3304 | *hash = EMPTY_HASH; | |
3305 | return 0; | |
3306 | } | |
3307 | ||
3308 | size = 1 << new_hash1->size_bits; | |
3309 | for (i = 0; i < size; i++) { | |
3310 | hlist_for_each_entry(entry, &new_hash1->buckets[i], hlist) { | |
3311 | /* Only add if in both @new_hash1 and @new_hash2 */ | |
3312 | if (__ftrace_lookup_ip(new_hash2, entry->ip) && | |
3313 | add_hash_entry(*hash, entry->ip) == NULL) | |
3314 | return -ENOMEM; | |
3315 | } | |
3316 | } | |
3317 | /* If nothing intersects, make it the empty set */ | |
3318 | if (ftrace_hash_empty(*hash)) { | |
3319 | free_ftrace_hash(*hash); | |
3320 | *hash = EMPTY_HASH; | |
3321 | } | |
3322 | return 0; | |
3323 | } | |
3324 | ||
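/*
 * Worked example (illustrative names): if one subops has notrace {f1, f2}
 * and another has notrace {f2, f3}, the intersection is {f2}. Only f2
 * may be skipped by the manager ops; f1 and f3 must still be traced
 * so the subops that did not mark them can see them.
 */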
5fccc755 SRG |
3325 | static bool ops_equal(struct ftrace_hash *A, struct ftrace_hash *B) |
3326 | { | |
3327 | struct ftrace_func_entry *entry; | |
3328 | int size; | |
3329 | int i; | |
3330 | ||
3331 | if (ftrace_hash_empty(A)) | |
3332 | return ftrace_hash_empty(B); | |
3333 | ||
3334 | if (ftrace_hash_empty(B)) | |
3335 | return ftrace_hash_empty(A); | |
3336 | ||
3337 | if (A->count != B->count) | |
3338 | return false; | |
3339 | ||
3340 | size = 1 << A->size_bits; | |
3341 | for (i = 0; i < size; i++) { | |
3342 | hlist_for_each_entry(entry, &A->buckets[i], hlist) { | |
3343 | if (!__ftrace_lookup_ip(B, entry->ip)) | |
3344 | return false; | |
3345 | } | |
3346 | } | |
3347 | ||
3348 | return true; | |
3349 | } | |
3350 | ||
d9bbfbd1 SRG |
3351 | static void ftrace_ops_update_code(struct ftrace_ops *ops, |
3352 | struct ftrace_ops_hash *old_hash); | |
3353 | ||
3354 | static int __ftrace_hash_move_and_update_ops(struct ftrace_ops *ops, | |
3355 | struct ftrace_hash **orig_hash, | |
3356 | struct ftrace_hash *hash, | |
3357 | int enable) | |
3358 | { | |
3359 | struct ftrace_ops_hash old_hash_ops; | |
3360 | struct ftrace_hash *old_hash; | |
3361 | int ret; | |
3362 | ||
3363 | old_hash = *orig_hash; | |
3364 | old_hash_ops.filter_hash = ops->func_hash->filter_hash; | |
3365 | old_hash_ops.notrace_hash = ops->func_hash->notrace_hash; | |
3366 | ret = ftrace_hash_move(ops, enable, orig_hash, hash); | |
3367 | if (!ret) { | |
3368 | ftrace_ops_update_code(ops, &old_hash_ops); | |
3369 | free_ftrace_hash_rcu(old_hash); | |
3370 | } | |
3371 | return ret; | |
3372 | } | |
5fccc755 SRG |
3373 | |
3374 | static int ftrace_update_ops(struct ftrace_ops *ops, struct ftrace_hash *filter_hash, | |
3375 | struct ftrace_hash *notrace_hash) | |
3376 | { | |
3377 | int ret; | |
3378 | ||
3379 | if (!ops_equal(filter_hash, ops->func_hash->filter_hash)) { | |
d9bbfbd1 SRG |
3380 | ret = __ftrace_hash_move_and_update_ops(ops, &ops->func_hash->filter_hash, |
3381 | filter_hash, 1); | |
5fccc755 SRG |
3382 | if (ret < 0) |
3383 | return ret; | |
3384 | } | |
3385 | ||
3386 | if (!ops_equal(notrace_hash, ops->func_hash->notrace_hash)) { | |
d9bbfbd1 SRG |
3387 | ret = __ftrace_hash_move_and_update_ops(ops, &ops->func_hash->notrace_hash, |
3388 | notrace_hash, 0); | |
5fccc755 SRG |
3389 | if (ret < 0) |
3390 | return ret; | |
3391 | } | |
3392 | ||
3393 | return 0; | |
3394 | } | |
3395 | ||
0ae6b8ce SR |
3396 | static int add_first_hash(struct ftrace_hash **filter_hash, struct ftrace_hash **notrace_hash, |
3397 | struct ftrace_ops_hash *func_hash) | |
3398 | { | |
3399 | /* If the filter hash is not empty, simply remove the notrace hash from it */ | |
3400 | if (!ftrace_hash_empty(func_hash->filter_hash)) { | |
3401 | *filter_hash = copy_hash(func_hash->filter_hash); | |
3402 | if (!*filter_hash) | |
3403 | return -ENOMEM; | |
3404 | remove_hash(*filter_hash, func_hash->notrace_hash); | |
3405 | *notrace_hash = EMPTY_HASH; | |
3406 | ||
3407 | } else { | |
3408 | *notrace_hash = copy_hash(func_hash->notrace_hash); | |
3409 | if (!*notrace_hash) | |
3410 | return -ENOMEM; | |
3411 | *filter_hash = EMPTY_HASH; | |
3412 | } | |
3413 | return 0; | |
3414 | } | |
3415 | ||
3416 | static int add_next_hash(struct ftrace_hash **filter_hash, struct ftrace_hash **notrace_hash, | |
3417 | struct ftrace_ops_hash *ops_hash, struct ftrace_ops_hash *subops_hash) | |
3418 | { | |
3419 | int size_bits; | |
3420 | int ret; | |
3421 | ||
3422 | /* If the subops traces all functions, so must the main ops */ | |
3423 | if (ftrace_hash_empty(ops_hash->filter_hash) || | |
3424 | ftrace_hash_empty(subops_hash->filter_hash)) { | |
3425 | *filter_hash = EMPTY_HASH; | |
3426 | } else { | |
3427 | /* | |
3428 | * The main ops filter hash is not empty, so its | |
3429 | * notrace_hash had better be empty, as the notrace hash | |
3430 | * is only used when the main filter hash is empty. | |
3431 | */ | |
3432 | WARN_ON_ONCE(!ftrace_hash_empty(ops_hash->notrace_hash)); | |
3433 | ||
3434 | size_bits = max(ops_hash->filter_hash->size_bits, | |
3435 | subops_hash->filter_hash->size_bits); | |
3436 | ||
3437 | /* Copy the subops hash */ | |
3438 | *filter_hash = alloc_and_copy_ftrace_hash(size_bits, subops_hash->filter_hash); | |
3c1d9cfa | 3439 | if (!*filter_hash) |
0ae6b8ce SR |
3440 | return -ENOMEM; |
3441 | /* Remove any notrace functions from the copy */ | |
3442 | remove_hash(*filter_hash, subops_hash->notrace_hash); | |
3443 | ||
3444 | ret = append_hash(filter_hash, ops_hash->filter_hash, | |
3445 | size_bits); | |
3446 | if (ret < 0) { | |
3447 | free_ftrace_hash(*filter_hash); | |
08275e59 | 3448 | *filter_hash = EMPTY_HASH; |
0ae6b8ce SR |
3449 | return ret; |
3450 | } | |
3451 | } | |
3452 | ||
3453 | /* | |
3454 | * Only process notrace hashes if the main filter hash is empty | |
3455 | * (tracing all functions), otherwise the filter hash will just | |
3456 | * remove the notrace hash functions, and the notrace hash is | |
3457 | * not needed. | |
3458 | */ | |
3459 | if (ftrace_hash_empty(*filter_hash)) { | |
3460 | /* | |
3461 | * Intersect the notrace functions. That is, if two | |
3462 | * subops are each not tracing a set of functions, the | |
3463 | * main ops will only skip the functions that are | |
3464 | * notrace in both subops, but it must trace the | |
3465 | * functions that are notrace in only one subops, so | |
3466 | * that the other subops can still trace them. | |
3467 | */ | |
3468 | size_bits = max(ops_hash->notrace_hash->size_bits, | |
3469 | subops_hash->notrace_hash->size_bits); | |
3470 | *notrace_hash = alloc_ftrace_hash(size_bits); | |
3471 | if (!*notrace_hash) | |
3472 | return -ENOMEM; | |
3473 | ||
3474 | ret = intersect_hash(notrace_hash, ops_hash->notrace_hash, | |
3475 | subops_hash->notrace_hash); | |
3476 | if (ret < 0) { | |
3477 | free_ftrace_hash(*notrace_hash); | |
08275e59 | 3478 | *notrace_hash = EMPTY_HASH; |
0ae6b8ce SR |
3479 | return ret; |
3480 | } | |
3481 | } | |
3482 | return 0; | |
3483 | } | |
3484 | ||
5fccc755 SRG |
3485 | /** |
3486 | * ftrace_startup_subops - enable tracing for subops of an ops | |
3487 | * @ops: Manager ops (used to pick all the functions of its subops) | |
3488 | * @subops: A new ops to add to @ops | |
3489 | * @command: Extra commands to use to enable tracing | |
3490 | * | |
3491 | * The @ops is a manager @ops whose filter includes all the functions | |
3492 | * that its list of subops is tracing. Adding a new @subops will add the | |
3493 | * functions of @subops to @ops. | |
3494 | */ | |
3495 | int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int command) | |
3496 | { | |
31d11399 SR |
3497 | struct ftrace_hash *filter_hash = EMPTY_HASH; |
3498 | struct ftrace_hash *notrace_hash = EMPTY_HASH; | |
5fccc755 SRG |
3499 | struct ftrace_hash *save_filter_hash; |
3500 | struct ftrace_hash *save_notrace_hash; | |
5fccc755 SRG |
3501 | int ret; |
3502 | ||
3503 | if (unlikely(ftrace_disabled)) | |
3504 | return -ENODEV; | |
3505 | ||
3506 | ftrace_ops_init(ops); | |
3507 | ftrace_ops_init(subops); | |
3508 | ||
3509 | if (WARN_ON_ONCE(subops->flags & FTRACE_OPS_FL_ENABLED)) | |
3510 | return -EBUSY; | |
3511 | ||
3512 | /* Make everything canonical (Just in case!) */ | |
3513 | if (!ops->func_hash->filter_hash) | |
3514 | ops->func_hash->filter_hash = EMPTY_HASH; | |
3515 | if (!ops->func_hash->notrace_hash) | |
3516 | ops->func_hash->notrace_hash = EMPTY_HASH; | |
3517 | if (!subops->func_hash->filter_hash) | |
3518 | subops->func_hash->filter_hash = EMPTY_HASH; | |
3519 | if (!subops->func_hash->notrace_hash) | |
3520 | subops->func_hash->notrace_hash = EMPTY_HASH; | |
3521 | ||
3522 | /* For the first subops added to ops, just enable it normally */ | |
3523 | if (list_empty(&ops->subop_list)) { | |
0ae6b8ce SR |
3524 | |
3525 | /* The ops was empty, so it should have empty hashes */ | |
3526 | WARN_ON_ONCE(!ftrace_hash_empty(ops->func_hash->filter_hash)); | |
3527 | WARN_ON_ONCE(!ftrace_hash_empty(ops->func_hash->notrace_hash)); | |
3528 | ||
3529 | ret = add_first_hash(&filter_hash, ¬race_hash, subops->func_hash); | |
3530 | if (ret < 0) | |
3531 | return ret; | |
5fccc755 SRG |
3532 | |
3533 | save_filter_hash = ops->func_hash->filter_hash; | |
3534 | save_notrace_hash = ops->func_hash->notrace_hash; | |
3535 | ||
3536 | ops->func_hash->filter_hash = filter_hash; | |
3537 | ops->func_hash->notrace_hash = notrace_hash; | |
3538 | list_add(&subops->list, &ops->subop_list); | |
3539 | ret = ftrace_startup(ops, command); | |
3540 | if (ret < 0) { | |
3541 | list_del(&subops->list); | |
3542 | ops->func_hash->filter_hash = save_filter_hash; | |
3543 | ops->func_hash->notrace_hash = save_notrace_hash; | |
3544 | free_ftrace_hash(filter_hash); | |
3545 | free_ftrace_hash(notrace_hash); | |
3546 | } else { | |
3547 | free_ftrace_hash(save_filter_hash); | |
3548 | free_ftrace_hash(save_notrace_hash); | |
d9bbfbd1 SRG |
3549 | subops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP; |
3550 | subops->managed = ops; | |
5fccc755 SRG |
3551 | } |
3552 | return ret; | |
3553 | } | |
3554 | ||
3555 | /* | |
3556 | * At this point there's already something attached. Here are the rules: | |
0ae6b8ce SR |
3557 | * If the new subops and main ops filter hashes are not empty: |
3558 | * o Make a copy of the subops filter hash | |
3559 | * o Remove all functions in the subops notrace hash from it. | |
3560 | * o Add in the main hash filter functions | |
3561 | * o Remove any of these functions from the main notrace hash | |
5fccc755 | 3562 | */ |
5fccc755 | 3563 | |
0ae6b8ce SR |
3564 | ret = add_next_hash(&filter_hash, ¬race_hash, ops->func_hash, subops->func_hash); |
3565 | if (ret < 0) | |
3566 | return ret; | |
5fccc755 SRG |
3567 | |
3568 | list_add(&subops->list, &ops->subop_list); | |
3569 | ||
3570 | ret = ftrace_update_ops(ops, filter_hash, notrace_hash); | |
3571 | free_ftrace_hash(filter_hash); | |
3572 | free_ftrace_hash(notrace_hash); | |
d9bbfbd1 | 3573 | if (ret < 0) { |
5fccc755 | 3574 | list_del(&subops->list); |
d9bbfbd1 SRG |
3575 | } else { |
3576 | subops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP; | |
3577 | subops->managed = ops; | |
3578 | } | |
5fccc755 SRG |
3579 | return ret; |
3580 | } | |
3581 | ||
0ae6b8ce SR |
3582 | static int rebuild_hashes(struct ftrace_hash **filter_hash, struct ftrace_hash **notrace_hash, |
3583 | struct ftrace_ops *ops) | |
3584 | { | |
3585 | struct ftrace_ops_hash temp_hash; | |
3586 | struct ftrace_ops *subops; | |
3587 | bool first = true; | |
3588 | int ret; | |
3589 | ||
3590 | temp_hash.filter_hash = EMPTY_HASH; | |
3591 | temp_hash.notrace_hash = EMPTY_HASH; | |
3592 | ||
3593 | list_for_each_entry(subops, &ops->subop_list, list) { | |
3594 | *filter_hash = EMPTY_HASH; | |
3595 | *notrace_hash = EMPTY_HASH; | |
3596 | ||
3597 | if (first) { | |
3598 | ret = add_first_hash(filter_hash, notrace_hash, subops->func_hash); | |
3599 | if (ret < 0) | |
3600 | return ret; | |
3601 | first = false; | |
3602 | } else { | |
3603 | ret = add_next_hash(filter_hash, notrace_hash, | |
3604 | &temp_hash, subops->func_hash); | |
3605 | if (ret < 0) { | |
3606 | free_ftrace_hash(temp_hash.filter_hash); | |
3607 | free_ftrace_hash(temp_hash.notrace_hash); | |
3608 | return ret; | |
3609 | } | |
3610 | } | |
3611 | ||
c45c585d SR |
3612 | free_ftrace_hash(temp_hash.filter_hash); |
3613 | free_ftrace_hash(temp_hash.notrace_hash); | |
3614 | ||
0ae6b8ce SR |
3615 | temp_hash.filter_hash = *filter_hash; |
3616 | temp_hash.notrace_hash = *notrace_hash; | |
3617 | } | |
3618 | return 0; | |
3619 | } | |
3620 | ||
5fccc755 SRG |
3621 | /** |
3622 | * ftrace_shutdown_subops - Remove a subops from a manager ops | |
3623 | * @ops: A manager ops to remove @subops from | |
3624 | * @subops: The subops to remove from @ops | |
3625 | * @command: Any extra command flags to add to modifying the text | |
3626 | * | |
3627 | * Removes the functions being traced by the @subops from @ops. Note, it | |
3628 | * will not affect functions that are being traced by other subops that | |
3629 | * still exist in @ops. | |
3630 | * | |
3631 | * If the last subops is removed from @ops, then @ops is shut down normally. | |
3632 | */ | |
3633 | int ftrace_shutdown_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int command) | |
3634 | { | |
31d11399 SR |
3635 | struct ftrace_hash *filter_hash = EMPTY_HASH; |
3636 | struct ftrace_hash *notrace_hash = EMPTY_HASH; | |
5fccc755 SRG |
3637 | int ret; |
3638 | ||
3639 | if (unlikely(ftrace_disabled)) | |
3640 | return -ENODEV; | |
3641 | ||
3642 | if (WARN_ON_ONCE(!(subops->flags & FTRACE_OPS_FL_ENABLED))) | |
3643 | return -EINVAL; | |
3644 | ||
3645 | list_del(&subops->list); | |
3646 | ||
3647 | if (list_empty(&ops->subop_list)) { | |
3648 | /* Last one, just disable the current ops */ | |
3649 | ||
3650 | ret = ftrace_shutdown(ops, command); | |
3651 | if (ret < 0) { | |
3652 | list_add(&subops->list, &ops->subop_list); | |
3653 | return ret; | |
3654 | } | |
3655 | ||
3656 | subops->flags &= ~FTRACE_OPS_FL_ENABLED; | |
3657 | ||
3658 | free_ftrace_hash(ops->func_hash->filter_hash); | |
3659 | free_ftrace_hash(ops->func_hash->notrace_hash); | |
3660 | ops->func_hash->filter_hash = EMPTY_HASH; | |
3661 | ops->func_hash->notrace_hash = EMPTY_HASH; | |
d9bbfbd1 SRG |
3662 | subops->flags &= ~(FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP); |
3663 | subops->managed = NULL; | |
5fccc755 SRG |
3664 | |
3665 | return 0; | |
3666 | } | |
3667 | ||
3668 | /* Rebuild the hashes without the removed subops */ | |
0ae6b8ce SR |
3669 | ret = rebuild_hashes(&filter_hash, ¬race_hash, ops); |
3670 | if (ret < 0) | |
3671 | return ret; | |
5fccc755 SRG |
3672 | |
3673 | ret = ftrace_update_ops(ops, filter_hash, notrace_hash); | |
d9bbfbd1 | 3674 | if (ret < 0) { |
5fccc755 | 3675 | list_add(&subops->list, &ops->subop_list); |
d9bbfbd1 SRG |
3676 | } else { |
3677 | subops->flags &= ~(FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP); | |
3678 | subops->managed = NULL; | |
3679 | } | |
5fccc755 SRG |
3680 | free_ftrace_hash(filter_hash); |
3681 | free_ftrace_hash(notrace_hash); | |
3682 | return ret; | |
3683 | } | |
3684 | ||
d9bbfbd1 SRG |
3685 | static int ftrace_hash_move_and_update_subops(struct ftrace_ops *subops, |
3686 | struct ftrace_hash **orig_subhash, | |
0ae6b8ce | 3687 | struct ftrace_hash *hash) |
d9bbfbd1 SRG |
3688 | { |
3689 | struct ftrace_ops *ops = subops->managed; | |
0ae6b8ce SR |
3690 | struct ftrace_hash *notrace_hash; |
3691 | struct ftrace_hash *filter_hash; | |
d9bbfbd1 SRG |
3692 | struct ftrace_hash *save_hash; |
3693 | struct ftrace_hash *new_hash; | |
3694 | int ret; | |
3695 | ||
3696 | /* Manager ops cannot be subops (yet) */ | |
3697 | if (WARN_ON_ONCE(!ops || ops->flags & FTRACE_OPS_FL_SUBOP)) | |
3698 | return -EINVAL; | |
3699 | ||
3700 | /* Move the new hash over to the subops hash */ | |
3701 | save_hash = *orig_subhash; | |
3702 | *orig_subhash = __ftrace_hash_move(hash); | |
3703 | if (!*orig_subhash) { | |
3704 | *orig_subhash = save_hash; | |
3705 | return -ENOMEM; | |
3706 | } | |
3707 | ||
0ae6b8ce | 3708 | ret = rebuild_hashes(&filter_hash, ¬race_hash, ops); |
c45c585d | 3709 | if (!ret) { |
0ae6b8ce | 3710 | ret = ftrace_update_ops(ops, filter_hash, notrace_hash); |
c45c585d SR |
3711 | free_ftrace_hash(filter_hash); |
3712 | free_ftrace_hash(notrace_hash); | |
3713 | } | |
d9bbfbd1 SRG |
3714 | |
3715 | if (ret) { | |
3716 | /* Put back the original hash */ | |
0ae6b8ce | 3717 | new_hash = *orig_subhash; |
d9bbfbd1 | 3718 | *orig_subhash = save_hash; |
0ae6b8ce | 3719 | free_ftrace_hash_rcu(new_hash); |
d9bbfbd1 SRG |
3720 | } else { |
3721 | free_ftrace_hash_rcu(save_hash); | |
3722 | } | |
3723 | return ret; | |
3724 | } | |
3725 | ||
3726 | ||
36a367b8 SR |
3727 | u64 ftrace_update_time; |
3728 | u64 ftrace_total_mod_time; | |
3d083395 | 3729 | unsigned long ftrace_update_tot_cnt; |
da537f0a SRV |
3730 | unsigned long ftrace_number_of_pages; |
3731 | unsigned long ftrace_number_of_groups; | |
3d083395 | 3732 | |
8c4f3c3f | 3733 | static inline int ops_traces_mod(struct ftrace_ops *ops) |
f7bc8b61 | 3734 | { |
8c4f3c3f SRRH |
3735 | /* |
3736 | * An empty filter_hash will default to tracing the module. | |
3737 | * But a notrace hash requires testing individual module functions. | |
3738 | */ | |
33b7f99c SRRH |
3739 | return ftrace_hash_empty(ops->func_hash->filter_hash) && |
3740 | ftrace_hash_empty(ops->func_hash->notrace_hash); | |
8c4f3c3f SRRH |
3741 | } |
3742 | ||
1dc43cf0 | 3743 | static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs) |
3d083395 | 3744 | { |
67ccddf8 | 3745 | bool init_nop = ftrace_need_init_nop(); |
85ae32ae | 3746 | struct ftrace_page *pg; |
e94142a6 | 3747 | struct dyn_ftrace *p; |
36a367b8 | 3748 | u64 start, stop, update_time; |
1dc43cf0 | 3749 | unsigned long update_cnt = 0; |
b7ffffbb | 3750 | unsigned long rec_flags = 0; |
85ae32ae | 3751 | int i; |
f7bc8b61 | 3752 | |
b7ffffbb SRRH |
3753 | start = ftrace_now(raw_smp_processor_id()); |
3754 | ||
f7bc8b61 | 3755 | /* |
b7ffffbb SRRH |
3756 | * When a module is loaded, this function is called to convert |
3757 | * the calls to mcount in its text to nops, and also to create | |
3758 | * an entry in the ftrace data. Now, if ftrace is activated | |
3759 | * after this call, but before the module sets its text to | |
3760 | * read-only, the modification needed to enable ftrace can fail if | |
3761 | * the text is set read-only while ftrace is converting the calls. | |
3762 | * To prevent this, the module's records are set as disabled | |
3763 | * and will be enabled after the call to set the module's text | |
3764 | * to read-only. | |
f7bc8b61 | 3765 | */ |
b7ffffbb SRRH |
3766 | if (mod) |
3767 | rec_flags |= FTRACE_FL_DISABLED; | |
3d083395 | 3768 | |
1dc43cf0 | 3769 | for (pg = new_pgs; pg; pg = pg->next) { |
3d083395 | 3770 | |
85ae32ae | 3771 | for (i = 0; i < pg->index; i++) { |
8c4f3c3f | 3772 | |
85ae32ae SR |
3773 | /* If something went wrong, bail without enabling anything */ |
3774 | if (unlikely(ftrace_disabled)) | |
3775 | return -1; | |
f22f9a89 | 3776 | |
85ae32ae | 3777 | p = &pg->records[i]; |
b7ffffbb | 3778 | p->flags = rec_flags; |
f22f9a89 | 3779 | |
85ae32ae SR |
3780 | /* |
3781 | * Do the initial record conversion from mcount jump | |
3782 | * to the NOP instructions. | |
3783 | */ | |
67ccddf8 | 3784 | if (init_nop && !ftrace_nop_initialize(mod, p)) |
85ae32ae | 3785 | break; |
5cb084bb | 3786 | |
1dc43cf0 | 3787 | update_cnt++; |
5cb084bb | 3788 | } |
3d083395 SR |
3789 | } |
3790 | ||
750ed1a4 | 3791 | stop = ftrace_now(raw_smp_processor_id()); |
36a367b8 SR |
3792 | update_time = stop - start; |
3793 | if (mod) | |
3794 | ftrace_total_mod_time += update_time; | |
3795 | else | |
3796 | ftrace_update_time = update_time; | |
1dc43cf0 | 3797 | ftrace_update_tot_cnt += update_cnt; |
3d083395 | 3798 | |
16444a8a ACM |
3799 | return 0; |
3800 | } | |
3801 | ||
a7900875 | 3802 | static int ftrace_allocate_records(struct ftrace_page *pg, int count) |
3c1720f0 | 3803 | { |
a7900875 | 3804 | int order; |
7ba031e8 | 3805 | int pages; |
3c1720f0 | 3806 | int cnt; |
3c1720f0 | 3807 | |
a7900875 SR |
3808 | if (WARN_ON(!count)) |
3809 | return -EINVAL; | |
3810 | ||
ceaaa129 | 3811 | /* We want to fill as much as possible, with no empty pages */ |
b40c6eab | 3812 | pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE); |
ceaaa129 | 3813 | order = fls(pages) - 1; |
3c1720f0 | 3814 | |
a7900875 SR |
3815 | again: |
3816 | pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); | |
3c1720f0 | 3817 | |
a7900875 SR |
3818 | if (!pg->records) { |
3819 | /* if we can't allocate this size, try something smaller */ | |
3820 | if (!order) | |
3821 | return -ENOMEM; | |
bcea02b0 | 3822 | order--; |
a7900875 SR |
3823 | goto again; |
3824 | } | |
3c1720f0 | 3825 | |
da537f0a SRV |
3826 | ftrace_number_of_pages += 1 << order; |
3827 | ftrace_number_of_groups++; | |
3828 | ||
a7900875 | 3829 | cnt = (PAGE_SIZE << order) / ENTRY_SIZE; |
db42523b | 3830 | pg->order = order; |
3c1720f0 | 3831 | |
a7900875 SR |
3832 | if (cnt > count) |
3833 | cnt = count; | |
3834 | ||
3835 | return cnt; | |
3836 | } | |
3837 | ||
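/*
 * Illustrative arithmetic (entry size varies by arch and config):
 * with 4K pages and a 16-byte struct dyn_ftrace, ENTRIES_PER_PAGE is
 * 256. For count = 1000: pages = DIV_ROUND_UP(1000, 256) = 4, so
 * order = fls(4) - 1 = 2, and the 4-page group can hold
 * (PAGE_SIZE << 2) / ENTRY_SIZE = 1024 records.
 */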
26efd79c ZY |
3838 | static void ftrace_free_pages(struct ftrace_page *pages) |
3839 | { | |
3840 | struct ftrace_page *pg = pages; | |
3841 | ||
3842 | while (pg) { | |
3843 | if (pg->records) { | |
3844 | free_pages((unsigned long)pg->records, pg->order); | |
3845 | ftrace_number_of_pages -= 1 << pg->order; | |
3846 | } | |
3847 | pages = pg->next; | |
3848 | kfree(pg); | |
3849 | pg = pages; | |
3850 | ftrace_number_of_groups--; | |
3851 | } | |
3852 | } | |
3853 | ||
a7900875 SR |
3854 | static struct ftrace_page * |
3855 | ftrace_allocate_pages(unsigned long num_to_init) | |
3856 | { | |
3857 | struct ftrace_page *start_pg; | |
3858 | struct ftrace_page *pg; | |
a7900875 SR |
3859 | int cnt; |
3860 | ||
3861 | if (!num_to_init) | |
9efb85c5 | 3862 | return NULL; |
a7900875 SR |
3863 | |
3864 | start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL); | |
3865 | if (!pg) | |
3866 | return NULL; | |
3867 | ||
3868 | /* | |
3869 | * Try to allocate as much as possible in one contiguous | |
3870 | * location that fills in all of the space. We want to | |
3871 | * waste as little space as possible. | |
3872 | */ | |
3873 | for (;;) { | |
3874 | cnt = ftrace_allocate_records(pg, num_to_init); | |
3875 | if (cnt < 0) | |
3876 | goto free_pages; | |
3877 | ||
3878 | num_to_init -= cnt; | |
3879 | if (!num_to_init) | |
3c1720f0 SR |
3880 | break; |
3881 | ||
a7900875 SR |
3882 | pg->next = kzalloc(sizeof(*pg), GFP_KERNEL); |
3883 | if (!pg->next) | |
3884 | goto free_pages; | |
3885 | ||
3c1720f0 SR |
3886 | pg = pg->next; |
3887 | } | |
3888 | ||
a7900875 SR |
3889 | return start_pg; |
3890 | ||
3891 | free_pages: | |
26efd79c | 3892 | ftrace_free_pages(start_pg); |
a7900875 SR |
3893 | pr_info("ftrace: FAILED to allocate memory for functions\n"); |
3894 | return NULL; | |
3895 | } | |
3896 | ||
5072c59f SR |
3897 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ |
3898 | ||
3899 | struct ftrace_iterator { | |
98c4fd04 | 3900 | loff_t pos; |
4aeb6967 | 3901 | loff_t func_pos; |
5985ea8b | 3902 | loff_t mod_pos; |
4aeb6967 SR |
3903 | struct ftrace_page *pg; |
3904 | struct dyn_ftrace *func; | |
3905 | struct ftrace_func_probe *probe; | |
eee8ded1 | 3906 | struct ftrace_func_entry *probe_entry; |
4aeb6967 | 3907 | struct trace_parser parser; |
1cf41dd7 | 3908 | struct ftrace_hash *hash; |
33dc9b12 | 3909 | struct ftrace_ops *ops; |
5985ea8b SRV |
3910 | struct trace_array *tr; |
3911 | struct list_head *mod_list; | |
eee8ded1 | 3912 | int pidx; |
4aeb6967 SR |
3913 | int idx; |
3914 | unsigned flags; | |
5072c59f SR |
3915 | }; |
3916 | ||
8fc0c701 | 3917 | static void * |
eee8ded1 | 3918 | t_probe_next(struct seq_file *m, loff_t *pos) |
8fc0c701 SR |
3919 | { |
3920 | struct ftrace_iterator *iter = m->private; | |
d2afd57a | 3921 | struct trace_array *tr = iter->ops->private; |
04ec7bb6 | 3922 | struct list_head *func_probes; |
eee8ded1 SRV |
3923 | struct ftrace_hash *hash; |
3924 | struct list_head *next; | |
4aeb6967 | 3925 | struct hlist_node *hnd = NULL; |
8fc0c701 | 3926 | struct hlist_head *hhd; |
eee8ded1 | 3927 | int size; |
8fc0c701 | 3928 | |
8fc0c701 | 3929 | (*pos)++; |
98c4fd04 | 3930 | iter->pos = *pos; |
8fc0c701 | 3931 | |
04ec7bb6 | 3932 | if (!tr) |
8fc0c701 SR |
3933 | return NULL; |
3934 | ||
04ec7bb6 SRV |
3935 | func_probes = &tr->func_probes; |
3936 | if (list_empty(func_probes)) | |
8fc0c701 SR |
3937 | return NULL; |
3938 | ||
eee8ded1 | 3939 | if (!iter->probe) { |
04ec7bb6 | 3940 | next = func_probes->next; |
7b60f3d8 | 3941 | iter->probe = list_entry(next, struct ftrace_func_probe, list); |
eee8ded1 SRV |
3942 | } |
3943 | ||
3944 | if (iter->probe_entry) | |
3945 | hnd = &iter->probe_entry->hlist; | |
3946 | ||
3947 | hash = iter->probe->ops.func_hash->filter_hash; | |
7bd46644 | 3948 | |
372e0d01 SRV |
3949 | /* |
3950 | * A probe being registered may temporarily have an empty hash | |
3951 | * and it's at the end of the func_probes list. | |
3952 | */ | |
3953 | if (!hash || hash == EMPTY_HASH) | |
7bd46644 NR |
3954 | return NULL; |
3955 | ||
eee8ded1 SRV |
3956 | size = 1 << hash->size_bits; |
3957 | ||
3958 | retry: | |
3959 | if (iter->pidx >= size) { | |
04ec7bb6 | 3960 | if (iter->probe->list.next == func_probes) |
eee8ded1 SRV |
3961 | return NULL; |
3962 | next = iter->probe->list.next; | |
7b60f3d8 | 3963 | iter->probe = list_entry(next, struct ftrace_func_probe, list); |
eee8ded1 SRV |
3964 | hash = iter->probe->ops.func_hash->filter_hash; |
3965 | size = 1 << hash->size_bits; | |
3966 | iter->pidx = 0; | |
3967 | } | |
3968 | ||
3969 | hhd = &hash->buckets[iter->pidx]; | |
8fc0c701 SR |
3970 | |
3971 | if (hlist_empty(hhd)) { | |
eee8ded1 | 3972 | iter->pidx++; |
8fc0c701 SR |
3973 | hnd = NULL; |
3974 | goto retry; | |
3975 | } | |
3976 | ||
3977 | if (!hnd) | |
3978 | hnd = hhd->first; | |
3979 | else { | |
3980 | hnd = hnd->next; | |
3981 | if (!hnd) { | |
eee8ded1 | 3982 | iter->pidx++; |
8fc0c701 SR |
3983 | goto retry; |
3984 | } | |
3985 | } | |
3986 | ||
4aeb6967 SR |
3987 | if (WARN_ON_ONCE(!hnd)) |
3988 | return NULL; | |
3989 | ||
eee8ded1 | 3990 | iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist); |
4aeb6967 SR |
3991 | |
3992 | return iter; | |
8fc0c701 SR |
3993 | } |
3994 | ||
eee8ded1 | 3995 | static void *t_probe_start(struct seq_file *m, loff_t *pos) |
8fc0c701 SR |
3996 | { |
3997 | struct ftrace_iterator *iter = m->private; | |
3998 | void *p = NULL; | |
d82d6244 LZ |
3999 | loff_t l; |
4000 | ||
eee8ded1 | 4001 | if (!(iter->flags & FTRACE_ITER_DO_PROBES)) |
69a3083c SR |
4002 | return NULL; |
4003 | ||
5985ea8b | 4004 | if (iter->mod_pos > *pos) |
2bccfffd | 4005 | return NULL; |
8fc0c701 | 4006 | |
eee8ded1 SRV |
4007 | iter->probe = NULL; |
4008 | iter->probe_entry = NULL; | |
4009 | iter->pidx = 0; | |
5985ea8b | 4010 | for (l = 0; l <= (*pos - iter->mod_pos); ) { |
eee8ded1 | 4011 | p = t_probe_next(m, &l); |
d82d6244 LZ |
4012 | if (!p) |
4013 | break; | |
4014 | } | |
4aeb6967 SR |
4015 | if (!p) |
4016 | return NULL; | |
4017 | ||
98c4fd04 | 4018 | /* Only set this if we have an item */ |
eee8ded1 | 4019 | iter->flags |= FTRACE_ITER_PROBE; |
98c4fd04 | 4020 | |
4aeb6967 | 4021 | return iter; |
8fc0c701 SR |
4022 | } |
4023 | ||
4aeb6967 | 4024 | static int |
eee8ded1 | 4025 | t_probe_show(struct seq_file *m, struct ftrace_iterator *iter) |
8fc0c701 | 4026 | { |
eee8ded1 | 4027 | struct ftrace_func_entry *probe_entry; |
7b60f3d8 SRV |
4028 | struct ftrace_probe_ops *probe_ops; |
4029 | struct ftrace_func_probe *probe; | |
8fc0c701 | 4030 | |
eee8ded1 SRV |
4031 | probe = iter->probe; |
4032 | probe_entry = iter->probe_entry; | |
8fc0c701 | 4033 | |
eee8ded1 | 4034 | if (WARN_ON_ONCE(!probe || !probe_entry)) |
4aeb6967 | 4035 | return -EIO; |
8fc0c701 | 4036 | |
7b60f3d8 | 4037 | probe_ops = probe->probe_ops; |
809dcf29 | 4038 | |
7b60f3d8 | 4039 | if (probe_ops->print) |
6e444319 | 4040 | return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data); |
8fc0c701 | 4041 | |
7b60f3d8 SRV |
4042 | seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip, |
4043 | (void *)probe_ops->func); | |
8fc0c701 SR |
4044 | |
4045 | return 0; | |
4046 | } | |
4047 | ||
5985ea8b SRV |
4048 | static void * |
4049 | t_mod_next(struct seq_file *m, loff_t *pos) | |
4050 | { | |
4051 | struct ftrace_iterator *iter = m->private; | |
4052 | struct trace_array *tr = iter->tr; | |
4053 | ||
4054 | (*pos)++; | |
4055 | iter->pos = *pos; | |
4056 | ||
4057 | iter->mod_list = iter->mod_list->next; | |
4058 | ||
4059 | if (iter->mod_list == &tr->mod_trace || | |
4060 | iter->mod_list == &tr->mod_notrace) { | |
4061 | iter->flags &= ~FTRACE_ITER_MOD; | |
4062 | return NULL; | |
4063 | } | |
4064 | ||
4065 | iter->mod_pos = *pos; | |
4066 | ||
4067 | return iter; | |
4068 | } | |
4069 | ||
4070 | static void *t_mod_start(struct seq_file *m, loff_t *pos) | |
4071 | { | |
4072 | struct ftrace_iterator *iter = m->private; | |
4073 | void *p = NULL; | |
4074 | loff_t l; | |
4075 | ||
4076 | if (iter->func_pos > *pos) | |
4077 | return NULL; | |
4078 | ||
4079 | iter->mod_pos = iter->func_pos; | |
4080 | ||
4081 | /* probes are only available if tr is set */ | |
4082 | if (!iter->tr) | |
4083 | return NULL; | |
4084 | ||
4085 | for (l = 0; l <= (*pos - iter->func_pos); ) { | |
4086 | p = t_mod_next(m, &l); | |
4087 | if (!p) | |
4088 | break; | |
4089 | } | |
4090 | if (!p) { | |
4091 | iter->flags &= ~FTRACE_ITER_MOD; | |
4092 | return t_probe_start(m, pos); | |
4093 | } | |
4094 | ||
4095 | /* Only set this if we have an item */ | |
4096 | iter->flags |= FTRACE_ITER_MOD; | |
4097 | ||
4098 | return iter; | |
4099 | } | |
4100 | ||
4101 | static int | |
4102 | t_mod_show(struct seq_file *m, struct ftrace_iterator *iter) | |
4103 | { | |
4104 | struct ftrace_mod_load *ftrace_mod; | |
4105 | struct trace_array *tr = iter->tr; | |
4106 | ||
4107 | if (WARN_ON_ONCE(!iter->mod_list) || | |
4108 | iter->mod_list == &tr->mod_trace || | |
4109 | iter->mod_list == &tr->mod_notrace) | |
4110 | return -EIO; | |
4111 | ||
4112 | ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list); | |
4113 | ||
4114 | if (ftrace_mod->func) | |
4115 | seq_printf(m, "%s", ftrace_mod->func); | |
4116 | else | |
4117 | seq_putc(m, '*'); | |
4118 | ||
4119 | seq_printf(m, ":mod:%s\n", ftrace_mod->module); | |
4120 | ||
4121 | return 0; | |
4122 | } | |
4123 | ||
e309b41d | 4124 | static void * |
5bd84629 | 4125 | t_func_next(struct seq_file *m, loff_t *pos) |
5072c59f SR |
4126 | { |
4127 | struct ftrace_iterator *iter = m->private; | |
4128 | struct dyn_ftrace *rec = NULL; | |
4129 | ||
4130 | (*pos)++; | |
0c75a3ed | 4131 | |
5072c59f SR |
4132 | retry: |
4133 | if (iter->idx >= iter->pg->index) { | |
4134 | if (iter->pg->next) { | |
4135 | iter->pg = iter->pg->next; | |
4136 | iter->idx = 0; | |
4137 | goto retry; | |
4138 | } | |
4139 | } else { | |
4140 | rec = &iter->pg->records[iter->idx++]; | |
c20489da SRV |
4141 | if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) && |
4142 | !ftrace_lookup_ip(iter->hash, rec->ip)) || | |
647bcd03 SR |
4143 | |
4144 | ((iter->flags & FTRACE_ITER_ENABLED) && | |
e11b521a SRG |
4145 | !(rec->flags & FTRACE_FL_ENABLED)) || |
4146 | ||
4147 | ((iter->flags & FTRACE_ITER_TOUCHED) && | |
4148 | !(rec->flags & FTRACE_FL_TOUCHED))) { | |
647bcd03 | 4149 | |
5072c59f SR |
4150 | rec = NULL; |
4151 | goto retry; | |
4152 | } | |
4153 | } | |
4154 | ||
4aeb6967 | 4155 | if (!rec) |
5bd84629 | 4156 | return NULL; |
4aeb6967 | 4157 | |
5bd84629 | 4158 | iter->pos = iter->func_pos = *pos; |
4aeb6967 SR |
4159 | iter->func = rec; |
4160 | ||
4161 | return iter; | |
5072c59f SR |
4162 | } |
4163 | ||
5bd84629 SRV |
4164 | static void * |
4165 | t_next(struct seq_file *m, void *v, loff_t *pos) | |
4166 | { | |
4167 | struct ftrace_iterator *iter = m->private; | |
5985ea8b | 4168 | loff_t l = *pos; /* t_probe_start() must use original pos */ |
5bd84629 SRV |
4169 | void *ret; |
4170 | ||
4171 | if (unlikely(ftrace_disabled)) | |
4172 | return NULL; | |
4173 | ||
eee8ded1 SRV |
4174 | if (iter->flags & FTRACE_ITER_PROBE) |
4175 | return t_probe_next(m, pos); | |
5bd84629 | 4176 | |
5985ea8b SRV |
4177 | if (iter->flags & FTRACE_ITER_MOD) |
4178 | return t_mod_next(m, pos); | |
4179 | ||
5bd84629 | 4180 | if (iter->flags & FTRACE_ITER_PRINTALL) { |
eee8ded1 | 4181 | /* next must increment pos, and t_probe_start does not */ |
5bd84629 | 4182 | (*pos)++; |
5985ea8b | 4183 | return t_mod_start(m, &l); |
5bd84629 SRV |
4184 | } |
4185 | ||
4186 | ret = t_func_next(m, pos); | |
4187 | ||
4188 | if (!ret) | |
5985ea8b | 4189 | return t_mod_start(m, &l); |
5bd84629 SRV |
4190 | |
4191 | return ret; | |
4192 | } | |
4193 | ||
98c4fd04 SR |
4194 | static void reset_iter_read(struct ftrace_iterator *iter) |
4195 | { | |
4196 | iter->pos = 0; | |
4197 | iter->func_pos = 0; | |
5985ea8b | 4198 | iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD); |
5072c59f SR |
4199 | } |
4200 | ||
4201 | static void *t_start(struct seq_file *m, loff_t *pos) | |
4202 | { | |
4203 | struct ftrace_iterator *iter = m->private; | |
4204 | void *p = NULL; | |
694ce0a5 | 4205 | loff_t l; |
5072c59f | 4206 | |
8fc0c701 | 4207 | mutex_lock(&ftrace_lock); |
45a4a237 SR |
4208 | |
4209 | if (unlikely(ftrace_disabled)) | |
4210 | return NULL; | |
4211 | ||
98c4fd04 SR |
4212 | /* |
4213 | * If an lseek was done, then reset and start from beginning. | |
4214 | */ | |
4215 | if (*pos < iter->pos) | |
4216 | reset_iter_read(iter); | |
4217 | ||
0c75a3ed SR |
4218 | /* |
4219 | * For set_ftrace_filter reading, if we have the filter | |
4220 | * off, we can take a shortcut and just print out that all |
4221 | * functions are enabled. | |
4222 | */ | |
c20489da SRV |
4223 | if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) && |
4224 | ftrace_hash_empty(iter->hash)) { | |
43ff926a | 4225 | iter->func_pos = 1; /* Account for the message */ |
0c75a3ed | 4226 | if (*pos > 0) |
5985ea8b | 4227 | return t_mod_start(m, pos); |
0c75a3ed | 4228 | iter->flags |= FTRACE_ITER_PRINTALL; |
df091625 | 4229 | /* reset in case of seek/pread */ |
eee8ded1 | 4230 | iter->flags &= ~FTRACE_ITER_PROBE; |
0c75a3ed SR |
4231 | return iter; |
4232 | } | |
4233 | ||
5985ea8b SRV |
4234 | if (iter->flags & FTRACE_ITER_MOD) |
4235 | return t_mod_start(m, pos); | |
8fc0c701 | 4236 | |
98c4fd04 SR |
4237 | /* |
4238 | * Unfortunately, we need to restart at ftrace_pages_start | |
4239 | * every time we let go of the ftrace_lock. This is because |
4240 | * those pointers can change without the lock. | |
4241 | */ | |
694ce0a5 LZ |
4242 | iter->pg = ftrace_pages_start; |
4243 | iter->idx = 0; | |
4244 | for (l = 0; l <= *pos; ) { | |
5bd84629 | 4245 | p = t_func_next(m, &l); |
694ce0a5 LZ |
4246 | if (!p) |
4247 | break; | |
50cdaf08 | 4248 | } |
5821e1b7 | 4249 | |
69a3083c | 4250 | if (!p) |
5985ea8b | 4251 | return t_mod_start(m, pos); |
4aeb6967 SR |
4252 | |
4253 | return iter; | |
5072c59f SR |
4254 | } |
4255 | ||
4256 | static void t_stop(struct seq_file *m, void *p) | |
4257 | { | |
8fc0c701 | 4258 | mutex_unlock(&ftrace_lock); |
5072c59f SR |
4259 | } |
4260 | ||
15d5b02c SRRH |
4261 | void * __weak |
4262 | arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec) | |
4263 | { | |
4264 | return NULL; | |
4265 | } | |
4266 | ||
4267 | static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops, | |
4268 | struct dyn_ftrace *rec) | |
4269 | { | |
4270 | void *ptr; | |
4271 | ||
4272 | ptr = arch_ftrace_trampoline_func(ops, rec); | |
4273 | if (ptr) | |
4274 | seq_printf(m, " ->%pS", ptr); | |
4275 | } | |
4276 | ||
b39181f7 SRG |
4277 | #ifdef FTRACE_MCOUNT_MAX_OFFSET |
4278 | /* | |
4279 | * Weak functions can still have an mcount/fentry that is saved in | |
4280 | * the __mcount_loc section. These can be detected by having a | |
4281 | * symbol offset greater than FTRACE_MCOUNT_MAX_OFFSET, as the | |
4282 | * symbol found by kallsyms is not the function that the mcount/fentry | |
4283 | * is part of. The offset is much greater in these cases. | |
4284 | * | |
4285 | * Test the record to make sure that the ip points to a valid kallsyms | |
4286 | * symbol and, if not, mark the record disabled. | |
4287 | */ | |
4288 | static int test_for_valid_rec(struct dyn_ftrace *rec) | |
4289 | { | |
4290 | char str[KSYM_SYMBOL_LEN]; | |
4291 | unsigned long offset; | |
4292 | const char *ret; | |
4293 | ||
4294 | ret = kallsyms_lookup(rec->ip, NULL, &offset, NULL, str); | |
4295 | ||
4296 | /* Weak functions can cause invalid addresses */ | |
4297 | if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) { | |
4298 | rec->flags |= FTRACE_FL_DISABLED; | |
4299 | return 0; | |
4300 | } | |
4301 | return 1; | |
4302 | } | |
4303 | ||
4304 | static struct workqueue_struct *ftrace_check_wq __initdata; | |
4305 | static struct work_struct ftrace_check_work __initdata; | |
4306 | ||
4307 | /* | |
4308 | * Scan all the mcount/fentry entries to make sure they are valid. | |
4309 | */ | |
4310 | static __init void ftrace_check_work_func(struct work_struct *work) | |
4311 | { | |
4312 | struct ftrace_page *pg; | |
4313 | struct dyn_ftrace *rec; | |
4314 | ||
4315 | mutex_lock(&ftrace_lock); | |
4316 | do_for_each_ftrace_rec(pg, rec) { | |
4317 | test_for_valid_rec(rec); | |
4318 | } while_for_each_ftrace_rec(); | |
4319 | mutex_unlock(&ftrace_lock); | |
4320 | } | |
4321 | ||
4322 | static int __init ftrace_check_for_weak_functions(void) | |
4323 | { | |
4324 | INIT_WORK(&ftrace_check_work, ftrace_check_work_func); | |
4325 | ||
4326 | ftrace_check_wq = alloc_workqueue("ftrace_check_wq", WQ_UNBOUND, 0); | |
4327 | ||
4328 | queue_work(ftrace_check_wq, &ftrace_check_work); | |
4329 | return 0; | |
4330 | } | |
4331 | ||
4332 | static int __init ftrace_check_sync(void) | |
4333 | { | |
4334 | /* Make sure the ftrace_check updates are finished */ | |
4335 | if (ftrace_check_wq) | |
4336 | destroy_workqueue(ftrace_check_wq); | |
4337 | return 0; | |
4338 | } | |
4339 | ||
4340 | late_initcall_sync(ftrace_check_sync); | |
4341 | subsys_initcall(ftrace_check_for_weak_functions); | |
4342 | ||
4343 | static int print_rec(struct seq_file *m, unsigned long ip) | |
4344 | { | |
4345 | unsigned long offset; | |
4346 | char str[KSYM_SYMBOL_LEN]; | |
4347 | char *modname; | |
4348 | const char *ret; | |
4349 | ||
4350 | ret = kallsyms_lookup(ip, NULL, &offset, &modname, str); | |
4351 | /* Weak functions can cause invalid addresses */ | |
4352 | if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) { | |
4353 | snprintf(str, KSYM_SYMBOL_LEN, "%s_%ld", | |
4354 | FTRACE_INVALID_FUNCTION, offset); | |
4355 | ret = NULL; | |
4356 | } | |
4357 | ||
4358 | seq_puts(m, str); | |
4359 | if (modname) | |
4360 | seq_printf(m, " [%s]", modname); | |
4361 | return ret == NULL ? -1 : 0; | |
4362 | } | |
4363 | #else | |
4364 | static inline int test_for_valid_rec(struct dyn_ftrace *rec) | |
4365 | { | |
4366 | return 1; | |
4367 | } | |
4368 | ||
4369 | static inline int print_rec(struct seq_file *m, unsigned long ip) | |
4370 | { | |
4371 | seq_printf(m, "%ps", (void *)ip); | |
4372 | return 0; | |
4373 | } | |
4374 | #endif | |
4375 | ||
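/*
 * With FTRACE_MCOUNT_MAX_OFFSET, a record whose ip does not resolve to
 * a real function is printed in the tracefs files as, for example
 * (offset illustrative):
 *
 *	__ftrace_invalid_address___548
 */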
88cefd99 SR |
4376 | static void print_subops(struct seq_file *m, struct ftrace_ops *ops, struct dyn_ftrace *rec) |
4377 | { | |
4378 | struct ftrace_ops *subops; | |
4379 | bool first = true; | |
4380 | ||
4381 | list_for_each_entry(subops, &ops->subop_list, list) { | |
4382 | if (!((subops->flags & FTRACE_OPS_FL_ENABLED) && | |
4383 | hash_contains_ip(rec->ip, subops->func_hash))) | |
4384 | continue; | |
4385 | if (first) { | |
4386 | seq_printf(m, "\tsubops:"); | |
4387 | first = false; | |
4388 | } | |
4389 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | |
4390 | if (subops->flags & FTRACE_OPS_FL_GRAPH) { | |
4391 | struct fgraph_ops *gops; | |
4392 | ||
4393 | gops = container_of(subops, struct fgraph_ops, ops); | |
4394 | seq_printf(m, " {ent:%pS ret:%pS}", | |
4395 | (void *)gops->entryfunc, | |
4396 | (void *)gops->retfunc); | |
4397 | continue; | |
4398 | } | |
4399 | #endif | |
4400 | if (subops->trampoline) { | |
4401 | seq_printf(m, " {%pS (%pS)}", | |
4402 | (void *)subops->trampoline, | |
4403 | (void *)subops->func); | |
4404 | add_trampoline_func(m, subops, rec); | |
4405 | } else { | |
4406 | seq_printf(m, " {%pS}", | |
4407 | (void *)subops->func); | |
4408 | } | |
4409 | } | |
4410 | } | |
4411 | ||
5072c59f SR |
4412 | static int t_show(struct seq_file *m, void *v) |
4413 | { | |
0c75a3ed | 4414 | struct ftrace_iterator *iter = m->private; |
4aeb6967 | 4415 | struct dyn_ftrace *rec; |
5072c59f | 4416 | |
eee8ded1 SRV |
4417 | if (iter->flags & FTRACE_ITER_PROBE) |
4418 | return t_probe_show(m, iter); | |
8fc0c701 | 4419 | |
5985ea8b SRV |
4420 | if (iter->flags & FTRACE_ITER_MOD) |
4421 | return t_mod_show(m, iter); | |
4422 | ||
0c75a3ed | 4423 | if (iter->flags & FTRACE_ITER_PRINTALL) { |
8c006cf7 | 4424 | if (iter->flags & FTRACE_ITER_NOTRACE) |
fa6f0cc7 | 4425 | seq_puts(m, "#### no functions disabled ####\n"); |
8c006cf7 | 4426 | else |
fa6f0cc7 | 4427 | seq_puts(m, "#### all functions enabled ####\n"); |
0c75a3ed SR |
4428 | return 0; |
4429 | } | |
4430 | ||
4aeb6967 SR |
4431 | rec = iter->func; |
4432 | ||
5072c59f SR |
4433 | if (!rec) |
4434 | return 0; | |
4435 | ||
83f74441 JO |
4436 | if (iter->flags & FTRACE_ITER_ADDRS) |
4437 | seq_printf(m, "%lx ", rec->ip); | |
4438 | ||
b39181f7 SRG |
4439 | if (print_rec(m, rec->ip)) { |
4440 | /* This should only happen when a rec is disabled */ | |
4441 | WARN_ON_ONCE(!(rec->flags & FTRACE_FL_DISABLED)); | |
4442 | seq_putc(m, '\n'); | |
4443 | return 0; | |
4444 | } | |
4445 | ||
e11b521a | 4446 | if (iter->flags & (FTRACE_ITER_ENABLED | FTRACE_ITER_TOUCHED)) { |
030f4e1c | 4447 | struct ftrace_ops *ops; |
15d5b02c | 4448 | |
6ce2c04f | 4449 | seq_printf(m, " (%ld)%s%s%s%s%s", |
0376bde1 | 4450 | ftrace_rec_count(rec), |
f8b8be8a | 4451 | rec->flags & FTRACE_FL_REGS ? " R" : " ", |
763e34e7 | 4452 | rec->flags & FTRACE_FL_IPMODIFY ? " I" : " ", |
cbad0fb2 | 4453 | rec->flags & FTRACE_FL_DIRECT ? " D" : " ", |
6ce2c04f SRG |
4454 | rec->flags & FTRACE_FL_CALL_OPS ? " O" : " ", |
4455 | rec->flags & FTRACE_FL_MODIFIED ? " M " : " "); | |
9674b2fa | 4456 | if (rec->flags & FTRACE_FL_TRAMP_EN) { |
5fecaa04 | 4457 | ops = ftrace_find_tramp_ops_any(rec); |
39daa7b9 SRRH |
4458 | if (ops) { |
4459 | do { | |
4460 | seq_printf(m, "\ttramp: %pS (%pS)", | |
4461 | (void *)ops->trampoline, | |
4462 | (void *)ops->func); | |
030f4e1c | 4463 | add_trampoline_func(m, ops, rec); |
88cefd99 | 4464 | print_subops(m, ops, rec); |
39daa7b9 SRRH |
4465 | ops = ftrace_find_tramp_ops_next(rec, ops); |
4466 | } while (ops); | |
4467 | } else | |
fa6f0cc7 | 4468 | seq_puts(m, "\ttramp: ERROR!"); |
030f4e1c SRRH |
4469 | } else { |
4470 | add_trampoline_func(m, NULL, rec); | |
9674b2fa | 4471 | } |
cbad0fb2 MR |
4472 | if (rec->flags & FTRACE_FL_CALL_OPS_EN) { |
4473 | ops = ftrace_find_unique_ops(rec); | |
4474 | if (ops) { | |
4475 | seq_printf(m, "\tops: %pS (%pS)", | |
4476 | ops, ops->func); | |
88cefd99 | 4477 | print_subops(m, ops, rec); |
cbad0fb2 MR |
4478 | } else { |
4479 | seq_puts(m, "\tops: ERROR!"); | |
4480 | } | |
4481 | } | |
763e34e7 SRV |
4482 | if (rec->flags & FTRACE_FL_DIRECT) { |
4483 | unsigned long direct; | |
4484 | ||
ff205766 | 4485 | direct = ftrace_find_rec_direct(rec->ip); |
763e34e7 SRV |
4486 | if (direct) |
4487 | seq_printf(m, "\n\tdirect-->%pS", (void *)direct); | |
4488 | } | |
026bb845 | 4489 | } |
9674b2fa | 4490 | |
fa6f0cc7 | 4491 | seq_putc(m, '\n'); |
5072c59f SR |
4492 | |
4493 | return 0; | |
4494 | } | |
4495 | ||
88e9d34c | 4496 | static const struct seq_operations show_ftrace_seq_ops = { |
5072c59f SR |
4497 | .start = t_start, |
4498 | .next = t_next, | |
4499 | .stop = t_stop, | |
4500 | .show = t_show, | |
4501 | }; | |
4502 | ||
e309b41d | 4503 | static int |
5072c59f SR |
4504 | ftrace_avail_open(struct inode *inode, struct file *file) |
4505 | { | |
4506 | struct ftrace_iterator *iter; | |
17911ff3 SRV |
4507 | int ret; |
4508 | ||
4509 | ret = security_locked_down(LOCKDOWN_TRACEFS); | |
4510 | if (ret) | |
4511 | return ret; | |
5072c59f | 4512 | |
4eebcc81 SR |
4513 | if (unlikely(ftrace_disabled)) |
4514 | return -ENODEV; | |
4515 | ||
50e18b94 | 4516 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); |
c1bc5919 SRV |
4517 | if (!iter) |
4518 | return -ENOMEM; | |
5072c59f | 4519 | |
c1bc5919 SRV |
4520 | iter->pg = ftrace_pages_start; |
4521 | iter->ops = &global_ops; | |
4522 | ||
4523 | return 0; | |
5072c59f SR |
4524 | } |
4525 | ||
647bcd03 SR |
4526 | static int |
4527 | ftrace_enabled_open(struct inode *inode, struct file *file) | |
4528 | { | |
4529 | struct ftrace_iterator *iter; | |
647bcd03 | 4530 | |
17911ff3 SRV |
4531 | /* |
4532 | * This shows us what functions are currently being | |
4533 | * traced and by what. Not sure if we want lockdown | |
4534 | * to hide such critical information from an admin.
4535 | * Although, perhaps it can show information we don't | |
4536 | * want people to see, but if something is tracing | |
4537 | * something, we probably want to know about it. | |
4538 | */ | |
4539 | ||
50e18b94 | 4540 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); |
c1bc5919 SRV |
4541 | if (!iter) |
4542 | return -ENOMEM; | |
647bcd03 | 4543 | |
c1bc5919 SRV |
4544 | iter->pg = ftrace_pages_start; |
4545 | iter->flags = FTRACE_ITER_ENABLED; | |
4546 | iter->ops = &global_ops; | |
4547 | ||
4548 | return 0; | |
647bcd03 SR |
4549 | } |
4550 | ||
e11b521a SRG |
4551 | static int |
4552 | ftrace_touched_open(struct inode *inode, struct file *file) | |
4553 | { | |
4554 | struct ftrace_iterator *iter; | |
4555 | ||
4556 | /* | |
4557 | * This shows us what functions have ever been enabled | |
4558 | * (traced, direct, patched, etc). Not sure if we want lockdown | |
4559 | * to hide such critical information from an admin.
4560 | * Although, perhaps it can show information we don't | |
4561 | * want people to see, but if something had traced | |
4562 | * something, we probably want to know about it. | |
4563 | */ | |
4564 | ||
4565 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); | |
4566 | if (!iter) | |
4567 | return -ENOMEM; | |
4568 | ||
4569 | iter->pg = ftrace_pages_start; | |
4570 | iter->flags = FTRACE_ITER_TOUCHED; | |
4571 | iter->ops = &global_ops; | |
4572 | ||
4573 | return 0; | |
4574 | } | |
4575 | ||
83f74441 JO |
4576 | static int |
4577 | ftrace_avail_addrs_open(struct inode *inode, struct file *file) | |
4578 | { | |
4579 | struct ftrace_iterator *iter; | |
4580 | int ret; | |
4581 | ||
4582 | ret = security_locked_down(LOCKDOWN_TRACEFS); | |
4583 | if (ret) | |
4584 | return ret; | |
4585 | ||
4586 | if (unlikely(ftrace_disabled)) | |
4587 | return -ENODEV; | |
4588 | ||
4589 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); | |
4590 | if (!iter) | |
4591 | return -ENOMEM; | |
4592 | ||
4593 | iter->pg = ftrace_pages_start; | |
4594 | iter->flags = FTRACE_ITER_ADDRS; | |
4595 | iter->ops = &global_ops; | |
4596 | ||
4597 | return 0; | |
4598 | } | |
4599 | ||
fc13cb0c SR |
4600 | /** |
4601 | * ftrace_regex_open - initialize function tracer filter files | |
4602 | * @ops: The ftrace_ops that hold the hash filters | |
4603 | * @flag: The type of filter to process | |
4604 | * @inode: The inode, usually passed in to your open routine | |
4605 | * @file: The file, usually passed in to your open routine | |
4606 | * | |
4607 | * ftrace_regex_open() initializes the filter files for the | |
4608 | * @ops. Depending on @flag it may process the filter hash or | |
4609 | * the notrace hash of @ops. When this is called from the open
4610 | * routine, you can use ftrace_filter_write() for the write | |
4611 | * routine if @flag has FTRACE_ITER_FILTER set, or | |
4612 | * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set. | |
098c879e | 4613 | * tracing_lseek() should be used as the lseek routine, and |
fc13cb0c | 4614 | * release must call ftrace_regex_release(). |
d1530413 RD |
4615 | * |
4616 | * Returns: 0 on success or a negative errno value on failure | |
fc13cb0c SR |
4617 | */ |
4618 | int | |
f45948e8 | 4619 | ftrace_regex_open(struct ftrace_ops *ops, int flag, |
1cf41dd7 | 4620 | struct inode *inode, struct file *file) |
5072c59f SR |
4621 | { |
4622 | struct ftrace_iterator *iter; | |
f45948e8 | 4623 | struct ftrace_hash *hash; |
673feb9d SRV |
4624 | struct list_head *mod_head; |
4625 | struct trace_array *tr = ops->private; | |
9ef16693 | 4626 | int ret = -ENOMEM; |
5072c59f | 4627 | |
f04f24fb MH |
4628 | ftrace_ops_init(ops); |
4629 | ||
4eebcc81 SR |
4630 | if (unlikely(ftrace_disabled)) |
4631 | return -ENODEV; | |
4632 | ||
8530dec6 | 4633 | if (tracing_check_open_get_tr(tr)) |
9ef16693 SRV |
4634 | return -ENODEV; |
4635 | ||
5072c59f SR |
4636 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); |
4637 | if (!iter) | |
9ef16693 | 4638 | goto out; |
5072c59f | 4639 | |
9ef16693 SRV |
4640 | if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) |
4641 | goto out; | |
689fd8b6 | 4642 | |
3f2367ba MH |
4643 | iter->ops = ops; |
4644 | iter->flags = flag; | |
5985ea8b | 4645 | iter->tr = tr; |
3f2367ba | 4646 | |
33b7f99c | 4647 | mutex_lock(&ops->func_hash->regex_lock); |
3f2367ba | 4648 | |
673feb9d | 4649 | if (flag & FTRACE_ITER_NOTRACE) { |
33b7f99c | 4650 | hash = ops->func_hash->notrace_hash; |
5985ea8b | 4651 | mod_head = tr ? &tr->mod_notrace : NULL; |
673feb9d | 4652 | } else { |
33b7f99c | 4653 | hash = ops->func_hash->filter_hash; |
5985ea8b | 4654 | mod_head = tr ? &tr->mod_trace : NULL; |
673feb9d | 4655 | } |
f45948e8 | 4656 | |
5985ea8b SRV |
4657 | iter->mod_list = mod_head; |
4658 | ||
33dc9b12 | 4659 | if (file->f_mode & FMODE_WRITE) { |
ef2fbe16 NK |
4660 | const int size_bits = FTRACE_HASH_DEFAULT_BITS; |
4661 | ||
673feb9d | 4662 | if (file->f_flags & O_TRUNC) { |
ef2fbe16 | 4663 | iter->hash = alloc_ftrace_hash(size_bits); |
673feb9d SRV |
4664 | clear_ftrace_mod_list(mod_head); |
4665 | } else { | |
ef2fbe16 | 4666 | iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash); |
673feb9d | 4667 | } |
ef2fbe16 | 4668 | |
33dc9b12 SR |
4669 | if (!iter->hash) { |
4670 | trace_parser_put(&iter->parser); | |
3f2367ba | 4671 | goto out_unlock; |
33dc9b12 | 4672 | } |
c20489da SRV |
4673 | } else |
4674 | iter->hash = hash; | |
1cf41dd7 | 4675 | |
9ef16693 SRV |
4676 | ret = 0; |
4677 | ||
5072c59f SR |
4678 | if (file->f_mode & FMODE_READ) { |
4679 | iter->pg = ftrace_pages_start; | |
5072c59f SR |
4680 | |
4681 | ret = seq_open(file, &show_ftrace_seq_ops); | |
4682 | if (!ret) { | |
4683 | struct seq_file *m = file->private_data; | |
4684 | m->private = iter; | |
79fe249c | 4685 | } else { |
33dc9b12 SR |
4686 | /* Failed */ |
4687 | free_ftrace_hash(iter->hash); | |
79fe249c | 4688 | trace_parser_put(&iter->parser); |
79fe249c | 4689 | } |
5072c59f SR |
4690 | } else |
4691 | file->private_data = iter; | |
3f2367ba MH |
4692 | |
4693 | out_unlock: | |
33b7f99c | 4694 | mutex_unlock(&ops->func_hash->regex_lock); |
5072c59f | 4695 | |
9ef16693 SRV |
4696 | out: |
4697 | if (ret) { | |
4698 | kfree(iter); | |
4699 | if (tr) | |
4700 | trace_array_put(tr); | |
4701 | } | |
4702 | ||
5072c59f SR |
4703 | return ret; |
4704 | } | |
4705 | ||
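As a sketch of the wiring this kernel-doc prescribes, the FTRACE_ITER_FILTER flavor ends up in a file_operations like the following (it mirrors the ftrace_filter_fops pattern; ftrace_filter_open() is defined just below, and this struct is shown only for orientation):

    static const struct file_operations example_filter_fops = {
    	.open    = ftrace_filter_open,   /* ftrace_regex_open(ops, FTRACE_ITER_FILTER | ...) */
    	.read    = seq_read,
    	.write   = ftrace_filter_write,  /* the write routine for FTRACE_ITER_FILTER */
    	.llseek  = tracing_lseek,        /* the prescribed lseek routine */
    	.release = ftrace_regex_release, /* release must call ftrace_regex_release() */
    };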
41c52c0d SR |
4706 | static int |
4707 | ftrace_filter_open(struct inode *inode, struct file *file) | |
4708 | { | |
e3b3e2e8 SRRH |
4709 | struct ftrace_ops *ops = inode->i_private; |
4710 | ||
17911ff3 | 4711 | /* Checks for tracefs lockdown */ |
e3b3e2e8 | 4712 | return ftrace_regex_open(ops, |
eee8ded1 | 4713 | FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES, |
69a3083c | 4714 | inode, file); |
41c52c0d SR |
4715 | } |
4716 | ||
4717 | static int | |
4718 | ftrace_notrace_open(struct inode *inode, struct file *file) | |
4719 | { | |
e3b3e2e8 SRRH |
4720 | struct ftrace_ops *ops = inode->i_private; |
4721 | ||
17911ff3 | 4722 | /* Checks for tracefs lockdown */ |
e3b3e2e8 | 4723 | return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE, |
1cf41dd7 | 4724 | inode, file); |
41c52c0d SR |
4725 | } |
4726 | ||
3ba00929 DS |
4727 | /* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */ |
4728 | struct ftrace_glob { | |
4729 | char *search; | |
4730 | unsigned len; | |
4731 | int type; | |
4732 | }; | |
4733 | ||
7132e2d6 TJB |
4734 | /* |
4735 | * If symbols in an architecture don't correspond exactly to the user-visible | |
4736 | * name of what they represent, it is possible to define this function to | |
4737 | * perform the necessary adjustments. | |
4738 | */ | |
4739 | char * __weak arch_ftrace_match_adjust(char *str, const char *search) | |
4740 | { | |
4741 | return str; | |
4742 | } | |
4743 | ||
3ba00929 | 4744 | static int ftrace_match(char *str, struct ftrace_glob *g) |
9f4801e3 | 4745 | { |
9f4801e3 | 4746 | int matched = 0; |
751e9983 | 4747 | int slen; |
9f4801e3 | 4748 | |
7132e2d6 TJB |
4749 | str = arch_ftrace_match_adjust(str, g->search); |
4750 | ||
3ba00929 | 4751 | switch (g->type) { |
9f4801e3 | 4752 | case MATCH_FULL: |
3ba00929 | 4753 | if (strcmp(str, g->search) == 0) |
9f4801e3 SR |
4754 | matched = 1; |
4755 | break; | |
4756 | case MATCH_FRONT_ONLY: | |
3ba00929 | 4757 | if (strncmp(str, g->search, g->len) == 0) |
9f4801e3 SR |
4758 | matched = 1; |
4759 | break; | |
4760 | case MATCH_MIDDLE_ONLY: | |
3ba00929 | 4761 | if (strstr(str, g->search)) |
9f4801e3 SR |
4762 | matched = 1; |
4763 | break; | |
4764 | case MATCH_END_ONLY: | |
751e9983 | 4765 | slen = strlen(str); |
3ba00929 DS |
4766 | if (slen >= g->len && |
4767 | memcmp(str + slen - g->len, g->search, g->len) == 0) | |
9f4801e3 SR |
4768 | matched = 1; |
4769 | break; | |
60f1d5e3 MH |
4770 | case MATCH_GLOB: |
4771 | if (glob_match(g->search, str)) | |
4772 | matched = 1; | |
4773 | break; | |
9f4801e3 SR |
4774 | } |
4775 | ||
4776 | return matched; | |
4777 | } | |
4778 | ||
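A minimal userspace sketch of the four basic glob types handled above; it mirrors the switch in ftrace_match() but is illustrative only, not kernel code:

    #include <stdio.h>
    #include <string.h>

    enum { MATCH_FULL, MATCH_FRONT_ONLY, MATCH_MIDDLE_ONLY, MATCH_END_ONLY };

    static int match(const char *str, const char *search, int type)
    {
    	size_t slen = strlen(str), len = strlen(search);

    	switch (type) {
    	case MATCH_FULL:        /* "lock"   */ return strcmp(str, search) == 0;
    	case MATCH_FRONT_ONLY:  /* "lock*"  */ return strncmp(str, search, len) == 0;
    	case MATCH_MIDDLE_ONLY: /* "*lock*" */ return strstr(str, search) != NULL;
    	case MATCH_END_ONLY:    /* "*lock"  */
    		return slen >= len && memcmp(str + slen - len, search, len) == 0;
    	}
    	return 0;
    }

    int main(void)
    {
    	printf("%d\n", match("spin_lock", "lock", MATCH_END_ONLY));   /* 1 */
    	printf("%d\n", match("lock_page", "lock", MATCH_FRONT_ONLY)); /* 1 */
    	printf("%d\n", match("unlocked",  "lock", MATCH_FULL));       /* 0 */
    	return 0;
    }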
b448c4e3 | 4779 | static int |
f0a3b154 | 4780 | enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter) |
996e87be | 4781 | { |
b448c4e3 | 4782 | struct ftrace_func_entry *entry; |
b448c4e3 SR |
4783 | int ret = 0; |
4784 | ||
1cf41dd7 | 4785 | entry = ftrace_lookup_ip(hash, rec->ip); |
f0a3b154 | 4786 | if (clear_filter) { |
1cf41dd7 SR |
4787 | /* Do nothing if it doesn't exist */ |
4788 | if (!entry) | |
4789 | return 0; | |
b448c4e3 | 4790 | |
33dc9b12 | 4791 | free_hash_entry(hash, entry); |
1cf41dd7 SR |
4792 | } else { |
4793 | /* Do nothing if it exists */ | |
4794 | if (entry) | |
4795 | return 0; | |
d05cb470 SRG |
4796 | if (add_hash_entry(hash, rec->ip) == NULL) |
4797 | ret = -ENOMEM; | |
b448c4e3 SR |
4798 | } |
4799 | return ret; | |
996e87be SR |
4800 | } |
4801 | ||
f79b3f33 SRV |
4802 | static int |
4803 | add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g, | |
4804 | int clear_filter) | |
4805 | { | |
c5963a09 | 4806 | long index; |
f79b3f33 SRV |
4807 | struct ftrace_page *pg; |
4808 | struct dyn_ftrace *rec; | |
4809 | ||
4810 | /* The index starts at 1 */ | |
c5963a09 | 4811 | if (kstrtoul(func_g->search, 0, &index) || --index < 0) |
f79b3f33 SRV |
4812 | return 0; |
4813 | ||
4814 | do_for_each_ftrace_rec(pg, rec) { | |
4815 | if (pg->index <= index) { | |
4816 | index -= pg->index; | |
4817 | /* this is a double loop, break goes to the next page */ | |
4818 | break; | |
4819 | } | |
4820 | rec = &pg->records[index]; | |
4821 | enter_record(hash, rec, clear_filter); | |
4822 | return 1; | |
4823 | } while_for_each_ftrace_rec(); | |
4824 | return 0; | |
4825 | } | |
4826 | ||
b39181f7 SRG |
4827 | #ifdef FTRACE_MCOUNT_MAX_OFFSET |
4828 | static int lookup_ip(unsigned long ip, char **modname, char *str) | |
4829 | { | |
4830 | unsigned long offset; | |
4831 | ||
4832 | kallsyms_lookup(ip, NULL, &offset, modname, str); | |
4833 | if (offset > FTRACE_MCOUNT_MAX_OFFSET) | |
4834 | return -1; | |
4835 | return 0; | |
4836 | } | |
4837 | #else | |
4838 | static int lookup_ip(unsigned long ip, char **modname, char *str) | |
4839 | { | |
4840 | kallsyms_lookup(ip, NULL, NULL, modname, str); | |
4841 | return 0; | |
4842 | } | |
4843 | #endif | |
4844 | ||
64e7c440 | 4845 | static int |
0b507e1e DS |
4846 | ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g, |
4847 | struct ftrace_glob *mod_g, int exclude_mod) | |
64e7c440 SR |
4848 | { |
4849 | char str[KSYM_SYMBOL_LEN]; | |
b9df92d2 SR |
4850 | char *modname; |
4851 | ||
b39181f7 SRG |
4852 | if (lookup_ip(rec->ip, &modname, str)) { |
4853 | /* This should only happen when a rec is disabled */ | |
4854 | WARN_ON_ONCE(system_state == SYSTEM_RUNNING && | |
4855 | !(rec->flags & FTRACE_FL_DISABLED)); | |
4856 | return 0; | |
4857 | } | |
b9df92d2 | 4858 | |
0b507e1e DS |
4859 | if (mod_g) { |
4860 | int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0; | |
4861 | ||
4862 | /* blank module name to match all modules */ | |
4863 | if (!mod_g->len) { | |
4864 | /* blank module globbing: modname xor exclude_mod */ | |
77c0edde | 4865 | if (!exclude_mod != !modname) |
0b507e1e DS |
4866 | goto func_match; |
4867 | return 0; | |
4868 | } | |
4869 | ||
77c0edde SRV |
4870 | /* |
4871 | * exclude_mod is set to trace everything but the given | |
4872 | * module. If it is set and the module matches, then | |
4873 | * return 0. If it is not set and the module doesn't match,
4874 | * also return 0. Otherwise, check the function to see if
4875 | * that matches. | |
4876 | */ | |
4877 | if (!mod_matches == !exclude_mod) | |
b9df92d2 | 4878 | return 0; |
0b507e1e | 4879 | func_match: |
b9df92d2 | 4880 | /* blank search means to match all funcs in the mod */ |
3ba00929 | 4881 | if (!func_g->len) |
b9df92d2 SR |
4882 | return 1; |
4883 | } | |
64e7c440 | 4884 | |
3ba00929 | 4885 | return ftrace_match(str, func_g); |
64e7c440 SR |
4886 | } |
4887 | ||
1cf41dd7 | 4888 | static int |
3ba00929 | 4889 | match_records(struct ftrace_hash *hash, char *func, int len, char *mod) |
9f4801e3 | 4890 | { |
9f4801e3 SR |
4891 | struct ftrace_page *pg; |
4892 | struct dyn_ftrace *rec; | |
3ba00929 | 4893 | struct ftrace_glob func_g = { .type = MATCH_FULL }; |
0b507e1e DS |
4894 | struct ftrace_glob mod_g = { .type = MATCH_FULL }; |
4895 | struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL; | |
4896 | int exclude_mod = 0; | |
311d16da | 4897 | int found = 0; |
b448c4e3 | 4898 | int ret; |
2e028c4f | 4899 | int clear_filter = 0; |
9f4801e3 | 4900 | |
0b507e1e | 4901 | if (func) { |
3ba00929 DS |
4902 | func_g.type = filter_parse_regex(func, len, &func_g.search, |
4903 | &clear_filter); | |
4904 | func_g.len = strlen(func_g.search); | |
b9df92d2 | 4905 | } |
9f4801e3 | 4906 | |
0b507e1e DS |
4907 | if (mod) { |
4908 | mod_g.type = filter_parse_regex(mod, strlen(mod), | |
4909 | &mod_g.search, &exclude_mod); | |
4910 | mod_g.len = strlen(mod_g.search); | |
b9df92d2 | 4911 | } |
9f4801e3 | 4912 | |
06294cf0 | 4913 | guard(mutex)(&ftrace_lock); |
265c831c | 4914 | |
b9df92d2 | 4915 | if (unlikely(ftrace_disabled)) |
06294cf0 | 4916 | return 0; |
9f4801e3 | 4917 | |
06294cf0 SR |
4918 | if (func_g.type == MATCH_INDEX) |
4919 | return add_rec_by_index(hash, &func_g, clear_filter); | |
f79b3f33 | 4920 | |
265c831c | 4921 | do_for_each_ftrace_rec(pg, rec) { |
546fece4 SRRH |
4922 | |
4923 | if (rec->flags & FTRACE_FL_DISABLED) | |
4924 | continue; | |
4925 | ||
0b507e1e | 4926 | if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) { |
f0a3b154 | 4927 | ret = enter_record(hash, rec, clear_filter); |
06294cf0 SR |
4928 | if (ret < 0) |
4929 | return ret; | |
311d16da | 4930 | found = 1; |
265c831c | 4931 | } |
d0b24b4e | 4932 | cond_resched(); |
265c831c | 4933 | } while_for_each_ftrace_rec(); |
311d16da LZ |
4934 | |
4935 | return found; | |
5072c59f SR |
4936 | } |
4937 | ||
64e7c440 | 4938 | static int |
1cf41dd7 | 4939 | ftrace_match_records(struct ftrace_hash *hash, char *buff, int len) |
64e7c440 | 4940 | { |
f0a3b154 | 4941 | return match_records(hash, buff, len, NULL); |
64e7c440 SR |
4942 | } |
4943 | ||
e16b35dd SRV |
4944 | static void ftrace_ops_update_code(struct ftrace_ops *ops, |
4945 | struct ftrace_ops_hash *old_hash) | |
4946 | { | |
4947 | struct ftrace_ops *op; | |
4948 | ||
4949 | if (!ftrace_enabled) | |
4950 | return; | |
4951 | ||
4952 | if (ops->flags & FTRACE_OPS_FL_ENABLED) { | |
4953 | ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash); | |
4954 | return; | |
4955 | } | |
4956 | ||
4957 | /* | |
4958 | * If this is the shared global_ops filter, then we need to | |
4959 | * check if there is another ops that shares it, is enabled. | |
4960 | * If so, we still need to run the modify code. | |
4961 | */ | |
4962 | if (ops->func_hash != &global_ops.local_hash) | |
4963 | return; | |
4964 | ||
4965 | do_for_each_ftrace_op(op, ftrace_ops_list) { | |
4966 | if (op->func_hash == &global_ops.local_hash && | |
4967 | op->flags & FTRACE_OPS_FL_ENABLED) { | |
4968 | ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash); | |
4969 | /* Only need to do this once */ | |
4970 | return; | |
4971 | } | |
4972 | } while_for_each_ftrace_op(op); | |
4973 | } | |
4974 | ||
4975 | static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops, | |
4976 | struct ftrace_hash **orig_hash, | |
4977 | struct ftrace_hash *hash, | |
4978 | int enable) | |
4979 | { | |
d9bbfbd1 | 4980 | if (ops->flags & FTRACE_OPS_FL_SUBOP) |
0ae6b8ce | 4981 | return ftrace_hash_move_and_update_subops(ops, orig_hash, hash); |
e16b35dd | 4982 | |
d9bbfbd1 SRG |
4983 | /* |
4984 | * If this ops is not enabled, it could be sharing its filters | |
4985 | * with a subop. If that's the case, update the subop instead of | |
4986 | * this ops. Shared filters are only allowed to have one ops set | |
4987 | * at a time, and if we update the ops that is not enabled, | |
4988 | * it will not affect subops that share it. | |
4989 | */ | |
4990 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) { | |
4991 | struct ftrace_ops *op; | |
4992 | ||
4993 | /* Check if any other manager subops maps to this hash */ | |
4994 | do_for_each_ftrace_op(op, ftrace_ops_list) { | |
4995 | struct ftrace_ops *subops; | |
4996 | ||
4997 | list_for_each_entry(subops, &op->subop_list, list) { | |
4998 | if ((subops->flags & FTRACE_OPS_FL_ENABLED) && | |
4999 | subops->func_hash == ops->func_hash) { | |
0ae6b8ce | 5000 | return ftrace_hash_move_and_update_subops(subops, orig_hash, hash); |
d9bbfbd1 SRG |
5001 | } |
5002 | } | |
5003 | } while_for_each_ftrace_op(op); | |
e16b35dd | 5004 | } |
d9bbfbd1 SRG |
5005 | |
5006 | return __ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable); | |
e16b35dd | 5007 | } |
64e7c440 | 5008 | |
673feb9d SRV |
5009 | static int cache_mod(struct trace_array *tr, |
5010 | const char *func, char *module, int enable) | |
5011 | { | |
5012 | struct ftrace_mod_load *ftrace_mod, *n; | |
5013 | struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace; | |
673feb9d | 5014 | |
1432afb5 | 5015 | guard(mutex)(&ftrace_lock); |
673feb9d SRV |
5016 | |
5017 | /* We do not cache inverse filters */ | |
5018 | if (func[0] == '!') { | |
1432afb5 SR |
5019 | int ret = -EINVAL; |
5020 | ||
673feb9d | 5021 | func++; |
673feb9d SRV |
5022 | |
5023 | /* Look to remove this hash */ | |
5024 | list_for_each_entry_safe(ftrace_mod, n, head, list) { | |
5025 | if (strcmp(ftrace_mod->module, module) != 0) | |
5026 | continue; | |
5027 | ||
5028 | /* no func matches all */ | |
44925dff | 5029 | if (strcmp(func, "*") == 0 || |
673feb9d SRV |
5030 | (ftrace_mod->func && |
5031 | strcmp(ftrace_mod->func, func) == 0)) { | |
5032 | ret = 0; | |
5033 | free_ftrace_mod(ftrace_mod); | |
5034 | continue; | |
5035 | } | |
5036 | } | |
1432afb5 | 5037 | return ret; |
673feb9d SRV |
5038 | } |
5039 | ||
673feb9d SRV |
5040 | /* We only care about modules that have not been loaded yet */ |
5041 | if (module_exists(module)) | |
1432afb5 | 5042 | return -EINVAL; |
673feb9d SRV |
5043 | |
5044 | /* Save this string off, and execute it when the module is loaded */ | |
1432afb5 | 5045 | return ftrace_add_mod(tr, func, module, enable); |
673feb9d SRV |
5046 | } |
5047 | ||
69449bbd | 5048 | #ifdef CONFIG_MODULES |
d7fbf8df SRV |
5049 | static void process_mod_list(struct list_head *head, struct ftrace_ops *ops, |
5050 | char *mod, bool enable) | |
5051 | { | |
5052 | struct ftrace_mod_load *ftrace_mod, *n; | |
5053 | struct ftrace_hash **orig_hash, *new_hash; | |
5054 | LIST_HEAD(process_mods); | |
5055 | char *func; | |
d7fbf8df SRV |
5056 | |
5057 | mutex_lock(&ops->func_hash->regex_lock); | |
5058 | ||
5059 | if (enable) | |
5060 | orig_hash = &ops->func_hash->filter_hash; | |
5061 | else | |
5062 | orig_hash = &ops->func_hash->notrace_hash; | |
5063 | ||
5064 | new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, | |
5065 | *orig_hash); | |
5066 | if (!new_hash) | |
3b58a3c7 | 5067 | goto out; /* warn? */ |
d7fbf8df SRV |
5068 | |
5069 | mutex_lock(&ftrace_lock); | |
5070 | ||
5071 | list_for_each_entry_safe(ftrace_mod, n, head, list) { | |
5072 | ||
5073 | if (strcmp(ftrace_mod->module, mod) != 0) | |
5074 | continue; | |
5075 | ||
5076 | if (ftrace_mod->func) | |
5077 | func = kstrdup(ftrace_mod->func, GFP_KERNEL); | |
5078 | else | |
5079 | func = kstrdup("*", GFP_KERNEL); | |
5080 | ||
5081 | if (!func) /* warn? */ | |
5082 | continue; | |
5083 | ||
3ecda644 | 5084 | list_move(&ftrace_mod->list, &process_mods); |
d7fbf8df SRV |
5085 | |
5086 | /* Use the newly allocated func, as it may be "*" */ | |
5087 | kfree(ftrace_mod->func); | |
5088 | ftrace_mod->func = func; | |
5089 | } | |
5090 | ||
5091 | mutex_unlock(&ftrace_lock); | |
5092 | ||
5093 | list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) { | |
5094 | ||
5095 | func = ftrace_mod->func; | |
5096 | ||
5097 | /* Grabs ftrace_lock, which is why we have this extra step */ | |
5098 | match_records(new_hash, func, strlen(func), mod); | |
5099 | free_ftrace_mod(ftrace_mod); | |
5100 | } | |
5101 | ||
8c08f0d5 SRV |
5102 | if (enable && list_empty(head)) |
5103 | new_hash->flags &= ~FTRACE_HASH_FL_MOD; | |
5104 | ||
d7fbf8df SRV |
5105 | mutex_lock(&ftrace_lock); |
5106 | ||
045e269c | 5107 | ftrace_hash_move_and_update_ops(ops, orig_hash, |
d7fbf8df SRV |
5108 | new_hash, enable); |
5109 | mutex_unlock(&ftrace_lock); | |
5110 | ||
3b58a3c7 | 5111 | out: |
d7fbf8df SRV |
5112 | mutex_unlock(&ops->func_hash->regex_lock); |
5113 | ||
5114 | free_ftrace_hash(new_hash); | |
5115 | } | |
5116 | ||
5117 | static void process_cached_mods(const char *mod_name) | |
5118 | { | |
5119 | struct trace_array *tr; | |
5120 | char *mod; | |
5121 | ||
5122 | mod = kstrdup(mod_name, GFP_KERNEL); | |
5123 | if (!mod) | |
5124 | return; | |
5125 | ||
5126 | mutex_lock(&trace_types_lock); | |
5127 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { | |
5128 | if (!list_empty(&tr->mod_trace)) | |
5129 | process_mod_list(&tr->mod_trace, tr->ops, mod, true); | |
5130 | if (!list_empty(&tr->mod_notrace)) | |
5131 | process_mod_list(&tr->mod_notrace, tr->ops, mod, false); | |
5132 | } | |
5133 | mutex_unlock(&trace_types_lock); | |
5134 | ||
5135 | kfree(mod); | |
5136 | } | |
69449bbd | 5137 | #endif |
d7fbf8df | 5138 | |
f6180773 SR |
5139 | /* |
5140 | * We register the module command as a template to show others how | |
5141 | * to register a command as well.
5142 | */ | |
5143 | ||
5144 | static int | |
04ec7bb6 | 5145 | ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash, |
673feb9d | 5146 | char *func_orig, char *cmd, char *module, int enable) |
f6180773 | 5147 | { |
673feb9d | 5148 | char *func; |
5e3949f0 | 5149 | int ret; |
f6180773 | 5150 | |
45af52e7 | 5151 | if (!tr) |
5152 | return -ENODEV; | |
5153 | ||
673feb9d SRV |
5154 | /* match_records() modifies func, and we need the original */ |
5155 | func = kstrdup(func_orig, GFP_KERNEL); | |
5156 | if (!func) | |
5157 | return -ENOMEM; | |
5158 | ||
f6180773 SR |
5159 | /* |
5160 | * cmd == 'mod' because we only registered this func | |
5161 | * for the 'mod' ftrace_func_command. | |
5162 | * But if you register one func with multiple commands, | |
5163 | * you can tell which command was used by the cmd | |
5164 | * parameter. | |
5165 | */ | |
f0a3b154 | 5166 | ret = match_records(hash, func, strlen(func), module); |
673feb9d SRV |
5167 | kfree(func); |
5168 | ||
b448c4e3 | 5169 | if (!ret) |
673feb9d | 5170 | return cache_mod(tr, func_orig, module, enable); |
b448c4e3 SR |
5171 | if (ret < 0) |
5172 | return ret; | |
b448c4e3 | 5173 | return 0; |
f6180773 SR |
5174 | } |
5175 | ||
5176 | static struct ftrace_func_command ftrace_mod_cmd = { | |
5177 | .name = "mod", | |
5178 | .func = ftrace_mod_callback, | |
5179 | }; | |
5180 | ||
5181 | static int __init ftrace_mod_cmd_init(void) | |
5182 | { | |
5183 | return register_ftrace_command(&ftrace_mod_cmd); | |
5184 | } | |
6f415672 | 5185 | core_initcall(ftrace_mod_cmd_init); |
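Following that template, a sketch of registering a hypothetical "foo" command (the name, callback, and semantics are illustrative only; the signature matches struct ftrace_func_command's func member as used by ftrace_mod_callback() above):

    static int
    ftrace_foo_callback(struct trace_array *tr, struct ftrace_hash *hash,
    		    char *func, char *cmd, char *param, int enable)
    {
    	/* invoked for strings like "function:foo:param" written to the filter file */
    	return 0;
    }

    static struct ftrace_func_command ftrace_foo_cmd = {
    	.name = "foo",
    	.func = ftrace_foo_callback,
    };

    static int __init ftrace_foo_cmd_init(void)
    {
    	return register_ftrace_command(&ftrace_foo_cmd);
    }
    core_initcall(ftrace_foo_cmd_init);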
f6180773 | 5186 | |
2f5f6ad9 | 5187 | static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip, |
d19ad077 | 5188 | struct ftrace_ops *op, struct ftrace_regs *fregs) |
59df055f | 5189 | { |
eee8ded1 | 5190 | struct ftrace_probe_ops *probe_ops; |
7b60f3d8 | 5191 | struct ftrace_func_probe *probe; |
59df055f | 5192 | |
7b60f3d8 SRV |
5193 | probe = container_of(op, struct ftrace_func_probe, ops); |
5194 | probe_ops = probe->probe_ops; | |
59df055f SR |
5195 | |
5196 | /* | |
5197 | * Disable preemption for these calls to prevent an RCU grace
5198 | * period. This synchronizes hash iteration with the freeing of items
5199 | * on the hash. rcu_read_lock is too dangerous here.
5200 | */ | |
5168ae50 | 5201 | preempt_disable_notrace(); |
6e444319 | 5202 | probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data); |
5168ae50 | 5203 | preempt_enable_notrace(); |
59df055f SR |
5204 | } |
5205 | ||
41794f19 SRV |
5206 | struct ftrace_func_map { |
5207 | struct ftrace_func_entry entry; | |
5208 | void *data; | |
59df055f SR |
5209 | }; |
5210 | ||
a54665ab SR |
5211 | /* |
5212 | * Note, ftrace_func_mapper is freed by free_ftrace_hash(&mapper->hash). | |
5213 | * The hash field must be the first field. | |
5214 | */ | |
41794f19 | 5215 | struct ftrace_func_mapper { |
a54665ab | 5216 | struct ftrace_hash hash; /* Must be first! */ |
41794f19 | 5217 | }; |
59df055f | 5218 | |
41794f19 SRV |
5219 | /** |
5220 | * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper | |
5221 | * | |
d1530413 | 5222 | * Returns: a ftrace_func_mapper descriptor that can be used to map ips to data. |
41794f19 SRV |
5223 | */ |
5224 | struct ftrace_func_mapper *allocate_ftrace_func_mapper(void) | |
59df055f | 5225 | { |
41794f19 | 5226 | struct ftrace_hash *hash; |
59df055f | 5227 | |
41794f19 SRV |
5228 | /* |
5229 | * The mapper is simply a ftrace_hash, but since the entries | |
5230 | * in the hash are not ftrace_func_entry type, we define it | |
5231 | * as a separate structure. | |
5232 | */ | |
5233 | hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); | |
5234 | return (struct ftrace_func_mapper *)hash; | |
5235 | } | |
59df055f | 5236 | |
41794f19 SRV |
5237 | /** |
5238 | * ftrace_func_mapper_find_ip - Find some data mapped to an ip | |
5239 | * @mapper: The mapper that has the ip maps | |
5240 | * @ip: the instruction pointer to find the data for | |
5241 | * | |
d1530413 | 5242 | * Returns: the data mapped to @ip if found otherwise NULL. The return |
41794f19 SRV |
5243 | * is actually the address of the mapper data pointer. The address is |
5244 | * returned for use cases where the data is no bigger than a long, and | |
5245 | * the user can use the data pointer as its data instead of having to | |
5246 | * allocate more memory for the reference. | |
5247 | */ | |
5248 | void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper, | |
5249 | unsigned long ip) | |
5250 | { | |
5251 | struct ftrace_func_entry *entry; | |
5252 | struct ftrace_func_map *map; | |
59df055f | 5253 | |
41794f19 SRV |
5254 | entry = ftrace_lookup_ip(&mapper->hash, ip); |
5255 | if (!entry) | |
5256 | return NULL; | |
b848914c | 5257 | |
41794f19 SRV |
5258 | map = (struct ftrace_func_map *)entry; |
5259 | return &map->data; | |
59df055f SR |
5260 | } |
5261 | ||
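A minimal sketch of the in-place pattern described above, using the pointer slot itself as a counter (assumes mapper and ip were set up earlier with ftrace_func_mapper_add_ip()):

    void **data = ftrace_func_mapper_find_ip(mapper, ip);
    if (data) {
    	long count = (long)*data;	/* the pointer slot doubles as the counter */
    	*data = (void *)(count + 1);
    }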
41794f19 SRV |
5262 | /** |
5263 | * ftrace_func_mapper_add_ip - Map some data to an ip | |
5264 | * @mapper: The mapper that has the ip maps | |
5265 | * @ip: The instruction pointer address to map @data to | |
5266 | * @data: The data to map to @ip | |
5267 | * | |
d1530413 | 5268 | * Returns: 0 on success otherwise an error. |
41794f19 SRV |
5269 | */ |
5270 | int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper, | |
5271 | unsigned long ip, void *data) | |
59df055f | 5272 | { |
41794f19 SRV |
5273 | struct ftrace_func_entry *entry; |
5274 | struct ftrace_func_map *map; | |
59df055f | 5275 | |
41794f19 SRV |
5276 | entry = ftrace_lookup_ip(&mapper->hash, ip); |
5277 | if (entry) | |
5278 | return -EBUSY; | |
59df055f | 5279 | |
41794f19 SRV |
5280 | map = kmalloc(sizeof(*map), GFP_KERNEL); |
5281 | if (!map) | |
5282 | return -ENOMEM; | |
59df055f | 5283 | |
41794f19 SRV |
5284 | map->entry.ip = ip; |
5285 | map->data = data; | |
b848914c | 5286 | |
41794f19 | 5287 | __add_hash_entry(&mapper->hash, &map->entry); |
59df055f | 5288 | |
41794f19 SRV |
5289 | return 0; |
5290 | } | |
59df055f | 5291 | |
41794f19 SRV |
5292 | /** |
5293 | * ftrace_func_mapper_remove_ip - Remove an ip from the mapping | |
5294 | * @mapper: The mapper that has the ip maps | |
5295 | * @ip: The instruction pointer address to remove the data from | |
5296 | * | |
d1530413 | 5297 | * Returns: the data if it is found, otherwise NULL. |
7d54c15c | 5298 | * Note, if the data pointer is used as the data itself (see
41794f19 SRV |
5299 | * ftrace_func_mapper_find_ip()), then the return value may be meaningless
5300 | * if the data pointer was set to zero.
5301 | */ | |
5302 | void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper, | |
5303 | unsigned long ip) | |
59df055f | 5304 | { |
41794f19 SRV |
5305 | struct ftrace_func_entry *entry; |
5306 | struct ftrace_func_map *map; | |
5307 | void *data; | |
5308 | ||
5309 | entry = ftrace_lookup_ip(&mapper->hash, ip); | |
5310 | if (!entry) | |
5311 | return NULL; | |
5312 | ||
5313 | map = (struct ftrace_func_map *)entry; | |
5314 | data = map->data; | |
5315 | ||
5316 | remove_hash_entry(&mapper->hash, entry); | |
59df055f | 5317 | kfree(entry); |
41794f19 SRV |
5318 | |
5319 | return data; | |
5320 | } | |
5321 | ||
5322 | /** | |
5323 | * free_ftrace_func_mapper - free a mapping of ips and data | |
5324 | * @mapper: The mapper that has the ip maps | |
5325 | * @free_func: A function to be called on each data item. | |
5326 | * | |
5327 | * This is used to free the function mapper. The @free_func is optional | |
5328 | * and can be used if the data needs to be freed as well. | |
5329 | */ | |
5330 | void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper, | |
5331 | ftrace_mapper_func free_func) | |
5332 | { | |
5333 | struct ftrace_func_entry *entry; | |
5334 | struct ftrace_func_map *map; | |
5335 | struct hlist_head *hhd; | |
04e03d9a WL |
5336 | int size, i; |
5337 | ||
5338 | if (!mapper) | |
5339 | return; | |
41794f19 SRV |
5340 | |
5341 | if (free_func && mapper->hash.count) { | |
04e03d9a | 5342 | size = 1 << mapper->hash.size_bits; |
41794f19 SRV |
5343 | for (i = 0; i < size; i++) { |
5344 | hhd = &mapper->hash.buckets[i]; | |
5345 | hlist_for_each_entry(entry, hhd, hlist) { | |
5346 | map = (struct ftrace_func_map *)entry; | |
5347 | free_func(map); | |
5348 | } | |
5349 | } | |
5350 | } | |
a54665ab | 5351 | /* This also frees the mapper itself */ |
41794f19 SRV |
5352 | free_ftrace_hash(&mapper->hash); |
5353 | } | |
5354 | ||
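Putting the mapper API together, a minimal lifecycle sketch; example_free and my_data are hypothetical, and the callback receives the map entry itself, matching the free_func(map) call above:

    static void example_free(void *data)
    {
    	struct ftrace_func_map *map = data;	/* free_func() is handed the map entry */
    	kfree(map->data);
    }

    struct ftrace_func_mapper *mapper = allocate_ftrace_func_mapper();
    if (mapper) {
    	ftrace_func_mapper_add_ip(mapper, ip, my_data);	/* map ip -> my_data */
    	/* ... ftrace_func_mapper_find_ip(mapper, ip) during tracing ... */
    	free_ftrace_func_mapper(mapper, example_free);	/* frees entries and the mapper */
    }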
7b60f3d8 SRV |
5355 | static void release_probe(struct ftrace_func_probe *probe) |
5356 | { | |
5357 | struct ftrace_probe_ops *probe_ops; | |
5358 | ||
9687bbf2 | 5359 | guard(mutex)(&ftrace_lock); |
7b60f3d8 SRV |
5360 | |
5361 | WARN_ON(probe->ref <= 0); | |
5362 | ||
5363 | /* Subtract the ref that was used to protect this instance */ | |
5364 | probe->ref--; | |
5365 | ||
5366 | if (!probe->ref) { | |
5367 | probe_ops = probe->probe_ops; | |
6e444319 SRV |
5368 | /* |
5369 | * Sending zero as ip tells probe_ops to free | |
5370 | * the probe->data itself | |
5371 | */ | |
5372 | if (probe_ops->free) | |
5373 | probe_ops->free(probe_ops, probe->tr, 0, probe->data); | |
7b60f3d8 SRV |
5374 | list_del(&probe->list); |
5375 | kfree(probe); | |
5376 | } | |
7b60f3d8 SRV |
5377 | } |
5378 | ||
5379 | static void acquire_probe_locked(struct ftrace_func_probe *probe) | |
5380 | { | |
5381 | /* | |
5382 | * Add one ref to keep it from being freed when releasing the | |
5383 | * ftrace_lock mutex. | |
5384 | */ | |
5385 | probe->ref++; | |
59df055f SR |
5386 | } |
5387 | ||
59df055f | 5388 | int |
04ec7bb6 | 5389 | register_ftrace_function_probe(char *glob, struct trace_array *tr, |
7b60f3d8 SRV |
5390 | struct ftrace_probe_ops *probe_ops, |
5391 | void *data) | |
59df055f | 5392 | { |
ba27d855 | 5393 | struct ftrace_func_probe *probe = NULL, *iter; |
1ec3a81a | 5394 | struct ftrace_func_entry *entry; |
1ec3a81a SRV |
5395 | struct ftrace_hash **orig_hash; |
5396 | struct ftrace_hash *old_hash; | |
e1df4cb6 | 5397 | struct ftrace_hash *hash; |
59df055f | 5398 | int count = 0; |
1ec3a81a | 5399 | int size; |
e1df4cb6 | 5400 | int ret; |
1ec3a81a | 5401 | int i; |
59df055f | 5402 | |
04ec7bb6 | 5403 | if (WARN_ON(!tr)) |
59df055f SR |
5404 | return -EINVAL; |
5405 | ||
1ec3a81a SRV |
5406 | /* We do not support '!' for function probes */ |
5407 | if (WARN_ON(glob[0] == '!')) | |
59df055f | 5408 | return -EINVAL; |
59df055f | 5409 | |
7485058e | 5410 | |
7b60f3d8 SRV |
5411 | mutex_lock(&ftrace_lock); |
5412 | /* Check if the probe_ops is already registered */ | |
ba27d855 JK |
5413 | list_for_each_entry(iter, &tr->func_probes, list) { |
5414 | if (iter->probe_ops == probe_ops) { | |
5415 | probe = iter; | |
7b60f3d8 | 5416 | break; |
ba27d855 | 5417 | } |
e1df4cb6 | 5418 | } |
ba27d855 | 5419 | if (!probe) { |
7b60f3d8 SRV |
5420 | probe = kzalloc(sizeof(*probe), GFP_KERNEL); |
5421 | if (!probe) { | |
5422 | mutex_unlock(&ftrace_lock); | |
5423 | return -ENOMEM; | |
5424 | } | |
5425 | probe->probe_ops = probe_ops; | |
5426 | probe->ops.func = function_trace_probe_call; | |
5427 | probe->tr = tr; | |
5428 | ftrace_ops_init(&probe->ops); | |
5429 | list_add(&probe->list, &tr->func_probes); | |
e1df4cb6 | 5430 | } |
59df055f | 5431 | |
7b60f3d8 | 5432 | acquire_probe_locked(probe); |
5ae0bf59 | 5433 | |
7b60f3d8 | 5434 | mutex_unlock(&ftrace_lock); |
59df055f | 5435 | |
372e0d01 SRV |
5436 | /* |
5437 | * Note, there's a small window here where the func_hash->filter_hash
fdda88d3 | 5438 | * may be NULL or empty. Need to be careful when reading the loop. |
372e0d01 | 5439 | */ |
7b60f3d8 | 5440 | mutex_lock(&probe->ops.func_hash->regex_lock); |
546fece4 | 5441 | |
7b60f3d8 | 5442 | orig_hash = &probe->ops.func_hash->filter_hash; |
1ec3a81a SRV |
5443 | old_hash = *orig_hash; |
5444 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); | |
59df055f | 5445 | |
5b0022dd NR |
5446 | if (!hash) { |
5447 | ret = -ENOMEM; | |
5448 | goto out; | |
5449 | } | |
5450 | ||
1ec3a81a | 5451 | ret = ftrace_match_records(hash, glob, strlen(glob)); |
59df055f | 5452 | |
1ec3a81a SRV |
5453 | /* Nothing found? */ |
5454 | if (!ret) | |
5455 | ret = -EINVAL; | |
59df055f | 5456 | |
1ec3a81a SRV |
5457 | if (ret < 0) |
5458 | goto out; | |
59df055f | 5459 | |
1ec3a81a SRV |
5460 | size = 1 << hash->size_bits; |
5461 | for (i = 0; i < size; i++) { | |
5462 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { | |
5463 | if (ftrace_lookup_ip(old_hash, entry->ip)) | |
59df055f | 5464 | continue; |
1ec3a81a SRV |
5465 | /* |
5466 | * The caller might want to do something special | |
5467 | * for each function we find. We call the callback | |
5468 | * to give the caller an opportunity to do so. | |
5469 | */ | |
7b60f3d8 SRV |
5470 | if (probe_ops->init) { |
5471 | ret = probe_ops->init(probe_ops, tr, | |
6e444319 SRV |
5472 | entry->ip, data, |
5473 | &probe->data); | |
5474 | if (ret < 0) { | |
5475 | if (probe_ops->free && count) | |
5476 | probe_ops->free(probe_ops, tr, | |
5477 | 0, probe->data); | |
5478 | probe->data = NULL; | |
eee8ded1 | 5479 | goto out; |
6e444319 | 5480 | } |
59df055f | 5481 | } |
1ec3a81a | 5482 | count++; |
59df055f | 5483 | } |
1ec3a81a | 5484 | } |
59df055f | 5485 | |
1ec3a81a | 5486 | mutex_lock(&ftrace_lock); |
59df055f | 5487 | |
7b60f3d8 SRV |
5488 | if (!count) { |
5489 | /* Nothing was added? */ | |
5490 | ret = -EINVAL; | |
5491 | goto out_unlock; | |
5492 | } | |
e1df4cb6 | 5493 | |
7b60f3d8 SRV |
5494 | ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, |
5495 | hash, 1); | |
1ec3a81a | 5496 | if (ret < 0) |
8d70725e | 5497 | goto err_unlock; |
8252ecf3 | 5498 | |
7b60f3d8 SRV |
5499 | /* One ref for each new function traced */ |
5500 | probe->ref += count; | |
8252ecf3 | 5501 | |
7b60f3d8 SRV |
5502 | if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED)) |
5503 | ret = ftrace_startup(&probe->ops, 0); | |
e1df4cb6 | 5504 | |
59df055f | 5505 | out_unlock: |
5ae0bf59 | 5506 | mutex_unlock(&ftrace_lock); |
8252ecf3 | 5507 | |
3296fc4e | 5508 | if (!ret) |
1ec3a81a | 5509 | ret = count; |
5ae0bf59 | 5510 | out: |
7b60f3d8 | 5511 | mutex_unlock(&probe->ops.func_hash->regex_lock); |
e1df4cb6 | 5512 | free_ftrace_hash(hash); |
59df055f | 5513 | |
7b60f3d8 | 5514 | release_probe(probe); |
59df055f | 5515 | |
1ec3a81a | 5516 | return ret; |
59df055f | 5517 | |
8d70725e | 5518 | err_unlock: |
7b60f3d8 | 5519 | if (!probe_ops->free || !count) |
8d70725e SRV |
5520 | goto out_unlock; |
5521 | ||
5522 | /* Failed to do the move, need to call the free functions */ | |
5523 | for (i = 0; i < size; i++) { | |
5524 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { | |
5525 | if (ftrace_lookup_ip(old_hash, entry->ip)) | |
5526 | continue; | |
6e444319 | 5527 | probe_ops->free(probe_ops, tr, entry->ip, probe->data); |
8d70725e SRV |
5528 | } |
5529 | } | |
5530 | goto out_unlock; | |
59df055f SR |
5531 | } |
5532 | ||
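A minimal sketch of a probe built on this API; the names are hypothetical, tr is an existing trace_array, and the callback signature follows the probe_ops->func() invocation in function_trace_probe_call() above:

    static void
    example_probe_func(unsigned long ip, unsigned long parent_ip,
    		   struct trace_array *tr, struct ftrace_probe_ops *ops,
    		   void *data)
    {
    	/* called from each matched function; runs with preemption disabled */
    }

    static struct ftrace_probe_ops example_probe_ops = {
    	.func = example_probe_func,	/* .init, .free and .print are optional */
    };

    /* attach to every function matching the glob, then detach: */
    ret = register_ftrace_function_probe("vfs_*", tr, &example_probe_ops, NULL);
    ...
    ret = unregister_ftrace_function_probe_func("vfs_*", tr, &example_probe_ops);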
d3d532d7 | 5533 | int |
7b60f3d8 SRV |
5534 | unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, |
5535 | struct ftrace_probe_ops *probe_ops) | |
59df055f | 5536 | { |
ba27d855 | 5537 | struct ftrace_func_probe *probe = NULL, *iter; |
82cc4fc2 | 5538 | struct ftrace_ops_hash old_hash_ops; |
eee8ded1 | 5539 | struct ftrace_func_entry *entry; |
3ba00929 | 5540 | struct ftrace_glob func_g; |
1ec3a81a SRV |
5541 | struct ftrace_hash **orig_hash; |
5542 | struct ftrace_hash *old_hash; | |
1ec3a81a | 5543 | struct ftrace_hash *hash = NULL; |
b67bfe0d | 5544 | struct hlist_node *tmp; |
eee8ded1 | 5545 | struct hlist_head hhd; |
59df055f | 5546 | char str[KSYM_SYMBOL_LEN]; |
7b60f3d8 SRV |
5547 | int count = 0; |
5548 | int i, ret = -ENODEV; | |
eee8ded1 | 5549 | int size; |
59df055f | 5550 | |
cbab567c | 5551 | if (!glob || !strlen(glob) || !strcmp(glob, "*")) |
3ba00929 | 5552 | func_g.search = NULL; |
cbab567c | 5553 | else { |
59df055f SR |
5554 | int not; |
5555 | ||
3ba00929 DS |
5556 | func_g.type = filter_parse_regex(glob, strlen(glob), |
5557 | &func_g.search, ¬); | |
5558 | func_g.len = strlen(func_g.search); | |
59df055f | 5559 | |
b6887d79 | 5560 | /* we do not support '!' for function probes */ |
59df055f | 5561 | if (WARN_ON(not)) |
d3d532d7 | 5562 | return -EINVAL; |
59df055f SR |
5563 | } |
5564 | ||
7b60f3d8 SRV |
5565 | mutex_lock(&ftrace_lock); |
5566 | /* Check if the probe_ops is already registered */ | |
ba27d855 JK |
5567 | list_for_each_entry(iter, &tr->func_probes, list) { |
5568 | if (iter->probe_ops == probe_ops) { | |
5569 | probe = iter; | |
7b60f3d8 | 5570 | break; |
ba27d855 | 5571 | } |
59df055f | 5572 | } |
ba27d855 | 5573 | if (!probe) |
7b60f3d8 SRV |
5574 | goto err_unlock_ftrace; |
5575 | ||
5576 | ret = -EINVAL; | |
5577 | if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED)) | |
5578 | goto err_unlock_ftrace; | |
5579 | ||
5580 | acquire_probe_locked(probe); | |
5581 | ||
5582 | mutex_unlock(&ftrace_lock); | |
59df055f | 5583 | |
7b60f3d8 | 5584 | mutex_lock(&probe->ops.func_hash->regex_lock); |
1ec3a81a | 5585 | |
7b60f3d8 | 5586 | orig_hash = &probe->ops.func_hash->filter_hash; |
1ec3a81a SRV |
5587 | old_hash = *orig_hash; |
5588 | ||
1ec3a81a SRV |
5589 | if (ftrace_hash_empty(old_hash)) |
5590 | goto out_unlock; | |
e1df4cb6 | 5591 | |
82cc4fc2 SRV |
5592 | old_hash_ops.filter_hash = old_hash; |
5593 | /* Probes only have filters */ | |
5594 | old_hash_ops.notrace_hash = NULL; | |
5595 | ||
d3d532d7 | 5596 | ret = -ENOMEM; |
1ec3a81a | 5597 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); |
e1df4cb6 | 5598 | if (!hash) |
e1df4cb6 SRRH |
5599 | goto out_unlock; |
5600 | ||
eee8ded1 | 5601 | INIT_HLIST_HEAD(&hhd); |
59df055f | 5602 | |
eee8ded1 SRV |
5603 | size = 1 << hash->size_bits; |
5604 | for (i = 0; i < size; i++) { | |
5605 | hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) { | |
59df055f | 5606 | |
3ba00929 | 5607 | if (func_g.search) { |
59df055f SR |
5608 | kallsyms_lookup(entry->ip, NULL, NULL, |
5609 | NULL, str); | |
3ba00929 | 5610 | if (!ftrace_match(str, &func_g)) |
59df055f SR |
5611 | continue; |
5612 | } | |
7b60f3d8 | 5613 | count++; |
eee8ded1 SRV |
5614 | remove_hash_entry(hash, entry); |
5615 | hlist_add_head(&entry->hlist, &hhd); | |
59df055f SR |
5616 | } |
5617 | } | |
d3d532d7 SRV |
5618 | |
5619 | /* Nothing found? */ | |
7b60f3d8 | 5620 | if (!count) { |
d3d532d7 SRV |
5621 | ret = -EINVAL; |
5622 | goto out_unlock; | |
5623 | } | |
5624 | ||
3f2367ba | 5625 | mutex_lock(&ftrace_lock); |
1ec3a81a | 5626 | |
7b60f3d8 | 5627 | WARN_ON(probe->ref < count); |
eee8ded1 | 5628 | |
7b60f3d8 | 5629 | probe->ref -= count; |
1ec3a81a | 5630 | |
7b60f3d8 SRV |
5631 | if (ftrace_hash_empty(hash)) |
5632 | ftrace_shutdown(&probe->ops, 0); | |
5633 | ||
5634 | ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, | |
1ec3a81a | 5635 | hash, 1); |
82cc4fc2 SRV |
5636 | |
5637 | /* still need to update the function call sites */ | |
1ec3a81a | 5638 | if (ftrace_enabled && !ftrace_hash_empty(hash)) |
7b60f3d8 | 5639 | ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS, |
82cc4fc2 | 5640 | &old_hash_ops); |
74401729 | 5641 | synchronize_rcu(); |
3296fc4e | 5642 | |
eee8ded1 SRV |
5643 | hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) { |
5644 | hlist_del(&entry->hlist); | |
7b60f3d8 | 5645 | if (probe_ops->free) |
6e444319 | 5646 | probe_ops->free(probe_ops, tr, entry->ip, probe->data); |
eee8ded1 | 5647 | kfree(entry); |
7818b388 | 5648 | } |
3f2367ba | 5649 | mutex_unlock(&ftrace_lock); |
3ba00929 | 5650 | |
e1df4cb6 | 5651 | out_unlock: |
7b60f3d8 | 5652 | mutex_unlock(&probe->ops.func_hash->regex_lock); |
e1df4cb6 | 5653 | free_ftrace_hash(hash); |
59df055f | 5654 | |
7b60f3d8 | 5655 | release_probe(probe); |
59df055f | 5656 | |
7b60f3d8 | 5657 | return ret; |
59df055f | 5658 | |
7b60f3d8 SRV |
5659 | err_unlock_ftrace: |
5660 | mutex_unlock(&ftrace_lock); | |
d3d532d7 | 5661 | return ret; |
59df055f SR |
5662 | } |
5663 | ||
a0e6369e NR |
5664 | void clear_ftrace_function_probes(struct trace_array *tr) |
5665 | { | |
5666 | struct ftrace_func_probe *probe, *n; | |
5667 | ||
5668 | list_for_each_entry_safe(probe, n, &tr->func_probes, list) | |
5669 | unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops); | |
5670 | } | |
5671 | ||
f6180773 SR |
5672 | static LIST_HEAD(ftrace_commands); |
5673 | static DEFINE_MUTEX(ftrace_cmd_mutex); | |
5674 | ||
38de93ab TZ |
5675 | /* |
5676 | * Currently we only register ftrace commands from __init, so mark this | |
5677 | * __init too. | |
5678 | */ | |
5679 | __init int register_ftrace_command(struct ftrace_func_command *cmd) | |
f6180773 SR |
5680 | { |
5681 | struct ftrace_func_command *p; | |
f6180773 | 5682 | |
1d95fd9d | 5683 | guard(mutex)(&ftrace_cmd_mutex); |
f6180773 | 5684 | list_for_each_entry(p, &ftrace_commands, list) { |
1d95fd9d SR |
5685 | if (strcmp(cmd->name, p->name) == 0) |
5686 | return -EBUSY; | |
f6180773 SR |
5687 | } |
5688 | list_add(&cmd->list, &ftrace_commands); | |
f6180773 | 5689 | |
1d95fd9d | 5690 | return 0; |
f6180773 SR |
5691 | } |
5692 | ||
38de93ab TZ |
5693 | /* |
5694 | * Currently we only unregister ftrace commands from __init, so mark | |
5695 | * this __init too. | |
5696 | */ | |
5697 | __init int unregister_ftrace_command(struct ftrace_func_command *cmd) | |
f6180773 SR |
5698 | { |
5699 | struct ftrace_func_command *p, *n; | |
f6180773 | 5700 | |
1d95fd9d SR |
5701 | guard(mutex)(&ftrace_cmd_mutex); |
5702 | ||
f6180773 SR |
5703 | list_for_each_entry_safe(p, n, &ftrace_commands, list) { |
5704 | if (strcmp(cmd->name, p->name) == 0) { | |
f6180773 | 5705 | list_del_init(&p->list); |
1d95fd9d | 5706 | return 0; |
f6180773 SR |
5707 | } |
5708 | } | |
f6180773 | 5709 | |
1d95fd9d | 5710 | return -ENODEV; |
f6180773 SR |
5711 | } |
5712 | ||
04ec7bb6 | 5713 | static int ftrace_process_regex(struct ftrace_iterator *iter, |
33dc9b12 | 5714 | char *buff, int len, int enable) |
64e7c440 | 5715 | { |
04ec7bb6 | 5716 | struct ftrace_hash *hash = iter->hash; |
d2afd57a | 5717 | struct trace_array *tr = iter->ops->private; |
f6180773 | 5718 | char *func, *command, *next = buff; |
6a24a244 | 5719 | struct ftrace_func_command *p; |
1d95fd9d | 5720 | int ret; |
64e7c440 SR |
5721 | |
5722 | func = strsep(&next, ":"); | |
5723 | ||
5724 | if (!next) { | |
1cf41dd7 | 5725 | ret = ftrace_match_records(hash, func, len); |
b448c4e3 SR |
5726 | if (!ret) |
5727 | ret = -EINVAL; | |
5728 | if (ret < 0) | |
5729 | return ret; | |
5730 | return 0; | |
64e7c440 SR |
5731 | } |
5732 | ||
f6180773 | 5733 | /* command found */ |
64e7c440 SR |
5734 | |
5735 | command = strsep(&next, ":"); | |
5736 | ||
1d95fd9d SR |
5737 | guard(mutex)(&ftrace_cmd_mutex); |
5738 | ||
f6180773 | 5739 | list_for_each_entry(p, &ftrace_commands, list) { |
1d95fd9d SR |
5740 | if (strcmp(p->name, command) == 0) |
5741 | return p->func(tr, hash, func, command, next, enable); | |
64e7c440 SR |
5742 | } |
5743 | ||
1d95fd9d | 5744 | return -EINVAL; |
64e7c440 SR |
5745 | } |
5746 | ||
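To make the strsep() parsing above concrete, a few example filter strings as they decompose (illustrative; "mod" is registered in this file, "traceoff" by the function tracer elsewhere):

    "schedule"             -> func="schedule", no command: plain filter match
    "*:mod:ext4"           -> func="*", command="mod", next="ext4"
    "schedule:traceoff:5"  -> func="schedule", command="traceoff", next="5"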
e309b41d | 5747 | static ssize_t |
41c52c0d SR |
5748 | ftrace_regex_write(struct file *file, const char __user *ubuf, |
5749 | size_t cnt, loff_t *ppos, int enable) | |
5072c59f SR |
5750 | { |
5751 | struct ftrace_iterator *iter; | |
689fd8b6 | 5752 | struct trace_parser *parser; |
5753 | ssize_t ret, read; | |
5072c59f | 5754 | |
4ba7978e | 5755 | if (!cnt) |
5072c59f SR |
5756 | return 0; |
5757 | ||
5072c59f SR |
5758 | if (file->f_mode & FMODE_READ) { |
5759 | struct seq_file *m = file->private_data; | |
5760 | iter = m->private; | |
5761 | } else | |
5762 | iter = file->private_data; | |
5763 | ||
f04f24fb | 5764 | if (unlikely(ftrace_disabled)) |
3f2367ba MH |
5765 | return -ENODEV; |
5766 | ||
5767 | /* iter->hash is a local copy, so we don't need regex_lock */ | |
f04f24fb | 5768 | |
689fd8b6 | 5769 | parser = &iter->parser; |
5770 | read = trace_get_user(parser, ubuf, cnt, ppos); | |
5072c59f | 5771 | |
4ba7978e | 5772 | if (read >= 0 && trace_parser_loaded(parser) && |
689fd8b6 | 5773 | !trace_parser_cont(parser)) { |
04ec7bb6 | 5774 | ret = ftrace_process_regex(iter, parser->buffer, |
689fd8b6 | 5775 | parser->idx, enable); |
313254a9 | 5776 | trace_parser_clear(parser); |
7c088b51 | 5777 | if (ret < 0) |
77e53cb2 | 5778 | return ret; |
eda1e328 | 5779 | } |
5072c59f | 5780 | |
77e53cb2 | 5781 | return read; |
5072c59f SR |
5782 | } |
5783 | ||
fc13cb0c | 5784 | ssize_t |
41c52c0d SR |
5785 | ftrace_filter_write(struct file *file, const char __user *ubuf, |
5786 | size_t cnt, loff_t *ppos) | |
5787 | { | |
5788 | return ftrace_regex_write(file, ubuf, cnt, ppos, 1); | |
5789 | } | |
5790 | ||
fc13cb0c | 5791 | ssize_t |
41c52c0d SR |
5792 | ftrace_notrace_write(struct file *file, const char __user *ubuf, |
5793 | size_t cnt, loff_t *ppos) | |
5794 | { | |
5795 | return ftrace_regex_write(file, ubuf, cnt, ppos, 0); | |
5796 | } | |
5797 | ||
33dc9b12 | 5798 | static int |
4f554e95 | 5799 | __ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) |
647664ea MH |
5800 | { |
5801 | struct ftrace_func_entry *entry; | |
5802 | ||
aebfd125 PZ |
5803 | ip = ftrace_location(ip); |
5804 | if (!ip) | |
647664ea MH |
5805 | return -EINVAL; |
5806 | ||
5807 | if (remove) { | |
5808 | entry = ftrace_lookup_ip(hash, ip); | |
5809 | if (!entry) | |
5810 | return -ENOENT; | |
5811 | free_hash_entry(hash, entry); | |
5812 | return 0; | |
8eb4b09e SR |
5813 | } else if (__ftrace_lookup_ip(hash, ip) != NULL) { |
5814 | /* Already exists */ | |
5815 | return 0; | |
647664ea MH |
5816 | } |
5817 | ||
d05cb470 SRG |
5818 | entry = add_hash_entry(hash, ip); |
5819 | return entry ? 0 : -ENOMEM; | |
647664ea MH |
5820 | } |
5821 | ||
4f554e95 JO |
5822 | static int |
5823 | ftrace_match_addr(struct ftrace_hash *hash, unsigned long *ips, | |
5824 | unsigned int cnt, int remove) | |
5825 | { | |
5826 | unsigned int i; | |
5827 | int err; | |
5828 | ||
5829 | for (i = 0; i < cnt; i++) { | |
5830 | err = __ftrace_match_addr(hash, ips[i], remove); | |
5831 | if (err) { | |
5832 | /* | |
5833 | * This expects that @hash is a temporary hash, and if this
5834 | * fails the caller must free @hash.
5835 | */ | |
5836 | return err; | |
5837 | } | |
5838 | } | |
5839 | return 0; | |
5840 | } | |
5841 | ||
647664ea MH |
5842 | static int |
5843 | ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, | |
4f554e95 | 5844 | unsigned long *ips, unsigned int cnt, |
31f505dc | 5845 | int remove, int reset, int enable, char *mod) |
41c52c0d | 5846 | { |
33dc9b12 | 5847 | struct ftrace_hash **orig_hash; |
f45948e8 | 5848 | struct ftrace_hash *hash; |
33dc9b12 | 5849 | int ret; |
f45948e8 | 5850 | |
41c52c0d | 5851 | if (unlikely(ftrace_disabled)) |
33dc9b12 | 5852 | return -ENODEV; |
41c52c0d | 5853 | |
33b7f99c | 5854 | mutex_lock(&ops->func_hash->regex_lock); |
3f2367ba | 5855 | |
f45948e8 | 5856 | if (enable) |
33b7f99c | 5857 | orig_hash = &ops->func_hash->filter_hash; |
f45948e8 | 5858 | else |
33b7f99c | 5859 | orig_hash = &ops->func_hash->notrace_hash; |
33dc9b12 | 5860 | |
b972cc58 WN |
5861 | if (reset) |
5862 | hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); | |
5863 | else | |
5864 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); | |
5865 | ||
3f2367ba MH |
5866 | if (!hash) { |
5867 | ret = -ENOMEM; | |
5868 | goto out_regex_unlock; | |
5869 | } | |
f45948e8 | 5870 | |
31f505dc SR |
5871 | if (buf && !match_records(hash, buf, len, mod)) { |
5872 | /* If this was for a module and nothing was enabled, flag it */ | |
5873 | if (mod) | |
5874 | (*orig_hash)->flags |= FTRACE_HASH_FL_MOD; | |
5875 | ||
5876 | /* | |
5877 | * Even if it is a mod, return an error to let the caller know
5878 | * nothing was added
5879 | */ | |
ac483c44 JO |
5880 | ret = -EINVAL; |
5881 | goto out_regex_unlock; | |
5882 | } | |
4f554e95 JO |
5883 | if (ips) { |
5884 | ret = ftrace_match_addr(hash, ips, cnt, remove); | |
647664ea MH |
5885 | if (ret < 0) |
5886 | goto out_regex_unlock; | |
5887 | } | |
33dc9b12 SR |
5888 | |
5889 | mutex_lock(&ftrace_lock); | |
e16b35dd | 5890 | ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable); |
33dc9b12 SR |
5891 | mutex_unlock(&ftrace_lock); |
5892 | ||
ac483c44 | 5893 | out_regex_unlock: |
33b7f99c | 5894 | mutex_unlock(&ops->func_hash->regex_lock); |
33dc9b12 SR |
5895 | |
5896 | free_ftrace_hash(hash); | |
5897 | return ret; | |
41c52c0d SR |
5898 | } |
5899 | ||
647664ea | 5900 | static int |
4f554e95 JO |
5901 | ftrace_set_addr(struct ftrace_ops *ops, unsigned long *ips, unsigned int cnt, |
5902 | int remove, int reset, int enable) | |
647664ea | 5903 | { |
31f505dc | 5904 | return ftrace_set_hash(ops, NULL, 0, ips, cnt, remove, reset, enable, NULL); |
647664ea MH |
5905 | } |
5906 | ||
763e34e7 | 5907 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS |
013bf0da | 5908 | |
53cd885b SL |
5909 | static int register_ftrace_function_nolock(struct ftrace_ops *ops); |
5910 | ||
a8b9cf62 MHG |
5911 | /* |
5912 | * If there are multiple ftrace_ops, use SAVE_REGS by default, so that the direct
5913 | * call will be jumped to from ftrace_regs_caller. Only if the architecture does
5914 | * not support ftrace_regs_caller but does support direct calls, use SAVE_ARGS
5915 | * so that the jump is made from ftrace_caller for multiple ftrace_ops.
5916 | */ | |
bdbddb10 | 5917 | #ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS |
60c89718 | 5918 | #define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_ARGS) |
a8b9cf62 MHG |
5919 | #else |
5920 | #define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS) | |
5921 | #endif | |
f64dd462 JO |
5922 | |
5923 | static int check_direct_multi(struct ftrace_ops *ops) | |
5924 | { | |
5925 | if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) | |
5926 | return -EINVAL; | |
5927 | if ((ops->flags & MULTI_FLAGS) != MULTI_FLAGS) | |
5928 | return -EINVAL; | |
5929 | return 0; | |
5930 | } | |
5931 | ||
5932 | static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long addr) | |
5933 | { | |
5934 | struct ftrace_func_entry *entry, *del; | |
5935 | int size, i; | |
5936 | ||
5937 | size = 1 << hash->size_bits; | |
5938 | for (i = 0; i < size; i++) { | |
5939 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { | |
5940 | del = __ftrace_lookup_ip(direct_functions, entry->ip); | |
5941 | if (del && del->direct == addr) { | |
5942 | remove_hash_entry(direct_functions, del); | |
5943 | kfree(del); | |
5944 | } | |
5945 | } | |
5946 | } | |
5947 | } | |
5948 | ||
33f13714 PM |
5949 | static void register_ftrace_direct_cb(struct rcu_head *rhp) |
5950 | { | |
5951 | struct ftrace_hash *fhp = container_of(rhp, struct ftrace_hash, rcu); | |
5952 | ||
5953 | free_ftrace_hash(fhp); | |
5954 | } | |
5955 | ||
f64dd462 | 5956 | /** |
da8bdfbd | 5957 | * register_ftrace_direct - Call a custom trampoline directly |
f64dd462 JO |
5958 | * for multiple functions registered in @ops |
5959 | * @ops: The address of the struct ftrace_ops object | |
5960 | * @addr: The address of the trampoline to call at @ops functions | |
5961 | * | |
5962 | * This is used to connect a direct call to @addr from the nop locations
5963 | * of the functions registered in @ops (set via the ftrace_set_filter_ip()
5964 | * function).
5965 | * | |
5966 | * The location that it calls (@addr) must be able to handle a direct call, | |
5967 | * and save the parameters of the function being traced, and restore them | |
5968 | * (or inject new ones if needed), before returning. | |
5969 | * | |
5970 | * Returns: | |
5971 | * 0 on success | |
5972 | * -EINVAL - The @ops object was already registered with this call or | |
5973 | * when there are no functions in the @ops object.
5974 | * -EBUSY - Another direct function is already attached (there can be only one) | |
5975 | * -ENODEV - @ip does not point to a ftrace nop location (or not supported) | |
5976 | * -ENOMEM - There was an allocation failure. | |
5977 | */ | |
da8bdfbd | 5978 | int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr) |
f64dd462 | 5979 | { |
d05cb470 | 5980 | struct ftrace_hash *hash, *new_hash = NULL, *free_hash = NULL; |
f64dd462 JO |
5981 | struct ftrace_func_entry *entry, *new; |
5982 | int err = -EBUSY, size, i; | |
5983 | ||
5984 | if (ops->func || ops->trampoline) | |
5985 | return -EINVAL; | |
5986 | if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) | |
5987 | return -EINVAL; | |
5988 | if (ops->flags & FTRACE_OPS_FL_ENABLED) | |
5989 | return -EINVAL; | |
5990 | ||
5991 | hash = ops->func_hash->filter_hash; | |
5992 | if (ftrace_hash_empty(hash)) | |
5993 | return -EINVAL; | |
5994 | ||
5995 | mutex_lock(&direct_mutex); | |
5996 | ||
5997 | /* Make sure requested entries are not already registered. */
5998 | size = 1 << hash->size_bits; | |
5999 | for (i = 0; i < size; i++) { | |
6000 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { | |
6001 | if (ftrace_find_rec_direct(entry->ip)) | |
6002 | goto out_unlock; | |
6003 | } | |
6004 | } | |
6005 | ||
f64dd462 | 6006 | err = -ENOMEM; |
d05cb470 SRG |
6007 | |
6008 | /* Make a copy hash to place the new and the old entries in */ | |
6009 | size = hash->count + direct_functions->count; | |
92f1d3b4 MD |
6010 | size = fls(size); |
6011 | if (size > FTRACE_HASH_MAX_BITS) | |
6012 | size = FTRACE_HASH_MAX_BITS; | |
6013 | new_hash = alloc_ftrace_hash(size); | |
d05cb470 SRG |
6014 | if (!new_hash) |
6015 | goto out_unlock; | |
6016 | ||
6017 | /* Now copy over the existing direct entries */ | |
6018 | size = 1 << direct_functions->size_bits; | |
6019 | for (i = 0; i < size; i++) { | |
6020 | hlist_for_each_entry(entry, &direct_functions->buckets[i], hlist) { | |
6021 | new = add_hash_entry(new_hash, entry->ip); | |
6022 | if (!new) | |
6023 | goto out_unlock; | |
6024 | new->direct = entry->direct; | |
6025 | } | |
6026 | } | |
6027 | ||
6028 | /* ... and add the new entries */ | |
6029 | size = 1 << hash->size_bits; | |
f64dd462 JO |
6030 | for (i = 0; i < size; i++) { |
6031 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { | |
d05cb470 | 6032 | new = add_hash_entry(new_hash, entry->ip); |
f64dd462 | 6033 | if (!new) |
d05cb470 SRG |
6034 | goto out_unlock; |
6035 | /* Update both the copy and the hash entry */ | |
6036 | new->direct = addr; | |
f64dd462 JO |
6037 | entry->direct = addr; |
6038 | } | |
6039 | } | |
6040 | ||
d05cb470 SRG |
6041 | free_hash = direct_functions; |
6042 | rcu_assign_pointer(direct_functions, new_hash); | |
6043 | new_hash = NULL; | |
6044 | ||
f64dd462 JO |
6045 | ops->func = call_direct_funcs; |
6046 | ops->flags = MULTI_FLAGS; | |
6047 | ops->trampoline = FTRACE_REGS_ADDR; | |
dbaccb61 | 6048 | ops->direct_call = addr; |
f64dd462 | 6049 | |
53cd885b | 6050 | err = register_ftrace_function_nolock(ops); |
f64dd462 | 6051 | |
f64dd462 JO |
6052 | out_unlock: |
6053 | mutex_unlock(&direct_mutex); | |
6054 | ||
33f13714 PM |
6055 | if (free_hash && free_hash != EMPTY_HASH) |
6056 | call_rcu_tasks(&free_hash->rcu, register_ftrace_direct_cb); | |
d05cb470 SRG |
6057 | |
6058 | if (new_hash) | |
6059 | free_ftrace_hash(new_hash); | |
6060 | ||
f64dd462 JO |
6061 | return err; |
6062 | } | |
da8bdfbd | 6063 | EXPORT_SYMBOL_GPL(register_ftrace_direct); |
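A usage sketch (not part of this file): callers typically select functions with ftrace_set_filter_ip() and then attach the trampoline. Both my_tramp() and direct_ops below are hypothetical; a real trampoline is architecture-specific assembly that saves and restores the traced function's arguments, as the comment above requires.

extern void my_tramp(void);		/* hypothetical asm trampoline */

static struct ftrace_ops direct_ops;

static int attach_direct(unsigned long func_ip)
{
	int ret;

	/* Select which function nops will call the trampoline */
	ret = ftrace_set_filter_ip(&direct_ops, func_ip, 0, 0);
	if (ret)
		return ret;

	/* Patch the selected nop locations to call my_tramp directly */
	return register_ftrace_direct(&direct_ops, (unsigned long)my_tramp);
}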
f64dd462 JO |
6064 | |
6065 | /** | |
da8bdfbd FR |
6066 | * unregister_ftrace_direct - Remove calls to a custom trampoline | |
6067 | * previously registered by register_ftrace_direct for the @ops object. | |
f64dd462 | 6068 | * @ops: The address of the struct ftrace_ops object |
9b5a45eb MG |
6069 | * @addr: The address of the direct function that is called by the @ops functions |
6070 | * @free_filters: Set to true to remove all filters for the ftrace_ops, false otherwise | |
f64dd462 JO |
6071 | * |
6072 | * This is used to remove the direct calls to @addr from the nop locations | |
6073 | * of the functions registered in @ops (set via the ftrace_set_filter_ip | |
6074 | * function). | |
6075 | * | |
6076 | * Returns: | |
6077 | * 0 on success | |
6078 | * -EINVAL - The @ops object was not properly registered. | |
6079 | */ | |
da8bdfbd FR |
6080 | int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr, |
6081 | bool free_filters) | |
f64dd462 JO |
6082 | { |
6083 | struct ftrace_hash *hash = ops->func_hash->filter_hash; | |
6084 | int err; | |
6085 | ||
6086 | if (check_direct_multi(ops)) | |
6087 | return -EINVAL; | |
6088 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) | |
6089 | return -EINVAL; | |
6090 | ||
6091 | mutex_lock(&direct_mutex); | |
6092 | err = unregister_ftrace_function(ops); | |
6093 | remove_direct_functions_hash(hash, addr); | |
6094 | mutex_unlock(&direct_mutex); | |
fea3ffa4 JO |
6095 | |
6096 | /* Clean up for a possible subsequent register call */ | |
6097 | ops->func = NULL; | |
6098 | ops->trampoline = 0; | |
59495740 FR |
6099 | |
6100 | if (free_filters) | |
6101 | ftrace_free_filter(ops); | |
f64dd462 JO |
6102 | return err; |
6103 | } | |
da8bdfbd | 6104 | EXPORT_SYMBOL_GPL(unregister_ftrace_direct); |
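The matching teardown, continuing the hypothetical sketch above; passing true for @free_filters drops the filter hash so direct_ops can be reused or freed:

static void detach_direct(void)
{
	/* Restore the nops and release the ops' filters in one call */
	unregister_ftrace_direct(&direct_ops, (unsigned long)my_tramp, true);
}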
ccf5a89e | 6105 | |
f96f644a | 6106 | static int |
da8bdfbd | 6107 | __modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr) |
ccf5a89e | 6108 | { |
ed292718 | 6109 | struct ftrace_hash *hash; |
ccf5a89e | 6110 | struct ftrace_func_entry *entry, *iter; |
ed292718 SRV |
6111 | static struct ftrace_ops tmp_ops = { |
6112 | .func = ftrace_stub, | |
6113 | .flags = FTRACE_OPS_FL_STUB, | |
6114 | }; | |
ccf5a89e JO |
6115 | int i, size; |
6116 | int err; | |
6117 | ||
f96f644a | 6118 | lockdep_assert_held_once(&direct_mutex); |
ed292718 SRV |
6119 | |
6120 | /* Enable the tmp_ops to have the same functions as the direct ops */ | |
6121 | ftrace_ops_init(&tmp_ops); | |
6122 | tmp_ops.func_hash = ops->func_hash; | |
dbaccb61 | 6123 | tmp_ops.direct_call = addr; |
ed292718 | 6124 | |
53cd885b | 6125 | err = register_ftrace_function_nolock(&tmp_ops); |
ed292718 | 6126 | if (err) |
f96f644a | 6127 | return err; |
ccf5a89e JO |
6128 | |
6129 | /* | |
ed292718 SRV |
6130 | * Now ftrace_ops_list_func() is called to handle the direct callers. |
6131 | * We can safely change the direct functions attached to each entry. | |
ccf5a89e | 6132 | */ |
ed292718 | 6133 | mutex_lock(&ftrace_lock); |
ccf5a89e | 6134 | |
ed292718 | 6135 | hash = ops->func_hash->filter_hash; |
ccf5a89e JO |
6136 | size = 1 << hash->size_bits; |
6137 | for (i = 0; i < size; i++) { | |
6138 | hlist_for_each_entry(iter, &hash->buckets[i], hlist) { | |
6139 | entry = __ftrace_lookup_ip(direct_functions, iter->ip); | |
6140 | if (!entry) | |
6141 | continue; | |
6142 | entry->direct = addr; | |
6143 | } | |
6144 | } | |
dbaccb61 FR |
6145 | /* Prevent store tearing if a trampoline concurrently accesses the value */ |
6146 | WRITE_ONCE(ops->direct_call, addr); | |
ccf5a89e | 6147 | |
2e6e9058 JO |
6148 | mutex_unlock(&ftrace_lock); |
6149 | ||
ed292718 SRV |
6150 | /* Removing the tmp_ops will add the updated direct callers to the functions */ |
6151 | unregister_ftrace_function(&tmp_ops); | |
ccf5a89e | 6152 | |
f96f644a SL |
6153 | return err; |
6154 | } | |
6155 | ||
6156 | /** | |
da8bdfbd | 6157 | * modify_ftrace_direct_nolock - Modify an existing direct 'multi' call |
f96f644a SL |
6158 | * to call something else |
6159 | * @ops: The address of the struct ftrace_ops object | |
6160 | * @addr: The address of the new trampoline to call at @ops functions | |
6161 | * | |
6162 | * This is used to unregister the currently registered direct caller and | |
6163 | * register a new one (@addr) on the functions registered in the @ops object. | |
6164 | * | |
6165 | * Note there's a window between the ftrace_shutdown and ftrace_startup calls | |
6166 | * where no callbacks will be called. | |
6167 | * | |
6168 | * The caller should already hold direct_mutex, so we don't take | |
6169 | * direct_mutex here. | |
6170 | * | |
6171 | * Returns: zero on success. Non zero on error, which includes: | |
6172 | * -EINVAL - The @ops object was not properly registered. | |
6173 | */ | |
da8bdfbd | 6174 | int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr) |
f96f644a SL |
6175 | { |
6176 | if (check_direct_multi(ops)) | |
6177 | return -EINVAL; | |
6178 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) | |
6179 | return -EINVAL; | |
6180 | ||
da8bdfbd | 6181 | return __modify_ftrace_direct(ops, addr); |
f96f644a | 6182 | } |
da8bdfbd | 6183 | EXPORT_SYMBOL_GPL(modify_ftrace_direct_nolock); |
f96f644a SL |
6184 | |
6185 | /** | |
da8bdfbd | 6186 | * modify_ftrace_direct - Modify an existing direct 'multi' call |
f96f644a SL |
6187 | * to call something else |
6188 | * @ops: The address of the struct ftrace_ops object | |
6189 | * @addr: The address of the new trampoline to call at @ops functions | |
6190 | * | |
6191 | * This is used to unregister the currently registered direct caller and | |
6192 | * register a new one (@addr) on the functions registered in the @ops object. | |
6193 | * | |
6194 | * Note there's a window between the ftrace_shutdown and ftrace_startup calls | |
6195 | * where no callbacks will be called. | |
6196 | * | |
6197 | * Returns: zero on success. Non zero on error, which includes: | |
6198 | * -EINVAL - The @ops object was not properly registered. | |
6199 | */ | |
da8bdfbd | 6200 | int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr) |
f96f644a SL |
6201 | { |
6202 | int err; | |
6203 | ||
6204 | if (check_direct_multi(ops)) | |
6205 | return -EINVAL; | |
6206 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) | |
6207 | return -EINVAL; | |
6208 | ||
6209 | mutex_lock(&direct_mutex); | |
da8bdfbd | 6210 | err = __modify_ftrace_direct(ops, addr); |
ccf5a89e JO |
6211 | mutex_unlock(&direct_mutex); |
6212 | return err; | |
6213 | } | |
da8bdfbd | 6214 | EXPORT_SYMBOL_GPL(modify_ftrace_direct); |
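Continuing the same hypothetical sketch: retargeting the live direct calls to a second trampoline without unregistering, which only exposes the shutdown/startup window described above.

extern void my_tramp_v2(void);		/* hypothetical replacement trampoline */

static int swap_trampoline(void)
{
	/* Every nop location owned by direct_ops now calls my_tramp_v2 */
	return modify_ftrace_direct(&direct_ops, (unsigned long)my_tramp_v2);
}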
763e34e7 SRV |
6215 | #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ |
6216 | ||
647664ea MH |
6217 | /** |
6218 | * ftrace_set_filter_ip - set a function to filter on in ftrace by address | |
d1530413 RD |
6219 | * @ops: the ops to set the filter with |
6220 | * @ip: the address to add to or remove from the filter. | |
6221 | * @remove: non-zero to remove the ip from the filter | |
6222 | * @reset: non-zero to reset all filters before applying this filter. | |
647664ea MH |
6223 | * |
6224 | * Filters denote which functions should be enabled when tracing is enabled. | |
f2cc020d | 6225 | * If @ip is NULL, updating the filter fails. |
8be9fbd5 MR |
6226 | * |
6227 | * This can allocate memory which must be freed before @ops can be freed, | |
6228 | * either by removing each filtered addr or by using | |
6229 | * ftrace_free_filter(@ops). | |
647664ea MH |
6230 | */ |
6231 | int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, | |
6232 | int remove, int reset) | |
6233 | { | |
f04f24fb | 6234 | ftrace_ops_init(ops); |
4f554e95 | 6235 | return ftrace_set_addr(ops, &ip, 1, remove, reset, 1); |
647664ea MH |
6236 | } |
6237 | EXPORT_SYMBOL_GPL(ftrace_set_filter_ip); | |
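A small sketch of the @remove/@reset semantics; the address is assumed to have been resolved by the caller (for example from a kprobe), and toggle_ip() is hypothetical:

static int toggle_ip(struct ftrace_ops *ops, unsigned long ip)
{
	int ret;

	/* Add ip to the filter, keeping any addresses already set */
	ret = ftrace_set_filter_ip(ops, ip, 0, 0);
	if (ret)
		return ret;

	/* Remove just this ip again; other filtered addresses survive */
	return ftrace_set_filter_ip(ops, ip, 1, 0);
}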
6238 | ||
4f554e95 JO |
6239 | /** |
6240 | * ftrace_set_filter_ips - set functions to filter on in ftrace by addresses | |
d1530413 RD |
6241 | * @ops: the ops to set the filter with |
6242 | * @ips: the array of addresses to add to or remove from the filter. | |
6243 | * @cnt: the number of addresses in @ips | |
6244 | * @remove: non-zero to remove the ips from the filter | |
6245 | * @reset: non-zero to reset all filters before applying this filter. | |
4f554e95 JO |
6246 | * |
6247 | * Filters denote which functions should be enabled when tracing is enabled. | |
6248 | * If the @ips array or any ip within it is NULL, updating the filter fails. | |
8be9fbd5 MR |
6249 | * |
6250 | * This can allocate memory which must be freed before @ops can be freed, | |
6251 | * either by removing each filtered addr or by using | |
6252 | * ftrace_free_filter(@ops). | |
6253 | */ | |
4f554e95 JO |
6254 | int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips, |
6255 | unsigned int cnt, int remove, int reset) | |
6256 | { | |
6257 | ftrace_ops_init(ops); | |
6258 | return ftrace_set_addr(ops, ips, cnt, remove, reset, 1); | |
6259 | } | |
6260 | EXPORT_SYMBOL_GPL(ftrace_set_filter_ips); | |
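The batched variant in the same spirit; here @reset is non-zero, so the array replaces any previous filter (ips[] is assumed to hold valid kernel text addresses):

static int install_batch(struct ftrace_ops *ops,
			 unsigned long *ips, unsigned int cnt)
{
	/* Replace the existing filter with the cnt addresses in ips[] */
	return ftrace_set_filter_ips(ops, ips, cnt, 0, 1);
}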
6261 | ||
d032ae89 JF |
6262 | /** |
6263 | * ftrace_ops_set_global_filter - setup ops to use global filters | |
d1530413 | 6264 | * @ops: the ops which will use the global filters |
d032ae89 JF |
6265 | * |
6266 | * ftrace users who need global function trace filtering should call this. | |
6267 | * It can set the global filter only if @ops has not been initialized before. | |
6268 | */ | |
6269 | void ftrace_ops_set_global_filter(struct ftrace_ops *ops) | |
6270 | { | |
6271 | if (ops->flags & FTRACE_OPS_FL_INITIALIZED) | |
6272 | return; | |
6273 | ||
6274 | ftrace_ops_init(ops); | |
6275 | ops->func_hash = &global_ops.local_hash; | |
6276 | } | |
6277 | EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter); | |
6278 | ||
647664ea MH |
6279 | static int |
6280 | ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, | |
6281 | int reset, int enable) | |
6282 | { | |
31f505dc SR |
6283 | char *mod = NULL, *func, *command, *next = buf; |
6284 | char *tmp __free(kfree) = NULL; | |
6285 | struct trace_array *tr = ops->private; | |
6286 | int ret; | |
6287 | ||
6288 | func = strsep(&next, ":"); | |
6289 | ||
6290 | /* This can also handle :mod: parsing */ | |
6291 | if (next) { | |
6292 | if (!tr) | |
6293 | return -EINVAL; | |
6294 | ||
6295 | command = strsep(&next, ":"); | |
6296 | if (strcmp(command, "mod") != 0) | |
6297 | return -EINVAL; | |
6298 | ||
6299 | mod = next; | |
6300 | len = command - func; | |
6301 | /* Save the original func as ftrace_set_hash() can modify it */ | |
6302 | tmp = kstrdup(func, GFP_KERNEL); | |
6303 | } | |
6304 | ||
6305 | ret = ftrace_set_hash(ops, func, len, NULL, 0, 0, reset, enable, mod); | |
6306 | ||
6307 | if (tr && mod && ret < 0) { | |
6308 | /* Did tmp fail to allocate? */ | |
6309 | if (!tmp) | |
6310 | return -ENOMEM; | |
6311 | ret = cache_mod(tr, tmp, mod, enable); | |
6312 | } | |
6313 | ||
6314 | return ret; | |
647664ea MH |
6315 | } |
6316 | ||
77a2b37d SR |
6317 | /** |
6318 | * ftrace_set_filter - set a function to filter on in ftrace | |
d1530413 RD |
6319 | * @ops: the ops to set the filter with |
6320 | * @buf: the string that holds the function filter text. | |
6321 | * @len: the length of the string. | |
6322 | * @reset: non-zero to reset all filters before applying this filter. | |
936e074b SR |
6323 | * |
6324 | * Filters denote which functions should be enabled when tracing is enabled. | |
6325 | * If @buf is NULL and reset is set, all functions will be enabled for tracing. | |
8be9fbd5 MR |
6326 | * |
6327 | * This can allocate memory which must be freed before @ops can be freed, | |
6328 | * either by removing each filtered addr or by using | |
6329 | * ftrace_free_filter(@ops). | |
936e074b | 6330 | */ |
ac483c44 | 6331 | int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, |
936e074b SR |
6332 | int len, int reset) |
6333 | { | |
f04f24fb | 6334 | ftrace_ops_init(ops); |
ac483c44 | 6335 | return ftrace_set_regex(ops, buf, len, reset, 1); |
936e074b SR |
6336 | } |
6337 | EXPORT_SYMBOL_GPL(ftrace_set_filter); | |
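For illustration, a minimal ops that traces one function by name; my_callback and my_ops are hypothetical, but the callback signature is the ftrace_func_t used throughout this file:

static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* Invoked on entry to every function matching the filter */
}

static struct ftrace_ops my_ops = {
	.func = my_callback,
};

static int __init my_trace_init(void)
{
	int ret;

	/* reset=1 clears any earlier filter before adding "vfs_read" */
	ret = ftrace_set_filter(&my_ops, "vfs_read", strlen("vfs_read"), 1);
	if (ret)
		return ret;

	return register_ftrace_function(&my_ops);
}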
6338 | ||
6339 | /** | |
6340 | * ftrace_set_notrace - set a function to not trace in ftrace | |
d1530413 RD |
6341 | * @ops: the ops to set the notrace filter with |
6342 | * @buf: the string that holds the function notrace text. | |
6343 | * @len: the length of the string. | |
6344 | * @reset: non-zero to reset all filters before applying this filter. | |
936e074b SR |
6345 | * |
6346 | * Notrace Filters denote which functions should not be enabled when tracing | |
6347 | * is enabled. If @buf is NULL and reset is set, all functions will be enabled | |
6348 | * for tracing. | |
8be9fbd5 MR |
6349 | * |
6350 | * This can allocate memory which must be freed before @ops can be freed, | |
6351 | * either by removing each filtered addr or by using | |
6352 | * ftrace_free_filter(@ops). | |
936e074b | 6353 | */ |
ac483c44 | 6354 | int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, |
936e074b SR |
6355 | int len, int reset) |
6356 | { | |
f04f24fb | 6357 | ftrace_ops_init(ops); |
ac483c44 | 6358 | return ftrace_set_regex(ops, buf, len, reset, 0); |
936e074b SR |
6359 | } |
6360 | EXPORT_SYMBOL_GPL(ftrace_set_notrace); | |
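And the complementary notrace side, excluding a glob pattern from the hypothetical my_ops above:

static int exclude_locks(void)
{
	/* reset=0 keeps any notrace patterns that were already set */
	return ftrace_set_notrace(&my_ops, "*spin*", strlen("*spin*"), 0);
}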
6361 | /** | |
8d1b065d | 6362 | * ftrace_set_global_filter - set a function to filter on with global tracers |
d1530413 RD |
6363 | * @buf: the string that holds the function filter text. |
6364 | * @len: the length of the string. | |
6365 | * @reset: non-zero to reset all filters before applying this filter. | |
77a2b37d SR |
6366 | * |
6367 | * Filters denote which functions should be enabled when tracing is enabled. | |
6368 | * If @buf is NULL and reset is set, all functions will be enabled for tracing. | |
6369 | */ | |
936e074b | 6370 | void ftrace_set_global_filter(unsigned char *buf, int len, int reset) |
77a2b37d | 6371 | { |
f45948e8 | 6372 | ftrace_set_regex(&global_ops, buf, len, reset, 1); |
41c52c0d | 6373 | } |
936e074b | 6374 | EXPORT_SYMBOL_GPL(ftrace_set_global_filter); |
4eebcc81 | 6375 | |
41c52c0d | 6376 | /** |
8d1b065d | 6377 | * ftrace_set_global_notrace - set a function to not trace with global tracers |
d1530413 RD |
6378 | * @buf: the string that holds the function notrace text. |
6379 | * @len: the length of the string. | |
6380 | * @reset: non-zero to reset all filters before applying this filter. | |
41c52c0d SR |
6381 | * |
6382 | * Notrace Filters denote which functions should not be enabled when tracing | |
6383 | * is enabled. If @buf is NULL and reset is set, all functions will be enabled | |
6384 | * for tracing. | |
6385 | */ | |
936e074b | 6386 | void ftrace_set_global_notrace(unsigned char *buf, int len, int reset) |
41c52c0d | 6387 | { |
f45948e8 | 6388 | ftrace_set_regex(&global_ops, buf, len, reset, 0); |
77a2b37d | 6389 | } |
936e074b | 6390 | EXPORT_SYMBOL_GPL(ftrace_set_global_notrace); |
77a2b37d | 6391 | |
2af15d6a SR |
6392 | /* |
6393 | * command line interface to allow users to set filters on boot up. | |
6394 | */ | |
6395 | #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE | |
6396 | static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata; | |
6397 | static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; | |
6398 | ||
f1ed7c74 SRRH |
6399 | /* Used by the function selftest to skip the test when a filter is set */ | |
6400 | bool ftrace_filter_param __initdata; | |
6401 | ||
2af15d6a SR |
6402 | static int __init set_ftrace_notrace(char *str) |
6403 | { | |
f1ed7c74 | 6404 | ftrace_filter_param = true; |
d0c2d66f | 6405 | strscpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); |
2af15d6a SR |
6406 | return 1; |
6407 | } | |
6408 | __setup("ftrace_notrace=", set_ftrace_notrace); | |
6409 | ||
6410 | static int __init set_ftrace_filter(char *str) | |
6411 | { | |
f1ed7c74 | 6412 | ftrace_filter_param = true; |
d0c2d66f | 6413 | strscpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); |
2af15d6a SR |
6414 | return 1; |
6415 | } | |
6416 | __setup("ftrace_filter=", set_ftrace_filter); | |
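For example, booting with ftrace_filter=vfs_read,vfs_write ftrace_notrace=*rcu* seeds the global filter and notrace hashes before tracefs is even mounted (the function names here are examples only).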
6417 | ||
369bc18f | 6418 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
f6060f46 | 6419 | static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; |
0d7d9a16 | 6420 | static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata; |
b9b0c831 | 6421 | static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer); |
801c29fd | 6422 | |
369bc18f SA |
6423 | static int __init set_graph_function(char *str) |
6424 | { | |
d0c2d66f | 6425 | strscpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); |
369bc18f SA |
6426 | return 1; |
6427 | } | |
6428 | __setup("ftrace_graph_filter=", set_graph_function); | |
6429 | ||
0d7d9a16 NK |
6430 | static int __init set_graph_notrace_function(char *str) |
6431 | { | |
d0c2d66f | 6432 | strscpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE); |
0d7d9a16 NK |
6433 | return 1; |
6434 | } | |
6435 | __setup("ftrace_graph_notrace=", set_graph_notrace_function); | |
6436 | ||
65a50c65 TB |
6437 | static int __init set_graph_max_depth_function(char *str) |
6438 | { | |
c5963a09 | 6439 | if (!str || kstrtouint(str, 0, &fgraph_max_depth)) |
65a50c65 | 6440 | return 0; |
65a50c65 TB |
6441 | return 1; |
6442 | } | |
6443 | __setup("ftrace_graph_max_depth=", set_graph_max_depth_function); | |
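Similarly, ftrace_graph_filter=do_sys_open ftrace_graph_max_depth=2 would limit the graph tracer to that entry point and two call levels from early boot (example values only).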
0d7d9a16 NK |
6444 | |
6445 | static void __init set_ftrace_early_graph(char *buf, int enable) | |
369bc18f SA |
6446 | { |
6447 | int ret; | |
6448 | char *func; | |
b9b0c831 | 6449 | struct ftrace_hash *hash; |
0d7d9a16 | 6450 | |
92ad18ec | 6451 | hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); |
24589e3a | 6452 | if (MEM_FAIL(!hash, "Failed to allocate hash\n")) |
92ad18ec | 6453 | return; |
369bc18f SA |
6454 | |
6455 | while (buf) { | |
6456 | func = strsep(&buf, ","); | |
6457 | /* we allow only one expression at a time */ | |
b9b0c831 | 6458 | ret = ftrace_graph_set_hash(hash, func); |
369bc18f SA |
6459 | if (ret) |
6460 | printk(KERN_DEBUG "ftrace: function %s not " | |
6461 | "traceable\n", func); | |
6462 | } | |
92ad18ec SRV |
6463 | |
6464 | if (enable) | |
6465 | ftrace_graph_hash = hash; | |
6466 | else | |
6467 | ftrace_graph_notrace_hash = hash; | |
369bc18f SA |
6468 | } |
6469 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | |
6470 | ||
2a85a37f SR |
6471 | void __init |
6472 | ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable) | |
2af15d6a SR |
6473 | { |
6474 | char *func; | |
6475 | ||
f04f24fb MH |
6476 | ftrace_ops_init(ops); |
6477 | ||
31f505dc SR |
6478 | /* The trace_array is needed for caching module function filters */ |
6479 | if (!ops->private) { | |
6480 | struct trace_array *tr = trace_get_global_array(); | |
6481 | ||
6482 | ops->private = tr; | |
6483 | ftrace_init_trace_array(tr); | |
6484 | } | |
6485 | ||
2af15d6a SR |
6486 | while (buf) { |
6487 | func = strsep(&buf, ","); | |
f45948e8 | 6488 | ftrace_set_regex(ops, func, strlen(func), 0, enable); |
2af15d6a SR |
6489 | } |
6490 | } | |
6491 | ||
6492 | static void __init set_ftrace_early_filters(void) | |
6493 | { | |
6494 | if (ftrace_filter_buf[0]) | |
2a85a37f | 6495 | ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1); |
2af15d6a | 6496 | if (ftrace_notrace_buf[0]) |
2a85a37f | 6497 | ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0); |
369bc18f SA |
6498 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
6499 | if (ftrace_graph_buf[0]) | |
0d7d9a16 NK |
6500 | set_ftrace_early_graph(ftrace_graph_buf, 1); |
6501 | if (ftrace_graph_notrace_buf[0]) | |
6502 | set_ftrace_early_graph(ftrace_graph_notrace_buf, 0); | |
369bc18f | 6503 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
2af15d6a SR |
6504 | } |
6505 | ||
fc13cb0c | 6506 | int ftrace_regex_release(struct inode *inode, struct file *file) |
5072c59f SR |
6507 | { |
6508 | struct seq_file *m = (struct seq_file *)file->private_data; | |
6509 | struct ftrace_iterator *iter; | |
33dc9b12 | 6510 | struct ftrace_hash **orig_hash; |
689fd8b6 | 6511 | struct trace_parser *parser; |
ed926f9b | 6512 | int filter_hash; |
5072c59f | 6513 | |
5072c59f SR |
6514 | if (file->f_mode & FMODE_READ) { |
6515 | iter = m->private; | |
5072c59f SR |
6516 | seq_release(inode, file); |
6517 | } else | |
6518 | iter = file->private_data; | |
6519 | ||
689fd8b6 | 6520 | parser = &iter->parser; |
6521 | if (trace_parser_loaded(parser)) { | |
8c9af478 SRV |
6522 | int enable = !(iter->flags & FTRACE_ITER_NOTRACE); |
6523 | ||
6524 | ftrace_process_regex(iter, parser->buffer, | |
6525 | parser->idx, enable); | |
5072c59f SR |
6526 | } |
6527 | ||
689fd8b6 | 6528 | trace_parser_put(parser); |
689fd8b6 | 6529 | |
33b7f99c | 6530 | mutex_lock(&iter->ops->func_hash->regex_lock); |
3f2367ba | 6531 | |
058e297d | 6532 | if (file->f_mode & FMODE_WRITE) { |
ed926f9b SR |
6533 | filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); |
6534 | ||
8c08f0d5 | 6535 | if (filter_hash) { |
33b7f99c | 6536 | orig_hash = &iter->ops->func_hash->filter_hash; |
0ce0638e ZY |
6537 | if (iter->tr) { |
6538 | if (list_empty(&iter->tr->mod_trace)) | |
6539 | iter->hash->flags &= ~FTRACE_HASH_FL_MOD; | |
6540 | else | |
6541 | iter->hash->flags |= FTRACE_HASH_FL_MOD; | |
6542 | } | |
8c08f0d5 | 6543 | } else |
33b7f99c | 6544 | orig_hash = &iter->ops->func_hash->notrace_hash; |
33dc9b12 | 6545 | |
058e297d | 6546 | mutex_lock(&ftrace_lock); |
045e269c | 6547 | ftrace_hash_move_and_update_ops(iter->ops, orig_hash, |
e16b35dd | 6548 | iter->hash, filter_hash); |
058e297d | 6549 | mutex_unlock(&ftrace_lock); |
c20489da SRV |
6550 | } else { |
6551 | /* For read only, the hash is the ops hash */ | |
6552 | iter->hash = NULL; | |
058e297d | 6553 | } |
3f2367ba | 6554 | |
33b7f99c | 6555 | mutex_unlock(&iter->ops->func_hash->regex_lock); |
33dc9b12 | 6556 | free_ftrace_hash(iter->hash); |
9ef16693 SRV |
6557 | if (iter->tr) |
6558 | trace_array_put(iter->tr); | |
33dc9b12 | 6559 | kfree(iter); |
058e297d | 6560 | |
5072c59f SR |
6561 | return 0; |
6562 | } | |
6563 | ||
5e2336a0 | 6564 | static const struct file_operations ftrace_avail_fops = { |
5072c59f SR |
6565 | .open = ftrace_avail_open, |
6566 | .read = seq_read, | |
6567 | .llseek = seq_lseek, | |
3be04b47 | 6568 | .release = seq_release_private, |
5072c59f SR |
6569 | }; |
6570 | ||
647bcd03 SR |
6571 | static const struct file_operations ftrace_enabled_fops = { |
6572 | .open = ftrace_enabled_open, | |
6573 | .read = seq_read, | |
6574 | .llseek = seq_lseek, | |
6575 | .release = seq_release_private, | |
6576 | }; | |
6577 | ||
e11b521a SRG |
6578 | static const struct file_operations ftrace_touched_fops = { |
6579 | .open = ftrace_touched_open, | |
6580 | .read = seq_read, | |
6581 | .llseek = seq_lseek, | |
6582 | .release = seq_release_private, | |
6583 | }; | |
6584 | ||
83f74441 JO |
6585 | static const struct file_operations ftrace_avail_addrs_fops = { |
6586 | .open = ftrace_avail_addrs_open, | |
6587 | .read = seq_read, | |
6588 | .llseek = seq_lseek, | |
6589 | .release = seq_release_private, | |
6590 | }; | |
6591 | ||
5e2336a0 | 6592 | static const struct file_operations ftrace_filter_fops = { |
5072c59f | 6593 | .open = ftrace_filter_open, |
850a80cf | 6594 | .read = seq_read, |
5072c59f | 6595 | .write = ftrace_filter_write, |
098c879e | 6596 | .llseek = tracing_lseek, |
1cf41dd7 | 6597 | .release = ftrace_regex_release, |
5072c59f SR |
6598 | }; |
6599 | ||
5e2336a0 | 6600 | static const struct file_operations ftrace_notrace_fops = { |
41c52c0d | 6601 | .open = ftrace_notrace_open, |
850a80cf | 6602 | .read = seq_read, |
41c52c0d | 6603 | .write = ftrace_notrace_write, |
098c879e | 6604 | .llseek = tracing_lseek, |
1cf41dd7 | 6605 | .release = ftrace_regex_release, |
41c52c0d SR |
6606 | }; |
6607 | ||
ea4e2bc4 SR |
6608 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
6609 | ||
6610 | static DEFINE_MUTEX(graph_lock); | |
6611 | ||
24a9729f | 6612 | struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH; |
fd0e6852 | 6613 | struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH; |
b9b0c831 NK |
6614 | |
6615 | enum graph_filter_type { | |
6616 | GRAPH_FILTER_NOTRACE = 0, | |
6617 | GRAPH_FILTER_FUNCTION, | |
6618 | }; | |
ea4e2bc4 | 6619 | |
555fc781 SRV |
6620 | #define FTRACE_GRAPH_EMPTY ((void *)1) |
6621 | ||
faf982a6 | 6622 | struct ftrace_graph_data { |
e704eff3 SRV |
6623 | struct ftrace_hash *hash; |
6624 | struct ftrace_func_entry *entry; | |
6625 | int idx; /* for hash table iteration */ | |
6626 | enum graph_filter_type type; | |
6627 | struct ftrace_hash *new_hash; | |
6628 | const struct seq_operations *seq_ops; | |
6629 | struct trace_parser parser; | |
faf982a6 NK |
6630 | }; |
6631 | ||
ea4e2bc4 | 6632 | static void * |
85951842 | 6633 | __g_next(struct seq_file *m, loff_t *pos) |
ea4e2bc4 | 6634 | { |
faf982a6 | 6635 | struct ftrace_graph_data *fgd = m->private; |
b9b0c831 NK |
6636 | struct ftrace_func_entry *entry = fgd->entry; |
6637 | struct hlist_head *head; | |
6638 | int i, idx = fgd->idx; | |
faf982a6 | 6639 | |
b9b0c831 | 6640 | if (*pos >= fgd->hash->count) |
ea4e2bc4 | 6641 | return NULL; |
b9b0c831 NK |
6642 | |
6643 | if (entry) { | |
6644 | hlist_for_each_entry_continue(entry, hlist) { | |
6645 | fgd->entry = entry; | |
6646 | return entry; | |
6647 | } | |
6648 | ||
6649 | idx++; | |
6650 | } | |
6651 | ||
6652 | for (i = idx; i < 1 << fgd->hash->size_bits; i++) { | |
6653 | head = &fgd->hash->buckets[i]; | |
6654 | hlist_for_each_entry(entry, head, hlist) { | |
6655 | fgd->entry = entry; | |
6656 | fgd->idx = i; | |
6657 | return entry; | |
6658 | } | |
6659 | } | |
6660 | return NULL; | |
85951842 | 6661 | } |
ea4e2bc4 | 6662 | |
85951842 LZ |
6663 | static void * |
6664 | g_next(struct seq_file *m, void *v, loff_t *pos) | |
6665 | { | |
6666 | (*pos)++; | |
6667 | return __g_next(m, pos); | |
ea4e2bc4 SR |
6668 | } |
6669 | ||
6670 | static void *g_start(struct seq_file *m, loff_t *pos) | |
6671 | { | |
faf982a6 NK |
6672 | struct ftrace_graph_data *fgd = m->private; |
6673 | ||
ea4e2bc4 SR |
6674 | mutex_lock(&graph_lock); |
6675 | ||
649b988b SRV |
6676 | if (fgd->type == GRAPH_FILTER_FUNCTION) |
6677 | fgd->hash = rcu_dereference_protected(ftrace_graph_hash, | |
6678 | lockdep_is_held(&graph_lock)); | |
6679 | else | |
6680 | fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, | |
6681 | lockdep_is_held(&graph_lock)); | |
6682 | ||
f9349a8f | 6683 | /* Hash is empty; tell g_show to print that all functions are enabled */ |
b9b0c831 | 6684 | if (ftrace_hash_empty(fgd->hash) && !*pos) |
555fc781 | 6685 | return FTRACE_GRAPH_EMPTY; |
f9349a8f | 6686 | |
b9b0c831 NK |
6687 | fgd->idx = 0; |
6688 | fgd->entry = NULL; | |
85951842 | 6689 | return __g_next(m, pos); |
ea4e2bc4 SR |
6690 | } |
6691 | ||
6692 | static void g_stop(struct seq_file *m, void *p) | |
6693 | { | |
6694 | mutex_unlock(&graph_lock); | |
6695 | } | |
6696 | ||
6697 | static int g_show(struct seq_file *m, void *v) | |
6698 | { | |
b9b0c831 | 6699 | struct ftrace_func_entry *entry = v; |
ea4e2bc4 | 6700 | |
b9b0c831 | 6701 | if (!entry) |
ea4e2bc4 SR |
6702 | return 0; |
6703 | ||
555fc781 | 6704 | if (entry == FTRACE_GRAPH_EMPTY) { |
280d1429 NK |
6705 | struct ftrace_graph_data *fgd = m->private; |
6706 | ||
b9b0c831 | 6707 | if (fgd->type == GRAPH_FILTER_FUNCTION) |
fa6f0cc7 | 6708 | seq_puts(m, "#### all functions enabled ####\n"); |
280d1429 | 6709 | else |
fa6f0cc7 | 6710 | seq_puts(m, "#### no functions disabled ####\n"); |
f9349a8f FW |
6711 | return 0; |
6712 | } | |
6713 | ||
b9b0c831 | 6714 | seq_printf(m, "%ps\n", (void *)entry->ip); |
ea4e2bc4 SR |
6715 | |
6716 | return 0; | |
6717 | } | |
6718 | ||
88e9d34c | 6719 | static const struct seq_operations ftrace_graph_seq_ops = { |
ea4e2bc4 SR |
6720 | .start = g_start, |
6721 | .next = g_next, | |
6722 | .stop = g_stop, | |
6723 | .show = g_show, | |
6724 | }; | |
6725 | ||
6726 | static int | |
faf982a6 NK |
6727 | __ftrace_graph_open(struct inode *inode, struct file *file, |
6728 | struct ftrace_graph_data *fgd) | |
ea4e2bc4 | 6729 | { |
17911ff3 | 6730 | int ret; |
b9b0c831 | 6731 | struct ftrace_hash *new_hash = NULL; |
ea4e2bc4 | 6732 | |
17911ff3 SRV |
6733 | ret = security_locked_down(LOCKDOWN_TRACEFS); |
6734 | if (ret) | |
6735 | return ret; | |
6736 | ||
b9b0c831 NK |
6737 | if (file->f_mode & FMODE_WRITE) { |
6738 | const int size_bits = FTRACE_HASH_DEFAULT_BITS; | |
6739 | ||
e704eff3 SRV |
6740 | if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX)) |
6741 | return -ENOMEM; | |
6742 | ||
b9b0c831 NK |
6743 | if (file->f_flags & O_TRUNC) |
6744 | new_hash = alloc_ftrace_hash(size_bits); | |
6745 | else | |
6746 | new_hash = alloc_and_copy_ftrace_hash(size_bits, | |
6747 | fgd->hash); | |
6748 | if (!new_hash) { | |
6749 | ret = -ENOMEM; | |
6750 | goto out; | |
6751 | } | |
ea4e2bc4 SR |
6752 | } |
6753 | ||
faf982a6 | 6754 | if (file->f_mode & FMODE_READ) { |
b9b0c831 | 6755 | ret = seq_open(file, &ftrace_graph_seq_ops); |
faf982a6 NK |
6756 | if (!ret) { |
6757 | struct seq_file *m = file->private_data; | |
6758 | m->private = fgd; | |
b9b0c831 NK |
6759 | } else { |
6760 | /* Failed */ | |
6761 | free_ftrace_hash(new_hash); | |
6762 | new_hash = NULL; | |
faf982a6 NK |
6763 | } |
6764 | } else | |
6765 | file->private_data = fgd; | |
ea4e2bc4 | 6766 | |
b9b0c831 | 6767 | out: |
e704eff3 SRV |
6768 | if (ret < 0 && file->f_mode & FMODE_WRITE) |
6769 | trace_parser_put(&fgd->parser); | |
6770 | ||
b9b0c831 | 6771 | fgd->new_hash = new_hash; |
649b988b SRV |
6772 | |
6773 | /* | |
6774 | * All uses of fgd->hash must be taken with the graph_lock | |
6775 | * held. The graph_lock is going to be released, so force | |
6776 | * fgd->hash to be reinitialized when it is taken again. | |
6777 | */ | |
6778 | fgd->hash = NULL; | |
6779 | ||
ea4e2bc4 SR |
6780 | return ret; |
6781 | } | |
6782 | ||
faf982a6 NK |
6783 | static int |
6784 | ftrace_graph_open(struct inode *inode, struct file *file) | |
6785 | { | |
6786 | struct ftrace_graph_data *fgd; | |
b9b0c831 | 6787 | int ret; |
faf982a6 NK |
6788 | |
6789 | if (unlikely(ftrace_disabled)) | |
6790 | return -ENODEV; | |
6791 | ||
6792 | fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); | |
6793 | if (fgd == NULL) | |
6794 | return -ENOMEM; | |
6795 | ||
b9b0c831 NK |
6796 | mutex_lock(&graph_lock); |
6797 | ||
649b988b SRV |
6798 | fgd->hash = rcu_dereference_protected(ftrace_graph_hash, |
6799 | lockdep_is_held(&graph_lock)); | |
b9b0c831 | 6800 | fgd->type = GRAPH_FILTER_FUNCTION; |
faf982a6 NK |
6801 | fgd->seq_ops = &ftrace_graph_seq_ops; |
6802 | ||
b9b0c831 NK |
6803 | ret = __ftrace_graph_open(inode, file, fgd); |
6804 | if (ret < 0) | |
6805 | kfree(fgd); | |
6806 | ||
6807 | mutex_unlock(&graph_lock); | |
6808 | return ret; | |
faf982a6 NK |
6809 | } |
6810 | ||
29ad23b0 NK |
6811 | static int |
6812 | ftrace_graph_notrace_open(struct inode *inode, struct file *file) | |
6813 | { | |
6814 | struct ftrace_graph_data *fgd; | |
b9b0c831 | 6815 | int ret; |
29ad23b0 NK |
6816 | |
6817 | if (unlikely(ftrace_disabled)) | |
6818 | return -ENODEV; | |
6819 | ||
6820 | fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); | |
6821 | if (fgd == NULL) | |
6822 | return -ENOMEM; | |
6823 | ||
b9b0c831 NK |
6824 | mutex_lock(&graph_lock); |
6825 | ||
649b988b SRV |
6826 | fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, |
6827 | lockdep_is_held(&graph_lock)); | |
b9b0c831 | 6828 | fgd->type = GRAPH_FILTER_NOTRACE; |
29ad23b0 NK |
6829 | fgd->seq_ops = &ftrace_graph_seq_ops; |
6830 | ||
b9b0c831 NK |
6831 | ret = __ftrace_graph_open(inode, file, fgd); |
6832 | if (ret < 0) | |
6833 | kfree(fgd); | |
6834 | ||
6835 | mutex_unlock(&graph_lock); | |
6836 | return ret; | |
29ad23b0 NK |
6837 | } |
6838 | ||
87827111 LZ |
6839 | static int |
6840 | ftrace_graph_release(struct inode *inode, struct file *file) | |
6841 | { | |
b9b0c831 | 6842 | struct ftrace_graph_data *fgd; |
e704eff3 SRV |
6843 | struct ftrace_hash *old_hash, *new_hash; |
6844 | struct trace_parser *parser; | |
6845 | int ret = 0; | |
b9b0c831 | 6846 | |
faf982a6 NK |
6847 | if (file->f_mode & FMODE_READ) { |
6848 | struct seq_file *m = file->private_data; | |
6849 | ||
b9b0c831 | 6850 | fgd = m->private; |
87827111 | 6851 | seq_release(inode, file); |
faf982a6 | 6852 | } else { |
b9b0c831 | 6853 | fgd = file->private_data; |
faf982a6 NK |
6854 | } |
6855 | ||
e704eff3 SRV |
6856 | |
6857 | if (file->f_mode & FMODE_WRITE) { | |
6858 | ||
6859 | parser = &fgd->parser; | |
6860 | ||
6861 | if (trace_parser_loaded((parser))) { | |
e704eff3 SRV |
6862 | ret = ftrace_graph_set_hash(fgd->new_hash, |
6863 | parser->buffer); | |
6864 | } | |
6865 | ||
6866 | trace_parser_put(parser); | |
6867 | ||
6868 | new_hash = __ftrace_hash_move(fgd->new_hash); | |
6869 | if (!new_hash) { | |
6870 | ret = -ENOMEM; | |
6871 | goto out; | |
6872 | } | |
6873 | ||
6874 | mutex_lock(&graph_lock); | |
6875 | ||
6876 | if (fgd->type == GRAPH_FILTER_FUNCTION) { | |
6877 | old_hash = rcu_dereference_protected(ftrace_graph_hash, | |
6878 | lockdep_is_held(&graph_lock)); | |
6879 | rcu_assign_pointer(ftrace_graph_hash, new_hash); | |
6880 | } else { | |
6881 | old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash, | |
6882 | lockdep_is_held(&graph_lock)); | |
6883 | rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash); | |
6884 | } | |
6885 | ||
6886 | mutex_unlock(&graph_lock); | |
6887 | ||
54a16ff6 SRV |
6888 | /* |
6889 | * We need to do a hard force of sched synchronization. | |
6890 | * This is because we use preempt_disable() to do RCU, but | |
6891 | * the function tracers can be called where RCU is not watching | |
6892 | * (like before user_exit()). We cannot rely on the RCU | |
6893 | * infrastructure to do the synchronization, thus we must do it | |
6894 | * ourselves. | |
6895 | */ | |
68e83498 NSJ |
6896 | if (old_hash != EMPTY_HASH) |
6897 | synchronize_rcu_tasks_rude(); | |
e704eff3 SRV |
6898 | |
6899 | free_ftrace_hash(old_hash); | |
6900 | } | |
6901 | ||
6902 | out: | |
f9797c2f | 6903 | free_ftrace_hash(fgd->new_hash); |
b9b0c831 NK |
6904 | kfree(fgd); |
6905 | ||
e704eff3 | 6906 | return ret; |
87827111 LZ |
6907 | } |
6908 | ||
ea4e2bc4 | 6909 | static int |
b9b0c831 | 6910 | ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer) |
ea4e2bc4 | 6911 | { |
3ba00929 | 6912 | struct ftrace_glob func_g; |
ea4e2bc4 SR |
6913 | struct dyn_ftrace *rec; |
6914 | struct ftrace_page *pg; | |
b9b0c831 | 6915 | struct ftrace_func_entry *entry; |
c7c6b1fe | 6916 | int fail = 1; |
3ba00929 | 6917 | int not; |
ea4e2bc4 | 6918 | |
f9349a8f | 6919 | /* decode regex */ |
3ba00929 DS |
6920 | func_g.type = filter_parse_regex(buffer, strlen(buffer), |
6921 | &func_g.search, ¬); | |
f9349a8f | 6922 | |
3ba00929 | 6923 | func_g.len = strlen(func_g.search); |
f9349a8f | 6924 | |
8b0cb3a4 | 6925 | guard(mutex)(&ftrace_lock); |
45a4a237 | 6926 | |
8b0cb3a4 | 6927 | if (unlikely(ftrace_disabled)) |
45a4a237 | 6928 | return -ENODEV; |
45a4a237 | 6929 | |
265c831c SR |
6930 | do_for_each_ftrace_rec(pg, rec) { |
6931 | ||
546fece4 SRRH |
6932 | if (rec->flags & FTRACE_FL_DISABLED) |
6933 | continue; | |
6934 | ||
0b507e1e | 6935 | if (ftrace_match_record(rec, &func_g, NULL, 0)) { |
b9b0c831 | 6936 | entry = ftrace_lookup_ip(hash, rec->ip); |
c7c6b1fe LZ |
6937 | |
6938 | if (!not) { | |
6939 | fail = 0; | |
b9b0c831 NK |
6940 | |
6941 | if (entry) | |
6942 | continue; | |
d05cb470 | 6943 | if (add_hash_entry(hash, rec->ip) == NULL) |
8b0cb3a4 | 6944 | return 0; |
c7c6b1fe | 6945 | } else { |
b9b0c831 NK |
6946 | if (entry) { |
6947 | free_hash_entry(hash, entry); | |
c7c6b1fe LZ |
6948 | fail = 0; |
6949 | } | |
6950 | } | |
ea4e2bc4 | 6951 | } |
42ea22e7 | 6952 | cond_resched(); |
265c831c | 6953 | } while_for_each_ftrace_rec(); |
ea4e2bc4 | 6954 | |
8b0cb3a4 | 6955 | return fail ? -EINVAL : 0; |
ea4e2bc4 SR |
6956 | } |
6957 | ||
6958 | static ssize_t | |
6959 | ftrace_graph_write(struct file *file, const char __user *ubuf, | |
6960 | size_t cnt, loff_t *ppos) | |
6961 | { | |
6a10108b | 6962 | ssize_t read, ret = 0; |
faf982a6 | 6963 | struct ftrace_graph_data *fgd = file->private_data; |
e704eff3 | 6964 | struct trace_parser *parser; |
ea4e2bc4 | 6965 | |
c7c6b1fe | 6966 | if (!cnt) |
ea4e2bc4 SR |
6967 | return 0; |
6968 | ||
ae98d27a SRV |
6969 | /* Read mode uses seq functions */ |
6970 | if (file->f_mode & FMODE_READ) { | |
6971 | struct seq_file *m = file->private_data; | |
6972 | fgd = m->private; | |
6973 | } | |
6974 | ||
e704eff3 | 6975 | parser = &fgd->parser; |
ea4e2bc4 | 6976 | |
e704eff3 | 6977 | read = trace_get_user(parser, ubuf, cnt, ppos); |
689fd8b6 | 6978 | |
e704eff3 SRV |
6979 | if (read >= 0 && trace_parser_loaded(parser) && |
6980 | !trace_parser_cont(parser)) { | |
6a10108b | 6981 | |
b9b0c831 | 6982 | ret = ftrace_graph_set_hash(fgd->new_hash, |
e704eff3 SRV |
6983 | parser->buffer); |
6984 | trace_parser_clear(parser); | |
ea4e2bc4 | 6985 | } |
ea4e2bc4 | 6986 | |
6a10108b NK |
6987 | if (!ret) |
6988 | ret = read; | |
1eb90f13 | 6989 | |
ea4e2bc4 SR |
6990 | return ret; |
6991 | } | |
6992 | ||
6993 | static const struct file_operations ftrace_graph_fops = { | |
87827111 LZ |
6994 | .open = ftrace_graph_open, |
6995 | .read = seq_read, | |
6996 | .write = ftrace_graph_write, | |
098c879e | 6997 | .llseek = tracing_lseek, |
87827111 | 6998 | .release = ftrace_graph_release, |
ea4e2bc4 | 6999 | }; |
29ad23b0 NK |
7000 | |
7001 | static const struct file_operations ftrace_graph_notrace_fops = { | |
7002 | .open = ftrace_graph_notrace_open, | |
7003 | .read = seq_read, | |
7004 | .write = ftrace_graph_write, | |
098c879e | 7005 | .llseek = tracing_lseek, |
29ad23b0 NK |
7006 | .release = ftrace_graph_release, |
7007 | }; | |
ea4e2bc4 SR |
7008 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
7009 | ||
591dffda SRRH |
7010 | void ftrace_create_filter_files(struct ftrace_ops *ops, |
7011 | struct dentry *parent) | |
7012 | { | |
7013 | ||
21ccc9cd | 7014 | trace_create_file("set_ftrace_filter", TRACE_MODE_WRITE, parent, |
591dffda SRRH |
7015 | ops, &ftrace_filter_fops); |
7016 | ||
21ccc9cd | 7017 | trace_create_file("set_ftrace_notrace", TRACE_MODE_WRITE, parent, |
591dffda SRRH |
7018 | ops, &ftrace_notrace_fops); |
7019 | } | |
7020 | ||
7021 | /* | |
7022 | * The name "destroy_filter_files" is really a misnomer. Although | |
9efb85c5 | 7023 | * in the future it may actually delete the files, this is | |
591dffda SRRH |
7024 | * really intended to make sure the ops passed in are disabled | |
7025 | * and that when this function returns, the caller is free to | |
7026 | * free the ops. | |
7027 | * | |
7028 | * The "destroy" name is only to match the "create" name that this | |
7029 | * should be paired with. | |
7030 | */ | |
7031 | void ftrace_destroy_filter_files(struct ftrace_ops *ops) | |
7032 | { | |
7033 | mutex_lock(&ftrace_lock); | |
7034 | if (ops->flags & FTRACE_OPS_FL_ENABLED) | |
7035 | ftrace_shutdown(ops, 0); | |
7036 | ops->flags |= FTRACE_OPS_FL_DELETED; | |
2840f84f | 7037 | ftrace_free_filter(ops); |
591dffda SRRH |
7038 | mutex_unlock(&ftrace_lock); |
7039 | } | |
7040 | ||
8434dc93 | 7041 | static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer) |
5072c59f | 7042 | { |
5072c59f | 7043 | |
21ccc9cd | 7044 | trace_create_file("available_filter_functions", TRACE_MODE_READ, |
5452af66 | 7045 | d_tracer, NULL, &ftrace_avail_fops); |
5072c59f | 7046 | |
83f74441 JO |
7047 | trace_create_file("available_filter_functions_addrs", TRACE_MODE_READ, |
7048 | d_tracer, NULL, &ftrace_avail_addrs_fops); | |
7049 | ||
21ccc9cd | 7050 | trace_create_file("enabled_functions", TRACE_MODE_READ, |
647bcd03 SR |
7051 | d_tracer, NULL, &ftrace_enabled_fops); |
7052 | ||
e11b521a SRG |
7053 | trace_create_file("touched_functions", TRACE_MODE_READ, |
7054 | d_tracer, NULL, &ftrace_touched_fops); | |
7055 | ||
591dffda | 7056 | ftrace_create_filter_files(&global_ops, d_tracer); |
ad90c0e3 | 7057 | |
ea4e2bc4 | 7058 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
21ccc9cd | 7059 | trace_create_file("set_graph_function", TRACE_MODE_WRITE, d_tracer, |
ea4e2bc4 SR |
7060 | NULL, |
7061 | &ftrace_graph_fops); | |
21ccc9cd | 7062 | trace_create_file("set_graph_notrace", TRACE_MODE_WRITE, d_tracer, |
29ad23b0 NK |
7063 | NULL, |
7064 | &ftrace_graph_notrace_fops); | |
ea4e2bc4 SR |
7065 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
7066 | ||
5072c59f SR |
7067 | return 0; |
7068 | } | |
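The files created above are the runtime counterpart of the boot parameters: writing a function name into set_ftrace_filter under the tracefs mount fills the same filter hash that ftrace_set_filter() fills from code, while available_filter_functions lists every record that may be written there.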
7069 | ||
9fd49328 | 7070 | static int ftrace_cmp_ips(const void *a, const void *b) |
68950619 | 7071 | { |
9fd49328 SR |
7072 | const unsigned long *ipa = a; |
7073 | const unsigned long *ipb = b; | |
68950619 | 7074 | |
9fd49328 SR |
7075 | if (*ipa > *ipb) |
7076 | return 1; | |
7077 | if (*ipa < *ipb) | |
7078 | return -1; | |
7079 | return 0; | |
7080 | } | |
7081 | ||
8147dc78 SRV |
7082 | #ifdef CONFIG_FTRACE_SORT_STARTUP_TEST |
7083 | static void test_is_sorted(unsigned long *start, unsigned long count) | |
7084 | { | |
7085 | int i; | |
7086 | ||
7087 | for (i = 1; i < count; i++) { | |
7088 | if (WARN(start[i - 1] > start[i], | |
7089 | "[%d] %pS at %lx is not sorted with %pS at %lx\n", i, | |
7090 | (void *)start[i - 1], start[i - 1], | |
7091 | (void *)start[i], start[i])) | |
7092 | break; | |
7093 | } | |
7094 | if (i == count) | |
7095 | pr_info("ftrace section at %px sorted properly\n", start); | |
7096 | } | |
7097 | #else | |
7098 | static void test_is_sorted(unsigned long *start, unsigned long count) | |
7099 | { | |
7100 | } | |
7101 | #endif | |
7102 | ||
5cb084bb | 7103 | static int ftrace_process_locs(struct module *mod, |
31e88909 | 7104 | unsigned long *start, |
68bf21aa SR |
7105 | unsigned long *end) |
7106 | { | |
26efd79c | 7107 | struct ftrace_page *pg_unuse = NULL; |
706c81f8 | 7108 | struct ftrace_page *start_pg; |
a7900875 | 7109 | struct ftrace_page *pg; |
706c81f8 | 7110 | struct dyn_ftrace *rec; |
26efd79c | 7111 | unsigned long skipped = 0; |
a7900875 | 7112 | unsigned long count; |
68bf21aa SR |
7113 | unsigned long *p; |
7114 | unsigned long addr; | |
4376cac6 | 7115 | unsigned long flags = 0; /* Shut up gcc */ |
264143c4 | 7116 | unsigned long pages; |
a7900875 SR |
7117 | int ret = -ENOMEM; |
7118 | ||
7119 | count = end - start; | |
7120 | ||
7121 | if (!count) | |
7122 | return 0; | |
7123 | ||
264143c4 SR |
7124 | pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE); |
7125 | ||
72b3942a YL |
7126 | /* |
7127 | * Sorting mcount in vmlinux at build time depend on | |
6b9b6413 | 7128 | * CONFIG_BUILDTIME_MCOUNT_SORT, while mcount loc in |
72b3942a YL |
7129 | * modules can not be sorted at build time. |
7130 | */ | |
6b9b6413 | 7131 | if (!IS_ENABLED(CONFIG_BUILDTIME_MCOUNT_SORT) || mod) { |
72b3942a YL |
7132 | sort(start, count, sizeof(*start), |
7133 | ftrace_cmp_ips, NULL); | |
8147dc78 SRV |
7134 | } else { |
7135 | test_is_sorted(start, count); | |
72b3942a | 7136 | } |
9fd49328 | 7137 | |
706c81f8 SR |
7138 | start_pg = ftrace_allocate_pages(count); |
7139 | if (!start_pg) | |
a7900875 | 7140 | return -ENOMEM; |
68bf21aa | 7141 | |
e6ea44e9 | 7142 | mutex_lock(&ftrace_lock); |
a7900875 | 7143 | |
32082309 SR |
7144 | /* |
7145 | * The core kernel and each module need their own pages, as | |
7146 | * modules will free them when they are removed. | |
7147 | * Force a new page to be allocated for modules. | |
7148 | */ | |
a7900875 SR |
7149 | if (!mod) { |
7150 | WARN_ON(ftrace_pages || ftrace_pages_start); | |
7151 | /* First initialization */ | |
706c81f8 | 7152 | ftrace_pages = ftrace_pages_start = start_pg; |
a7900875 | 7153 | } else { |
32082309 | 7154 | if (!ftrace_pages) |
a7900875 | 7155 | goto out; |
32082309 | 7156 | |
a7900875 SR |
7157 | if (WARN_ON(ftrace_pages->next)) { |
7158 | /* Hmm, we have free pages? */ | |
7159 | while (ftrace_pages->next) | |
7160 | ftrace_pages = ftrace_pages->next; | |
32082309 | 7161 | } |
a7900875 | 7162 | |
706c81f8 | 7163 | ftrace_pages->next = start_pg; |
32082309 SR |
7164 | } |
7165 | ||
68bf21aa | 7166 | p = start; |
706c81f8 | 7167 | pg = start_pg; |
68bf21aa | 7168 | while (p < end) { |
db42523b | 7169 | unsigned long end_offset; |
6eeca746 SR |
7170 | |
7171 | addr = *p++; | |
7172 | ||
20e5227e SR |
7173 | /* |
7174 | * Some architecture linkers will pad between | |
7175 | * the different mcount_loc sections of different | |
7176 | * object files to satisfy alignments. | |
7177 | * Skip any NULL pointers. | |
7178 | */ | |
26efd79c ZY |
7179 | if (!addr) { |
7180 | skipped++; | |
20e5227e | 7181 | continue; |
26efd79c | 7182 | } |
706c81f8 | 7183 | |
da0f622b SR |
7184 | /* |
7185 | * If this is the core kernel, make sure the address is in core | |
7186 | * text or inittext, as weak functions get zeroed and KASLR can | |
7187 | * move them to something other than zero. It just will not | |
7188 | * move them to an area where kernel text is. | |
7189 | */ | |
7190 | if (!mod && !(is_kernel_text(addr) || is_kernel_inittext(addr))) { | |
26efd79c | 7191 | skipped++; |
20e5227e | 7192 | continue; |
26efd79c | 7193 | } |
706c81f8 | 7194 | |
6eeca746 SR |
7195 | addr = ftrace_call_adjust(addr); |
7196 | ||
db42523b LT |
7197 | end_offset = (pg->index+1) * sizeof(pg->records[0]); |
7198 | if (end_offset > PAGE_SIZE << pg->order) { | |
706c81f8 SR |
7199 | /* We should have allocated enough */ |
7200 | if (WARN_ON(!pg->next)) | |
7201 | break; | |
7202 | pg = pg->next; | |
7203 | } | |
7204 | ||
7205 | rec = &pg->records[pg->index++]; | |
7206 | rec->ip = addr; | |
68bf21aa SR |
7207 | } |
7208 | ||
26efd79c ZY |
7209 | if (pg->next) { |
7210 | pg_unuse = pg->next; | |
7211 | pg->next = NULL; | |
7212 | } | |
706c81f8 SR |
7213 | |
7214 | /* Assign the last page to ftrace_pages */ | |
7215 | ftrace_pages = pg; | |
7216 | ||
a4f18ed1 | 7217 | /* |
4376cac6 SR |
7218 | * We only need to disable interrupts on start up |
7219 | * because we are modifying code that an interrupt | |
7220 | * may execute, and the modification is not atomic. | |
7221 | * But for modules, nothing runs the code we modify | |
7222 | * until we are finished with it, and there's no | |
7223 | * reason to cause large interrupt latencies while we do it. | |
a4f18ed1 | 7224 | */ |
4376cac6 SR |
7225 | if (!mod) |
7226 | local_irq_save(flags); | |
1dc43cf0 | 7227 | ftrace_update_code(mod, start_pg); |
4376cac6 SR |
7228 | if (!mod) |
7229 | local_irq_restore(flags); | |
a7900875 SR |
7230 | ret = 0; |
7231 | out: | |
e6ea44e9 | 7232 | mutex_unlock(&ftrace_lock); |
68bf21aa | 7233 | |
26efd79c ZY |
7234 | /* We should have used all pages unless we skipped some */ |
7235 | if (pg_unuse) { | |
4a3efc6b SR |
7236 | unsigned long pg_remaining, remaining = 0; |
7237 | unsigned long skip; | |
7238 | ||
7239 | /* Count the number of entries unused and compare it to skipped. */ | |
7240 | pg_remaining = (ENTRIES_PER_PAGE << pg->order) - pg->index; | |
7241 | ||
7242 | if (!WARN(skipped < pg_remaining, "Extra allocated pages for ftrace")) { | |
7243 | ||
7244 | skip = skipped - pg_remaining; | |
7245 | ||
7246 | for (pg = pg_unuse; pg; pg = pg->next) | |
7247 | remaining += 1 << pg->order; | |
7248 | ||
264143c4 SR |
7249 | pages -= remaining; |
7250 | ||
4a3efc6b SR |
7251 | skip = DIV_ROUND_UP(skip, ENTRIES_PER_PAGE); |
7252 | ||
7253 | /* | |
7254 | * Check to see if the number of pages remaining would | |
7255 | * just fit the number of entries skipped. | |
7256 | */ | |
7257 | WARN(skip != remaining, "Extra allocated pages for ftrace: %lu with %lu skipped", | |
7258 | remaining, skipped); | |
7259 | } | |
e60b613d ZY |
7260 | /* Need to synchronize with ftrace_location_range() */ |
7261 | synchronize_rcu(); | |
26efd79c ZY |
7262 | ftrace_free_pages(pg_unuse); |
7263 | } | |
264143c4 SR |
7264 | |
7265 | if (!mod) { | |
7266 | count -= skipped; | |
7267 | pr_info("ftrace: allocating %ld entries in %ld pages\n", | |
7268 | count, pages); | |
7269 | } | |
7270 | ||
a7900875 | 7271 | return ret; |
68bf21aa SR |
7272 | } |
7273 | ||
aba4b5c2 SRV |
7274 | struct ftrace_mod_func { |
7275 | struct list_head list; | |
7276 | char *name; | |
7277 | unsigned long ip; | |
7278 | unsigned int size; | |
7279 | }; | |
7280 | ||
7281 | struct ftrace_mod_map { | |
6aa69784 | 7282 | struct rcu_head rcu; |
aba4b5c2 SRV |
7283 | struct list_head list; |
7284 | struct module *mod; | |
7285 | unsigned long start_addr; | |
7286 | unsigned long end_addr; | |
7287 | struct list_head funcs; | |
6171a031 | 7288 | unsigned int num_funcs; |
aba4b5c2 SRV |
7289 | }; |
7290 | ||
fc0ea795 AH |
7291 | static int ftrace_get_trampoline_kallsym(unsigned int symnum, |
7292 | unsigned long *value, char *type, | |
7293 | char *name, char *module_name, | |
7294 | int *exported) | |
7295 | { | |
7296 | struct ftrace_ops *op; | |
7297 | ||
7298 | list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) { | |
7299 | if (!op->trampoline || symnum--) | |
7300 | continue; | |
7301 | *value = op->trampoline; | |
7302 | *type = 't'; | |
d0c2d66f AS |
7303 | strscpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN); |
7304 | strscpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN); | |
fc0ea795 AH |
7305 | *exported = 0; |
7306 | return 0; | |
7307 | } | |
7308 | ||
7309 | return -ERANGE; | |
7310 | } | |
7311 | ||
123d6455 WJ |
7312 | #if defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS) || defined(CONFIG_MODULES) |
7313 | /* | |
7314 | * Check if the current ops references the given ip. | |
7315 | * | |
7316 | * If the ops traces all functions, then it was already accounted for. | |
7317 | * If the ops does not trace the current record function, skip it. | |
7318 | * If the ops ignores the function via notrace filter, skip it. | |
7319 | */ | |
7320 | static bool | |
7321 | ops_references_ip(struct ftrace_ops *ops, unsigned long ip) | |
7322 | { | |
7323 | /* If ops isn't enabled, ignore it */ | |
7324 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) | |
7325 | return false; | |
7326 | ||
7327 | /* If ops traces all then it includes this function */ | |
7328 | if (ops_traces_mod(ops)) | |
7329 | return true; | |
7330 | ||
7331 | /* The function must be in the filter */ | |
7332 | if (!ftrace_hash_empty(ops->func_hash->filter_hash) && | |
7333 | !__ftrace_lookup_ip(ops->func_hash->filter_hash, ip)) | |
7334 | return false; | |
7335 | ||
7336 | /* If in notrace hash, we ignore it too */ | |
7337 | if (ftrace_lookup_ip(ops->func_hash->notrace_hash, ip)) | |
7338 | return false; | |
7339 | ||
7340 | return true; | |
7341 | } | |
7342 | #endif | |
7343 | ||
93eb677d | 7344 | #ifdef CONFIG_MODULES |
32082309 SR |
7345 | |
7346 | #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next) | |
7347 | ||
6aa69784 SRV |
7348 | static LIST_HEAD(ftrace_mod_maps); |
7349 | ||
b7ffffbb SRRH |
7350 | static int referenced_filters(struct dyn_ftrace *rec) |
7351 | { | |
7352 | struct ftrace_ops *ops; | |
7353 | int cnt = 0; | |
7354 | ||
7355 | for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { | |
123d6455 | 7356 | if (ops_references_ip(ops, rec->ip)) { |
c5f51572 CZ |
7357 | if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT)) |
7358 | continue; | |
7359 | if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY)) | |
7360 | continue; | |
8a224ffb CZ |
7361 | cnt++; |
7362 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) | |
7363 | rec->flags |= FTRACE_FL_REGS; | |
c5f51572 CZ |
7364 | if (cnt == 1 && ops->trampoline) |
7365 | rec->flags |= FTRACE_FL_TRAMP; | |
7366 | else | |
7367 | rec->flags &= ~FTRACE_FL_TRAMP; | |
8a224ffb | 7368 | } |
b7ffffbb SRRH |
7369 | } |
7370 | ||
7371 | return cnt; | |
7372 | } | |
7373 | ||
2a5bfe47 SRV |
7374 | static void |
7375 | clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash) | |
7376 | { | |
7377 | struct ftrace_func_entry *entry; | |
7378 | struct dyn_ftrace *rec; | |
7379 | int i; | |
7380 | ||
7381 | if (ftrace_hash_empty(hash)) | |
7382 | return; | |
7383 | ||
7384 | for (i = 0; i < pg->index; i++) { | |
7385 | rec = &pg->records[i]; | |
7386 | entry = __ftrace_lookup_ip(hash, rec->ip); | |
7387 | /* | |
7388 | * Do not allow this rec to match again. | |
7389 | * Yeah, it may waste some memory, but will be removed | |
7390 | * if/when the hash is modified again. | |
7391 | */ | |
7392 | if (entry) | |
7393 | entry->ip = 0; | |
7394 | } | |
7395 | } | |
7396 | ||
f2cc020d | 7397 | /* Clear any records from hashes */ |
2a5bfe47 SRV |
7398 | static void clear_mod_from_hashes(struct ftrace_page *pg) |
7399 | { | |
7400 | struct trace_array *tr; | |
7401 | ||
7402 | mutex_lock(&trace_types_lock); | |
7403 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { | |
7404 | if (!tr->ops || !tr->ops->func_hash) | |
7405 | continue; | |
7406 | mutex_lock(&tr->ops->func_hash->regex_lock); | |
7407 | clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash); | |
7408 | clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash); | |
7409 | mutex_unlock(&tr->ops->func_hash->regex_lock); | |
7410 | } | |
7411 | mutex_unlock(&trace_types_lock); | |
7412 | } | |
7413 | ||
6aa69784 SRV |
7414 | static void ftrace_free_mod_map(struct rcu_head *rcu) |
7415 | { | |
7416 | struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu); | |
7417 | struct ftrace_mod_func *mod_func; | |
7418 | struct ftrace_mod_func *n; | |
7419 | ||
7420 | /* All the contents of mod_map are now not visible to readers */ | |
7421 | list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) { | |
7422 | kfree(mod_func->name); | |
7423 | list_del(&mod_func->list); | |
7424 | kfree(mod_func); | |
7425 | } | |
7426 | ||
7427 | kfree(mod_map); | |
7428 | } | |
7429 | ||
e7247a15 | 7430 | void ftrace_release_mod(struct module *mod) |
93eb677d | 7431 | { |
6aa69784 SRV |
7432 | struct ftrace_mod_map *mod_map; |
7433 | struct ftrace_mod_map *n; | |
93eb677d | 7434 | struct dyn_ftrace *rec; |
32082309 | 7435 | struct ftrace_page **last_pg; |
2a5bfe47 | 7436 | struct ftrace_page *tmp_page = NULL; |
93eb677d | 7437 | struct ftrace_page *pg; |
93eb677d | 7438 | |
45a4a237 SR |
7439 | mutex_lock(&ftrace_lock); |
7440 | ||
f914b52c YB |
7441 | /* |
7442 | * To avoid the UAF problem after the module is unloaded, the | |
7443 | * 'mod_map' resource needs to be released unconditionally. | |
7444 | */ | |
6aa69784 SRV |
7445 | list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) { |
7446 | if (mod_map->mod == mod) { | |
7447 | list_del_rcu(&mod_map->list); | |
74401729 | 7448 | call_rcu(&mod_map->rcu, ftrace_free_mod_map); |
6aa69784 SRV |
7449 | break; |
7450 | } | |
7451 | } | |
7452 | ||
f914b52c YB |
7453 | if (ftrace_disabled) |
7454 | goto out_unlock; | |
7455 | ||
32082309 SR |
7456 | /* |
7457 | * Each module has its own ftrace_pages; remove | |
7458 | * them from the list. | |
7459 | */ | |
7460 | last_pg = &ftrace_pages_start; | |
7461 | for (pg = ftrace_pages_start; pg; pg = *last_pg) { | |
7462 | rec = &pg->records[0]; | |
13511489 | 7463 | if (within_module(rec->ip, mod)) { |
93eb677d | 7464 | /* |
32082309 SR |
7465 | * As core pages are first, the first |
7466 | * page should never be a module page. | |
93eb677d | 7467 | */ |
32082309 SR |
7468 | if (WARN_ON(pg == ftrace_pages_start)) |
7469 | goto out_unlock; | |
7470 | ||
7471 | /* Check if we are deleting the last page */ | |
7472 | if (pg == ftrace_pages) | |
7473 | ftrace_pages = next_to_ftrace_page(last_pg); | |
7474 | ||
83dd1493 | 7475 | ftrace_update_tot_cnt -= pg->index; |
32082309 | 7476 | *last_pg = pg->next; |
2a5bfe47 SRV |
7477 | |
7478 | pg->next = tmp_page; | |
7479 | tmp_page = pg; | |
32082309 SR |
7480 | } else |
7481 | last_pg = &pg->next; | |
7482 | } | |
45a4a237 | 7483 | out_unlock: |
93eb677d | 7484 | mutex_unlock(&ftrace_lock); |
2a5bfe47 | 7485 | |
e60b613d ZY |
7486 | /* Need to synchronize with ftrace_location_range() */ |
7487 | if (tmp_page) | |
7488 | synchronize_rcu(); | |
2a5bfe47 SRV |
7489 | for (pg = tmp_page; pg; pg = tmp_page) { |
7490 | ||
7491 | /* Needs to be called outside of ftrace_lock */ | |
7492 | clear_mod_from_hashes(pg); | |
7493 | ||
db42523b LT |
7494 | if (pg->records) { |
7495 | free_pages((unsigned long)pg->records, pg->order); | |
7496 | ftrace_number_of_pages -= 1 << pg->order; | |
7497 | } | |
2a5bfe47 SRV |
7498 | tmp_page = pg->next; |
7499 | kfree(pg); | |
da537f0a | 7500 | ftrace_number_of_groups--; |
2a5bfe47 | 7501 | } |
93eb677d SR |
7502 | } |
7503 | ||
7dcd182b | 7504 | void ftrace_module_enable(struct module *mod) |
b7ffffbb SRRH |
7505 | { |
7506 | struct dyn_ftrace *rec; | |
7507 | struct ftrace_page *pg; | |
7508 | ||
7509 | mutex_lock(&ftrace_lock); | |
7510 | ||
7511 | if (ftrace_disabled) | |
7512 | goto out_unlock; | |
7513 | ||
7514 | /* | |
7515 | * If the tracing is enabled, go ahead and enable the record. | |
7516 | * | |
9efb85c5 | 7517 | * The reason not to enable the record immediately is the |
b7ffffbb SRRH |
7518 | * inherent check of ftrace_make_nop/ftrace_make_call for |
7519 | * correct previous instructions. Doing the NOP
7520 | * conversion first puts the module into the correct state,
7521 | * thus passing the ftrace_make_call check.
7522 | * | |
7523 | * We also delay this to after the module code already set the | |
7524 | * text to read-only, as we now need to set it back to read-write | |
7525 | * so that we can modify the text. | |
7526 | */ | |
7527 | if (ftrace_start_up) | |
7528 | ftrace_arch_code_modify_prepare(); | |
7529 | ||
7530 | do_for_each_ftrace_rec(pg, rec) { | |
7531 | int cnt; | |
7532 | /* | |
7533 | * do_for_each_ftrace_rec() is a double loop. | |
7534 | * Module text shares the pg. If a record is
7535 | * not part of this module, then skip this pg, | |
7536 | * which the "break" will do. | |
7537 | */ | |
13511489 | 7538 | if (!within_module(rec->ip, mod)) |
b7ffffbb SRRH |
7539 | break; |
7540 | ||
b39181f7 SRG |
7541 | /* Weak functions should still be ignored */ |
7542 | if (!test_for_valid_rec(rec)) { | |
7543 | /* Clear all other flags. Should not be enabled anyway */ | |
7544 | rec->flags = FTRACE_FL_DISABLED; | |
7545 | continue; | |
7546 | } | |
7547 | ||
b7ffffbb SRRH |
7548 | cnt = 0; |
7549 | ||
7550 | /* | |
7551 | * When adding a module, we need to check if tracers are | |
7552 | * currently enabled and if they are, and can trace this record, | |
7553 | * we need to enable the module functions as well as update the | |
7554 | * reference counts for those function records. | |
7555 | */ | |
7556 | if (ftrace_start_up) | |
7557 | cnt += referenced_filters(rec); | |
7558 | ||
8a224ffb CZ |
7559 | rec->flags &= ~FTRACE_FL_DISABLED; |
7560 | rec->flags += cnt; | |
b7ffffbb SRRH |
7561 | |
7562 | if (ftrace_start_up && cnt) { | |
7563 | int failed = __ftrace_replace_code(rec, 1); | |
7564 | if (failed) { | |
7565 | ftrace_bug(failed, rec); | |
7566 | goto out_loop; | |
7567 | } | |
7568 | } | |
7569 | ||
7570 | } while_for_each_ftrace_rec(); | |
7571 | ||
7572 | out_loop: | |
7573 | if (ftrace_start_up) | |
7574 | ftrace_arch_code_modify_post_process(); | |
7575 | ||
7576 | out_unlock: | |
7577 | mutex_unlock(&ftrace_lock); | |
d7fbf8df SRV |
7578 | |
7579 | process_cached_mods(mod->name); | |
b7ffffbb SRRH |
7580 | } |
7581 | ||
b6b71f66 | 7582 | void ftrace_module_init(struct module *mod) |
90d595fe | 7583 | { |
2889c658 YW |
7584 | int ret; |
7585 | ||
97e9b4fc | 7586 | if (ftrace_disabled || !mod->num_ftrace_callsites) |
fed1939c | 7587 | return; |
90d595fe | 7588 | |
2889c658 YW |
7589 | ret = ftrace_process_locs(mod, mod->ftrace_callsites, |
7590 | mod->ftrace_callsites + mod->num_ftrace_callsites); | |
7591 | if (ret) | |
7592 | pr_warn("ftrace: failed to allocate entries for module '%s' functions\n", | |
7593 | mod->name); | |
8c189ea6 | 7594 | } |
aba4b5c2 SRV |
7595 | |
7596 | static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map, | |
7597 | struct dyn_ftrace *rec) | |
7598 | { | |
7599 | struct ftrace_mod_func *mod_func; | |
7600 | unsigned long symsize; | |
7601 | unsigned long offset; | |
7602 | char str[KSYM_SYMBOL_LEN]; | |
7603 | char *modname; | |
7604 | const char *ret; | |
7605 | ||
7606 | ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str); | |
7607 | if (!ret) | |
7608 | return; | |
7609 | ||
7610 | mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL); | |
7611 | if (!mod_func) | |
7612 | return; | |
7613 | ||
7614 | mod_func->name = kstrdup(str, GFP_KERNEL); | |
7615 | if (!mod_func->name) { | |
7616 | kfree(mod_func); | |
7617 | return; | |
7618 | } | |
7619 | ||
7620 | mod_func->ip = rec->ip - offset; | |
7621 | mod_func->size = symsize; | |
7622 | ||
6171a031 SRV |
7623 | mod_map->num_funcs++; |
7624 | ||
aba4b5c2 SRV |
7625 | list_add_rcu(&mod_func->list, &mod_map->funcs); |
7626 | } | |
7627 | ||
aba4b5c2 SRV |
7628 | static struct ftrace_mod_map * |
7629 | allocate_ftrace_mod_map(struct module *mod, | |
7630 | unsigned long start, unsigned long end) | |
7631 | { | |
7632 | struct ftrace_mod_map *mod_map; | |
7633 | ||
5834a597 YB |
7634 | if (ftrace_disabled) |
7635 | return NULL; | |
7636 | ||
aba4b5c2 SRV |
7637 | mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL); |
7638 | if (!mod_map) | |
7639 | return NULL; | |
7640 | ||
7641 | mod_map->mod = mod; | |
7642 | mod_map->start_addr = start; | |
7643 | mod_map->end_addr = end; | |
6171a031 | 7644 | mod_map->num_funcs = 0; |
aba4b5c2 SRV |
7645 | |
7646 | INIT_LIST_HEAD_RCU(&mod_map->funcs); | |
7647 | ||
7648 | list_add_rcu(&mod_map->list, &ftrace_mod_maps); | |
7649 | ||
7650 | return mod_map; | |
7651 | } | |
7652 | ||
7e1f4eb9 | 7653 | static int |
aba4b5c2 SRV |
7654 | ftrace_func_address_lookup(struct ftrace_mod_map *mod_map, |
7655 | unsigned long addr, unsigned long *size, | |
7656 | unsigned long *off, char *sym) | |
7657 | { | |
7658 | struct ftrace_mod_func *found_func = NULL; | |
7659 | struct ftrace_mod_func *mod_func; | |
7660 | ||
7661 | list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { | |
7662 | if (addr >= mod_func->ip && | |
7663 | addr < mod_func->ip + mod_func->size) { | |
7664 | found_func = mod_func; | |
7665 | break; | |
7666 | } | |
7667 | } | |
7668 | ||
7669 | if (found_func) { | |
7670 | if (size) | |
7671 | *size = found_func->size; | |
7672 | if (off) | |
7673 | *off = addr - found_func->ip; | |
7e1f4eb9 | 7674 | return strscpy(sym, found_func->name, KSYM_NAME_LEN); |
aba4b5c2 SRV |
7675 | } |
7676 | ||
7e1f4eb9 | 7677 | return 0; |
aba4b5c2 SRV |
7678 | } |
7679 | ||
7e1f4eb9 | 7680 | int |
aba4b5c2 SRV |
7681 | ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, |
7682 | unsigned long *off, char **modname, char *sym) | |
7683 | { | |
7684 | struct ftrace_mod_map *mod_map; | |
7e1f4eb9 | 7685 | int ret = 0; |
aba4b5c2 | 7686 | |
74401729 | 7687 | /* mod_map is freed via call_rcu() */ |
aba4b5c2 SRV |
7688 | preempt_disable(); |
7689 | list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { | |
7690 | ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym); | |
7691 | if (ret) { | |
7692 | if (modname) | |
7693 | *modname = mod_map->mod->name; | |
7694 | break; | |
7695 | } | |
7696 | } | |
7697 | preempt_enable(); | |
7698 | ||
7699 | return ret; | |
7700 | } | |
7701 | ||
6171a031 SRV |
7702 | int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, |
7703 | char *type, char *name, | |
7704 | char *module_name, int *exported) | |
7705 | { | |
7706 | struct ftrace_mod_map *mod_map; | |
7707 | struct ftrace_mod_func *mod_func; | |
fc0ea795 | 7708 | int ret; |
6171a031 SRV |
7709 | |
7710 | preempt_disable(); | |
7711 | list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { | |
7712 | ||
7713 | if (symnum >= mod_map->num_funcs) { | |
7714 | symnum -= mod_map->num_funcs; | |
7715 | continue; | |
7716 | } | |
7717 | ||
7718 | list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { | |
7719 | if (symnum > 1) { | |
7720 | symnum--; | |
7721 | continue; | |
7722 | } | |
7723 | ||
7724 | *value = mod_func->ip; | |
7725 | *type = 'T'; | |
d0c2d66f AS |
7726 | strscpy(name, mod_func->name, KSYM_NAME_LEN); |
7727 | strscpy(module_name, mod_map->mod->name, MODULE_NAME_LEN); | |
6171a031 SRV |
7728 | *exported = 1; |
7729 | preempt_enable(); | |
7730 | return 0; | |
7731 | } | |
7732 | WARN_ON(1); | |
7733 | break; | |
7734 | } | |
fc0ea795 AH |
7735 | ret = ftrace_get_trampoline_kallsym(symnum, value, type, name, |
7736 | module_name, exported); | |
6171a031 | 7737 | preempt_enable(); |
fc0ea795 | 7738 | return ret; |
6171a031 SRV |
7739 | } |
7740 | ||
aba4b5c2 SRV |
7741 | #else |
7742 | static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map, | |
7743 | struct dyn_ftrace *rec) { } | |
7744 | static inline struct ftrace_mod_map * | |
7745 | allocate_ftrace_mod_map(struct module *mod, | |
7746 | unsigned long start, unsigned long end) | |
7747 | { | |
7748 | return NULL; | |
7749 | } | |
fc0ea795 AH |
7750 | int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, |
7751 | char *type, char *name, char *module_name, | |
7752 | int *exported) | |
7753 | { | |
7754 | int ret; | |
7755 | ||
7756 | preempt_disable(); | |
7757 | ret = ftrace_get_trampoline_kallsym(symnum, value, type, name, | |
7758 | module_name, exported); | |
7759 | preempt_enable(); | |
7760 | return ret; | |
7761 | } | |
93eb677d SR |
7762 | #endif /* CONFIG_MODULES */ |
7763 | ||
8715b108 JF |
7764 | struct ftrace_init_func { |
7765 | struct list_head list; | |
7766 | unsigned long ip; | |
7767 | }; | |
7768 | ||
7769 | /* Clear any init ips from hashes */ | |
7770 | static void | |
7771 | clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash) | |
42c269c8 | 7772 | { |
8715b108 JF |
7773 | struct ftrace_func_entry *entry; |
7774 | ||
08468754 | 7775 | entry = ftrace_lookup_ip(hash, func->ip); |
8715b108 JF |
7776 | /* |
7777 | * Do not allow this rec to match again. | |
7778 | * Yeah, it may waste some memory, but will be removed | |
7779 | * if/when the hash is modified again. | |
7780 | */ | |
7781 | if (entry) | |
7782 | entry->ip = 0; | |
7783 | } | |
7784 | ||
7785 | static void | |
7786 | clear_func_from_hashes(struct ftrace_init_func *func) | |
7787 | { | |
7788 | struct trace_array *tr; | |
7789 | ||
7790 | mutex_lock(&trace_types_lock); | |
7791 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { | |
7792 | if (!tr->ops || !tr->ops->func_hash) | |
7793 | continue; | |
7794 | mutex_lock(&tr->ops->func_hash->regex_lock); | |
7795 | clear_func_from_hash(func, tr->ops->func_hash->filter_hash); | |
7796 | clear_func_from_hash(func, tr->ops->func_hash->notrace_hash); | |
7797 | mutex_unlock(&tr->ops->func_hash->regex_lock); | |
7798 | } | |
7799 | mutex_unlock(&trace_types_lock); | |
7800 | } | |
7801 | ||
7802 | static void add_to_clear_hash_list(struct list_head *clear_list, | |
7803 | struct dyn_ftrace *rec) | |
7804 | { | |
7805 | struct ftrace_init_func *func; | |
7806 | ||
7807 | func = kmalloc(sizeof(*func), GFP_KERNEL); | |
7808 | if (!func) { | |
24589e3a | 7809 | MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n"); |
8715b108 JF |
7810 | return; |
7811 | } | |
7812 | ||
7813 | func->ip = rec->ip; | |
7814 | list_add(&func->list, clear_list); | |
7815 | } | |
7816 | ||
aba4b5c2 | 7817 | void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr) |
42c269c8 | 7818 | { |
6cafbe15 SRV |
7819 | unsigned long start = (unsigned long)(start_ptr); |
7820 | unsigned long end = (unsigned long)(end_ptr); | |
42c269c8 | 7821 | struct ftrace_page **last_pg = &ftrace_pages_start; |
e60b613d | 7822 | struct ftrace_page *tmp_page = NULL; |
42c269c8 SRV |
7823 | struct ftrace_page *pg; |
7824 | struct dyn_ftrace *rec; | |
7825 | struct dyn_ftrace key; | |
aba4b5c2 | 7826 | struct ftrace_mod_map *mod_map = NULL; |
8715b108 | 7827 | struct ftrace_init_func *func, *func_next; |
2a30dbcb | 7828 | LIST_HEAD(clear_hash); |
8715b108 | 7829 | |
42c269c8 SRV |
7830 | key.ip = start; |
7831 | key.flags = end; /* overload flags, as it is unsigned long */ | |
7832 | ||
7833 | mutex_lock(&ftrace_lock); | |
7834 | ||
aba4b5c2 SRV |
7835 | /* |
7836 | * If we are freeing module init memory, then check if | |
7837 | * any tracer is active. If so, we need to save a mapping of | |
7838 | * the module functions being freed along with their addresses.
7839 | */ | |
7840 | if (mod && ftrace_ops_list != &ftrace_list_end) | |
7841 | mod_map = allocate_ftrace_mod_map(mod, start, end); | |
7842 | ||
42c269c8 SRV |
7843 | for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) { |
7844 | if (end < pg->records[0].ip || | |
7845 | start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) | |
7846 | continue; | |
7847 | again: | |
7848 | rec = bsearch(&key, pg->records, pg->index, | |
7849 | sizeof(struct dyn_ftrace), | |
7850 | ftrace_cmp_recs); | |
7851 | if (!rec) | |
7852 | continue; | |
aba4b5c2 | 7853 | |
8715b108 JF |
7854 | /* rec will be cleared from hashes after ftrace_lock unlock */ |
7855 | add_to_clear_hash_list(&clear_hash, rec); | |
7856 | ||
aba4b5c2 SRV |
7857 | if (mod_map) |
7858 | save_ftrace_mod_rec(mod_map, rec); | |
7859 | ||
42c269c8 | 7860 | pg->index--; |
4ec78467 | 7861 | ftrace_update_tot_cnt--; |
42c269c8 SRV |
7862 | if (!pg->index) { |
7863 | *last_pg = pg->next; | |
e60b613d ZY |
7864 | pg->next = tmp_page; |
7865 | tmp_page = pg; | |
42c269c8 SRV |
7866 | pg = container_of(last_pg, struct ftrace_page, next); |
7867 | if (!(*last_pg)) | |
7868 | ftrace_pages = pg; | |
7869 | continue; | |
7870 | } | |
7871 | memmove(rec, rec + 1, | |
7872 | (pg->index - (rec - pg->records)) * sizeof(*rec)); | |
7873 | /* More than one function may be in this block */ | |
7874 | goto again; | |
7875 | } | |
7876 | mutex_unlock(&ftrace_lock); | |
8715b108 JF |
7877 | |
7878 | list_for_each_entry_safe(func, func_next, &clear_hash, list) { | |
7879 | clear_func_from_hashes(func); | |
7880 | kfree(func); | |
7881 | } | |
e60b613d ZY |
7882 | /* Need to synchronize with ftrace_location_range() */ |
7883 | if (tmp_page) { | |
7884 | synchronize_rcu(); | |
7885 | ftrace_free_pages(tmp_page); | |
7886 | } | |
42c269c8 SRV |
7887 | } |
7888 | ||
6cafbe15 SRV |
7889 | void __init ftrace_free_init_mem(void) |
7890 | { | |
7891 | void *start = (void *)(&__init_begin); | |
7892 | void *end = (void *)(&__init_end); | |
7893 | ||
380af29b SRG |
7894 | ftrace_boot_snapshot(); |
7895 | ||
aba4b5c2 | 7896 | ftrace_free_mem(NULL, start, end); |
42c269c8 SRV |
7897 | } |
7898 | ||
6644c654 WO |
7899 | int __init __weak ftrace_dyn_arch_init(void) |
7900 | { | |
7901 | return 0; | |
7902 | } | |
7903 | ||
68bf21aa SR |
7904 | void __init ftrace_init(void) |
7905 | { | |
1dc43cf0 JS |
7906 | extern unsigned long __start_mcount_loc[]; |
7907 | extern unsigned long __stop_mcount_loc[]; | |
3a36cb11 | 7908 | unsigned long count, flags; |
68bf21aa SR |
7909 | int ret; |
7910 | ||
68bf21aa | 7911 | local_irq_save(flags); |
3a36cb11 | 7912 | ret = ftrace_dyn_arch_init(); |
68bf21aa | 7913 | local_irq_restore(flags); |
af64a7cb | 7914 | if (ret) |
68bf21aa SR |
7915 | goto failed; |
7916 | ||
7917 | count = __stop_mcount_loc - __start_mcount_loc; | |
c867ccd8 JS |
7918 | if (!count) { |
7919 | pr_info("ftrace: No functions to be traced?\n"); | |
68bf21aa | 7920 | goto failed; |
c867ccd8 JS |
7921 | } |
7922 | ||
5cb084bb | 7923 | ret = ftrace_process_locs(NULL, |
31e88909 | 7924 | __start_mcount_loc, |
68bf21aa | 7925 | __stop_mcount_loc); |
2889c658 YW |
7926 | if (ret) { |
7927 | pr_warn("ftrace: failed to allocate entries for functions\n"); | |
7928 | goto failed; | |
7929 | } | |
68bf21aa | 7930 | |
da537f0a SRV |
7931 | pr_info("ftrace: allocated %ld pages with %ld groups\n", |
7932 | ftrace_number_of_pages, ftrace_number_of_groups); | |
7933 | ||
2889c658 YW |
7934 | last_ftrace_enabled = ftrace_enabled = 1; |
7935 | ||
2af15d6a SR |
7936 | set_ftrace_early_filters(); |
7937 | ||
68bf21aa SR |
7938 | return; |
7939 | failed: | |
7940 | ftrace_disabled = 1; | |
7941 | } | |
68bf21aa | 7942 | |
f3bea491 SRRH |
7943 | /* Do nothing if arch does not support this */ |
7944 | void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops) | |
7945 | { | |
7946 | } | |
7947 | ||
7948 | static void ftrace_update_trampoline(struct ftrace_ops *ops) | |
7949 | { | |
fc0ea795 AH |
7950 | unsigned long trampoline = ops->trampoline; |
7951 | ||
f3bea491 | 7952 | arch_ftrace_update_trampoline(ops); |
fc0ea795 | 7953 | if (ops->trampoline && ops->trampoline != trampoline && |
dd9ddf46 AH |
7954 | (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) { |
7955 | /* Add to kallsyms before the perf events */ | |
fc0ea795 | 7956 | ftrace_add_trampoline_to_kallsyms(ops); |
dd9ddf46 AH |
7957 | perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, |
7958 | ops->trampoline, ops->trampoline_size, false, | |
7959 | FTRACE_TRAMPOLINE_SYM); | |
548e1f6c AH |
7960 | /* |
7961 | * Record the perf text poke event after the ksymbol register | |
7962 | * event. | |
7963 | */ | |
7964 | perf_event_text_poke((void *)ops->trampoline, NULL, 0, | |
7965 | (void *)ops->trampoline, | |
7966 | ops->trampoline_size); | |
dd9ddf46 | 7967 | } |
f3bea491 SRRH |
7968 | } |
7969 | ||
04ec7bb6 SRV |
7970 | void ftrace_init_trace_array(struct trace_array *tr) |
7971 | { | |
31f505dc SR |
7972 | if (tr->flags & TRACE_ARRAY_FL_MOD_INIT) |
7973 | return; | |
7974 | ||
04ec7bb6 | 7975 | INIT_LIST_HEAD(&tr->func_probes); |
673feb9d SRV |
7976 | INIT_LIST_HEAD(&tr->mod_trace); |
7977 | INIT_LIST_HEAD(&tr->mod_notrace); | |
31f505dc SR |
7978 | |
7979 | tr->flags |= TRACE_ARRAY_FL_MOD_INIT; | |
04ec7bb6 | 7980 | } |
3d083395 | 7981 | #else |
0b6e4d56 | 7982 | |
3306fc4a | 7983 | struct ftrace_ops global_ops = { |
bd69c30b | 7984 | .func = ftrace_stub, |
a25d036d | 7985 | .flags = FTRACE_OPS_FL_INITIALIZED | |
e3eea140 | 7986 | FTRACE_OPS_FL_PID, |
bd69c30b SR |
7987 | }; |
7988 | ||
0b6e4d56 FW |
7989 | static int __init ftrace_nodyn_init(void) |
7990 | { | |
7991 | ftrace_enabled = 1; | |
7992 | return 0; | |
7993 | } | |
6f415672 | 7994 | core_initcall(ftrace_nodyn_init); |
0b6e4d56 | 7995 | |
8434dc93 | 7996 | static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; } |
e1effa01 | 7997 | static inline void ftrace_startup_all(int command) { } |
8a56d776 | 7998 | |
f3bea491 SRRH |
7999 | static void ftrace_update_trampoline(struct ftrace_ops *ops) |
8000 | { | |
8001 | } | |
8002 | ||
3d083395 SR |
8003 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
8004 | ||
4104d326 SRRH |
8005 | __init void ftrace_init_global_array_ops(struct trace_array *tr) |
8006 | { | |
8007 | tr->ops = &global_ops; | |
31f505dc SR |
8008 | if (!global_ops.private) |
8009 | global_ops.private = tr; | |
04ec7bb6 | 8010 | ftrace_init_trace_array(tr); |
c132be2c | 8011 | init_array_fgraph_ops(tr, tr->ops); |
4104d326 SRRH |
8012 | } |
8013 | ||
8014 | void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func) | |
8015 | { | |
8016 | /* If we filter on pids, update to use the pid function */ | |
8017 | if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { | |
8018 | if (WARN_ON(tr->ops->func != ftrace_stub)) | |
8019 | printk("ftrace ops had %pS for function\n", | |
8020 | tr->ops->func); | |
4104d326 SRRH |
8021 | } |
8022 | tr->ops->func = func; | |
8023 | tr->ops->private = tr; | |
8024 | } | |
8025 | ||
8026 | void ftrace_reset_array_ops(struct trace_array *tr) | |
8027 | { | |
8028 | tr->ops->func = ftrace_stub; | |
8029 | } | |
8030 | ||
fabe38ab | 8031 | static nokprobe_inline void |
2f5f6ad9 | 8032 | __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, |
d19ad077 | 8033 | struct ftrace_ops *ignored, struct ftrace_regs *fregs) |
b848914c | 8034 | { |
d19ad077 | 8035 | struct pt_regs *regs = ftrace_get_regs(fregs); |
cdbe61bf | 8036 | struct ftrace_ops *op; |
edc15caf | 8037 | int bit; |
b848914c | 8038 | |
ce5e4803 | 8039 | /* |
8040 | * The ftrace_test_and_set_recursion() will disable preemption, | |
8041 | * which is required since some of the ops may be dynamically | |
8042 | * allocated, they must be freed after a synchronize_rcu(). | |
8043 | */ | |
ed65df63 | 8044 | bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START); |
edc15caf SR |
8045 | if (bit < 0) |
8046 | return; | |
b1cff0ad | 8047 | |
0a016409 | 8048 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
2fa717a0 SRV |
8049 | /* Stub functions don't need to be called nor tested */ |
8050 | if (op->flags & FTRACE_OPS_FL_STUB) | |
8051 | continue; | |
ba27f2bc SRRH |
8052 | /* |
8053 | * Check the following for each ops before calling their func: | |
8054 | * if the RCU flag is set, then rcu_is_watching() must be true.
ba27f2bc SRRH |
8055 | * In all cases, the ip must also match the ops filter.
8056 | * | |
8057 | * If any of the above fails then the op->func() is not executed. | |
8058 | */ | |
8059 | if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) && | |
ba27f2bc | 8060 | ftrace_ops_test(op, ip, regs)) { |
1d48d596 SRRH |
8061 | if (FTRACE_WARN_ON(!op->func)) { |
8062 | pr_warn("op=%p %pS\n", op, op); | |
4104d326 SRRH |
8063 | goto out; |
8064 | } | |
d19ad077 | 8065 | op->func(ip, parent_ip, op, fregs); |
4104d326 | 8066 | } |
0a016409 | 8067 | } while_for_each_ftrace_op(op); |
4104d326 | 8068 | out: |
edc15caf | 8069 | trace_clear_recursion(bit); |
b848914c SR |
8070 | } |
8071 | ||
2f5f6ad9 SR |
8072 | /* |
8073 | * Some archs only support passing ip and parent_ip. Even though | |
8074 | * the list function ignores the op parameter, we do not want any | |
8075 | * C side effects, where a function is called without the caller | |
8076 | * sending a third parameter. | |
a1e2e31d SR |
8077 | * Archs are to support both the regs and ftrace_ops at the same time. |
8078 | * If they support ftrace_ops, it is assumed they support regs. | |
8079 | * If callbacks want to use regs, they must either check for regs
06aeaaea MH |
8080 | * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS. |
8081 | * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved. | |
a1e2e31d | 8082 | * An architecture can pass partial regs with ftrace_ops and still |
b8ec330a | 8083 | * set the ARCH_SUPPORTS_FTRACE_OPS. |
34cdd18b SRV |
8084 | * |
8085 | * In vmlinux.lds.h, ftrace_ops_list_func() is defined to be | |
8086 | * arch_ftrace_ops_list_func. | |
2f5f6ad9 SR |
8087 | */ |
8088 | #if ARCH_SUPPORTS_FTRACE_OPS | |
34cdd18b SRV |
8089 | void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, |
8090 | struct ftrace_ops *op, struct ftrace_regs *fregs) | |
2f5f6ad9 | 8091 | { |
7888af41 | 8092 | kmsan_unpoison_memory(fregs, ftrace_regs_size()); |
d19ad077 | 8093 | __ftrace_ops_list_func(ip, parent_ip, NULL, fregs); |
2f5f6ad9 SR |
8094 | } |
8095 | #else | |
34cdd18b | 8096 | void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip) |
2f5f6ad9 | 8097 | { |
a1e2e31d | 8098 | __ftrace_ops_list_func(ip, parent_ip, NULL, NULL); |
2f5f6ad9 SR |
8099 | } |
8100 | #endif | |
34cdd18b | 8101 | NOKPROBE_SYMBOL(arch_ftrace_ops_list_func); |
2f5f6ad9 | 8102 | |
f1ff6348 SRRH |
8103 | /* |
8104 | * If there's only one function registered but it does not handle
78a01feb ZY |
8105 | * recursion, or needs RCU protection, then this function will be called
8106 | * by the mcount trampoline.
f1ff6348 | 8107 | */ |
c68c0fa2 | 8108 | static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip, |
d19ad077 | 8109 | struct ftrace_ops *op, struct ftrace_regs *fregs) |
f1ff6348 SRRH |
8110 | { |
8111 | int bit; | |
8112 | ||
ed65df63 | 8113 | bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START); |
f1ff6348 SRRH |
8114 | if (bit < 0) |
8115 | return; | |
8116 | ||
b40341fa | 8117 | if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) |
d19ad077 | 8118 | op->func(ip, parent_ip, op, fregs); |
c68c0fa2 | 8119 | |
f1ff6348 SRRH |
8120 | trace_clear_recursion(bit); |
8121 | } | |
fabe38ab | 8122 | NOKPROBE_SYMBOL(ftrace_ops_assist_func); |
f1ff6348 | 8123 | |
87354059 SRRH |
8124 | /** |
8125 | * ftrace_ops_get_func - get the function a trampoline should call | |
8126 | * @ops: the ops to get the function for | |
8127 | * | |
8128 | * Normally the mcount trampoline will call the ops->func, but there | |
8129 | * are times that it should not. For example, if the ops does not | |
8130 | * have its own recursion protection, then the trampoline should call
3a150df9 | 8131 | * ftrace_ops_assist_func() instead. |
87354059 | 8132 | * |
d1530413 | 8133 | * Returns: the function that the trampoline should call for @ops. |
87354059 SRRH |
8134 | */ |
8135 | ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops) | |
8136 | { | |
87354059 | 8137 | /* |
a25d036d SRV |
8138 | * If the function does not handle recursion or needs to be RCU safe, |
8139 | * then we need to call the assist handler. | |
87354059 | 8140 | */ |
a25d036d SRV |
8141 | if (ops->flags & (FTRACE_OPS_FL_RECURSION | |
8142 | FTRACE_OPS_FL_RCU)) | |
c68c0fa2 | 8143 | return ftrace_ops_assist_func; |
87354059 SRRH |
8144 | |
8145 | return ops->func; | |
8146 | } | |
8147 | ||
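/*
 * Editor's illustration (not part of the kernel source; names are made up):
 * how the flag test above plays out for a callback. With
 * FTRACE_OPS_FL_RECURSION set, ftrace_ops_get_func() hands the trampoline
 * ftrace_ops_assist_func(), which supplies the recursion guard; without it,
 * my_func() is called directly and must tolerate reentrancy on its own.
 */
static void my_func(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* runs unguarded unless FTRACE_OPS_FL_RECURSION is set on the ops */
}

static struct ftrace_ops my_guarded_ops = {
	.func	= my_func,
	.flags	= FTRACE_OPS_FL_RECURSION,	/* trampoline -> ftrace_ops_assist_func() */
};

static struct ftrace_ops my_raw_ops = {
	.func	= my_func,			/* trampoline -> my_func() directly */
};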
345ddcc8 SRRH |
8148 | static void |
8149 | ftrace_filter_pid_sched_switch_probe(void *data, bool preempt, | |
fa2c3254 | 8150 | struct task_struct *prev, |
9c2136be DK |
8151 | struct task_struct *next, |
8152 | unsigned int prev_state) | |
978f3a45 | 8153 | { |
345ddcc8 SRRH |
8154 | struct trace_array *tr = data; |
8155 | struct trace_pid_list *pid_list; | |
b3b1e6ed | 8156 | struct trace_pid_list *no_pid_list; |
978f3a45 | 8157 | |
345ddcc8 | 8158 | pid_list = rcu_dereference_sched(tr->function_pids); |
b3b1e6ed | 8159 | no_pid_list = rcu_dereference_sched(tr->function_no_pids); |
e32d8956 | 8160 | |
b3b1e6ed | 8161 | if (trace_ignore_this_task(pid_list, no_pid_list, next)) |
717e3f5e SRV |
8162 | this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, |
8163 | FTRACE_PID_IGNORE); | |
8164 | else | |
8165 | this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, | |
8166 | next->pid); | |
978f3a45 SR |
8167 | } |
8168 | ||
1e10486f NK |
8169 | static void |
8170 | ftrace_pid_follow_sched_process_fork(void *data, | |
8171 | struct task_struct *self, | |
8172 | struct task_struct *task) | |
8173 | { | |
8174 | struct trace_pid_list *pid_list; | |
8175 | struct trace_array *tr = data; | |
8176 | ||
8177 | pid_list = rcu_dereference_sched(tr->function_pids); | |
8178 | trace_filter_add_remove_task(pid_list, self, task); | |
b3b1e6ed SRV |
8179 | |
8180 | pid_list = rcu_dereference_sched(tr->function_no_pids); | |
8181 | trace_filter_add_remove_task(pid_list, self, task); | |
1e10486f NK |
8182 | } |
8183 | ||
8184 | static void | |
8185 | ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task) | |
8186 | { | |
8187 | struct trace_pid_list *pid_list; | |
8188 | struct trace_array *tr = data; | |
8189 | ||
8190 | pid_list = rcu_dereference_sched(tr->function_pids); | |
8191 | trace_filter_add_remove_task(pid_list, NULL, task); | |
b3b1e6ed SRV |
8192 | |
8193 | pid_list = rcu_dereference_sched(tr->function_no_pids); | |
8194 | trace_filter_add_remove_task(pid_list, NULL, task); | |
1e10486f NK |
8195 | } |
8196 | ||
8197 | void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) | |
8198 | { | |
8199 | if (enable) { | |
8200 | register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork, | |
8201 | tr); | |
afcab636 | 8202 | register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit, |
1e10486f NK |
8203 | tr); |
8204 | } else { | |
8205 | unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork, | |
8206 | tr); | |
afcab636 | 8207 | unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit, |
1e10486f NK |
8208 | tr); |
8209 | } | |
8210 | } | |
8211 | ||
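/*
 * Editor's note (usage illustration, not part of the kernel source):
 * ftrace_pid_follow_fork() is driven from user space by the "function-fork"
 * trace option. Assuming tracefs is mounted at /sys/kernel/tracing:
 *
 *	# echo $$ > /sys/kernel/tracing/set_ftrace_pid
 *	# echo 1 > /sys/kernel/tracing/options/function-fork
 *
 * With the option set, children of a filtered task are added to the pid
 * filter on fork and dropped from it again when they exit.
 */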
b3b1e6ed | 8212 | static void clear_ftrace_pids(struct trace_array *tr, int type) |
e32d8956 | 8213 | { |
345ddcc8 | 8214 | struct trace_pid_list *pid_list; |
b3b1e6ed | 8215 | struct trace_pid_list *no_pid_list; |
345ddcc8 | 8216 | int cpu; |
e32d8956 | 8217 | |
345ddcc8 SRRH |
8218 | pid_list = rcu_dereference_protected(tr->function_pids, |
8219 | lockdep_is_held(&ftrace_lock)); | |
b3b1e6ed SRV |
8220 | no_pid_list = rcu_dereference_protected(tr->function_no_pids, |
8221 | lockdep_is_held(&ftrace_lock)); | |
8222 | ||
8223 | /* Make sure there's something to do */ | |
27683626 | 8224 | if (!pid_type_enabled(type, pid_list, no_pid_list)) |
345ddcc8 | 8225 | return; |
229c4ef8 | 8226 | |
b3b1e6ed | 8227 | /* See if the pids still need to be checked after this */ |
27683626 | 8228 | if (!still_need_pid_events(type, pid_list, no_pid_list)) { |
b3b1e6ed SRV |
8229 | unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr); |
8230 | for_each_possible_cpu(cpu) | |
8231 | per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE; | |
8232 | } | |
e32d8956 | 8233 | |
b3b1e6ed SRV |
8234 | if (type & TRACE_PIDS) |
8235 | rcu_assign_pointer(tr->function_pids, NULL); | |
978f3a45 | 8236 | |
b3b1e6ed SRV |
8237 | if (type & TRACE_NO_PIDS) |
8238 | rcu_assign_pointer(tr->function_no_pids, NULL); | |
978f3a45 | 8239 | |
345ddcc8 | 8240 | /* Wait till all users are no longer using pid filtering */ |
74401729 | 8241 | synchronize_rcu(); |
e32d8956 | 8242 | |
b3b1e6ed | 8243 | if ((type & TRACE_PIDS) && pid_list) |
6954e415 | 8244 | trace_pid_list_free(pid_list); |
b3b1e6ed SRV |
8245 | |
8246 | if ((type & TRACE_NO_PIDS) && no_pid_list) | |
6954e415 | 8247 | trace_pid_list_free(no_pid_list); |
e32d8956 SR |
8248 | } |
8249 | ||
d879d0b8 NK |
8250 | void ftrace_clear_pids(struct trace_array *tr) |
8251 | { | |
8252 | mutex_lock(&ftrace_lock); | |
8253 | ||
b3b1e6ed | 8254 | clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS); |
d879d0b8 NK |
8255 | |
8256 | mutex_unlock(&ftrace_lock); | |
8257 | } | |
8258 | ||
b3b1e6ed | 8259 | static void ftrace_pid_reset(struct trace_array *tr, int type) |
df4fc315 | 8260 | { |
756d17ee | 8261 | mutex_lock(&ftrace_lock); |
b3b1e6ed | 8262 | clear_ftrace_pids(tr, type); |
978f3a45 | 8263 | |
756d17ee | 8264 | ftrace_update_pid_func(); |
e1effa01 | 8265 | ftrace_startup_all(0); |
756d17ee | 8266 | |
8267 | mutex_unlock(&ftrace_lock); | |
756d17ee | 8268 | } |
8269 | ||
345ddcc8 SRRH |
8270 | /* Greater than any max PID */ |
8271 | #define FTRACE_NO_PIDS (void *)(PID_MAX_LIMIT + 1) | |
df4fc315 | 8272 | |
756d17ee | 8273 | static void *fpid_start(struct seq_file *m, loff_t *pos) |
345ddcc8 | 8274 | __acquires(RCU) |
756d17ee | 8275 | { |
345ddcc8 SRRH |
8276 | struct trace_pid_list *pid_list; |
8277 | struct trace_array *tr = m->private; | |
8278 | ||
756d17ee | 8279 | mutex_lock(&ftrace_lock); |
345ddcc8 SRRH |
8280 | rcu_read_lock_sched(); |
8281 | ||
8282 | pid_list = rcu_dereference_sched(tr->function_pids); | |
756d17ee | 8283 | |
345ddcc8 SRRH |
8284 | if (!pid_list) |
8285 | return !(*pos) ? FTRACE_NO_PIDS : NULL; | |
756d17ee | 8286 | |
345ddcc8 | 8287 | return trace_pid_start(pid_list, pos); |
756d17ee | 8288 | } |
8289 | ||
8290 | static void *fpid_next(struct seq_file *m, void *v, loff_t *pos) | |
8291 | { | |
345ddcc8 SRRH |
8292 | struct trace_array *tr = m->private; |
8293 | struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids); | |
8294 | ||
e4075e8b VA |
8295 | if (v == FTRACE_NO_PIDS) { |
8296 | (*pos)++; | |
756d17ee | 8297 | return NULL; |
e4075e8b | 8298 | } |
345ddcc8 | 8299 | return trace_pid_next(pid_list, v, pos); |
756d17ee | 8300 | } |
8301 | ||
8302 | static void fpid_stop(struct seq_file *m, void *p) | |
345ddcc8 | 8303 | __releases(RCU) |
756d17ee | 8304 | { |
345ddcc8 | 8305 | rcu_read_unlock_sched(); |
756d17ee | 8306 | mutex_unlock(&ftrace_lock); |
8307 | } | |
8308 | ||
8309 | static int fpid_show(struct seq_file *m, void *v) | |
8310 | { | |
345ddcc8 | 8311 | if (v == FTRACE_NO_PIDS) { |
fa6f0cc7 | 8312 | seq_puts(m, "no pid\n"); |
756d17ee | 8313 | return 0; |
8314 | } | |
8315 | ||
345ddcc8 | 8316 | return trace_pid_show(m, v); |
756d17ee | 8317 | } |
8318 | ||
8319 | static const struct seq_operations ftrace_pid_sops = { | |
8320 | .start = fpid_start, | |
8321 | .next = fpid_next, | |
8322 | .stop = fpid_stop, | |
8323 | .show = fpid_show, | |
8324 | }; | |
8325 | ||
b3b1e6ed SRV |
8326 | static void *fnpid_start(struct seq_file *m, loff_t *pos) |
8327 | __acquires(RCU) | |
8328 | { | |
8329 | struct trace_pid_list *pid_list; | |
8330 | struct trace_array *tr = m->private; | |
8331 | ||
8332 | mutex_lock(&ftrace_lock); | |
8333 | rcu_read_lock_sched(); | |
8334 | ||
8335 | pid_list = rcu_dereference_sched(tr->function_no_pids); | |
8336 | ||
8337 | if (!pid_list) | |
8338 | return !(*pos) ? FTRACE_NO_PIDS : NULL; | |
8339 | ||
8340 | return trace_pid_start(pid_list, pos); | |
8341 | } | |
8342 | ||
8343 | static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos) | |
756d17ee | 8344 | { |
b3b1e6ed SRV |
8345 | struct trace_array *tr = m->private; |
8346 | struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids); | |
8347 | ||
8348 | if (v == FTRACE_NO_PIDS) { | |
8349 | (*pos)++; | |
8350 | return NULL; | |
8351 | } | |
8352 | return trace_pid_next(pid_list, v, pos); | |
8353 | } | |
8354 | ||
8355 | static const struct seq_operations ftrace_no_pid_sops = { | |
8356 | .start = fnpid_start, | |
8357 | .next = fnpid_next, | |
8358 | .stop = fpid_stop, | |
8359 | .show = fpid_show, | |
8360 | }; | |
8361 | ||
8362 | static int pid_open(struct inode *inode, struct file *file, int type) | |
8363 | { | |
8364 | const struct seq_operations *seq_ops; | |
345ddcc8 SRRH |
8365 | struct trace_array *tr = inode->i_private; |
8366 | struct seq_file *m; | |
756d17ee | 8367 | int ret = 0; |
8368 | ||
8530dec6 SRV |
8369 | ret = tracing_check_open_get_tr(tr); |
8370 | if (ret) | |
8371 | return ret; | |
345ddcc8 | 8372 | |
756d17ee | 8373 | if ((file->f_mode & FMODE_WRITE) && |
8374 | (file->f_flags & O_TRUNC)) | |
b3b1e6ed SRV |
8375 | ftrace_pid_reset(tr, type); |
8376 | ||
8377 | switch (type) { | |
8378 | case TRACE_PIDS: | |
8379 | seq_ops = &ftrace_pid_sops; | |
8380 | break; | |
8381 | case TRACE_NO_PIDS: | |
8382 | seq_ops = &ftrace_no_pid_sops; | |
8383 | break; | |
026bb845 KC |
8384 | default: |
8385 | trace_array_put(tr); | |
8386 | WARN_ON_ONCE(1); | |
8387 | return -EINVAL; | |
b3b1e6ed | 8388 | } |
756d17ee | 8389 | |
b3b1e6ed | 8390 | ret = seq_open(file, seq_ops); |
345ddcc8 SRRH |
8391 | if (ret < 0) { |
8392 | trace_array_put(tr); | |
8393 | } else { | |
8394 | m = file->private_data; | |
8395 | /* copy tr over to seq ops */ | |
8396 | m->private = tr; | |
8397 | } | |
756d17ee | 8398 | |
8399 | return ret; | |
8400 | } | |
8401 | ||
b3b1e6ed SRV |
8402 | static int |
8403 | ftrace_pid_open(struct inode *inode, struct file *file) | |
8404 | { | |
8405 | return pid_open(inode, file, TRACE_PIDS); | |
8406 | } | |
8407 | ||
8408 | static int | |
8409 | ftrace_no_pid_open(struct inode *inode, struct file *file) | |
8410 | { | |
8411 | return pid_open(inode, file, TRACE_NO_PIDS); | |
8412 | } | |
8413 | ||
345ddcc8 SRRH |
8414 | static void ignore_task_cpu(void *data) |
8415 | { | |
8416 | struct trace_array *tr = data; | |
8417 | struct trace_pid_list *pid_list; | |
b3b1e6ed | 8418 | struct trace_pid_list *no_pid_list; |
345ddcc8 SRRH |
8419 | |
8420 | /* | |
8421 | * This function is called by on_each_cpu() while the | |
8422 | * event_mutex is held. | |
8423 | */ | |
8424 | pid_list = rcu_dereference_protected(tr->function_pids, | |
8425 | mutex_is_locked(&ftrace_lock)); | |
b3b1e6ed SRV |
8426 | no_pid_list = rcu_dereference_protected(tr->function_no_pids, |
8427 | mutex_is_locked(&ftrace_lock)); | |
345ddcc8 | 8428 | |
b3b1e6ed | 8429 | if (trace_ignore_this_task(pid_list, no_pid_list, current)) |
717e3f5e SRV |
8430 | this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, |
8431 | FTRACE_PID_IGNORE); | |
8432 | else | |
8433 | this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, | |
8434 | current->pid); | |
345ddcc8 SRRH |
8435 | } |
8436 | ||
df4fc315 | 8437 | static ssize_t |
b3b1e6ed SRV |
8438 | pid_write(struct file *filp, const char __user *ubuf, |
8439 | size_t cnt, loff_t *ppos, int type) | |
df4fc315 | 8440 | { |
345ddcc8 SRRH |
8441 | struct seq_file *m = filp->private_data; |
8442 | struct trace_array *tr = m->private; | |
b3b1e6ed SRV |
8443 | struct trace_pid_list *filtered_pids; |
8444 | struct trace_pid_list *other_pids; | |
345ddcc8 SRRH |
8445 | struct trace_pid_list *pid_list; |
8446 | ssize_t ret; | |
df4fc315 | 8447 | |
345ddcc8 SRRH |
8448 | if (!cnt) |
8449 | return 0; | |
8450 | ||
1d95fd9d | 8451 | guard(mutex)(&ftrace_lock); |
345ddcc8 | 8452 | |
b3b1e6ed SRV |
8453 | switch (type) { |
8454 | case TRACE_PIDS: | |
8455 | filtered_pids = rcu_dereference_protected(tr->function_pids, | |
345ddcc8 | 8456 | lockdep_is_held(&ftrace_lock)); |
b3b1e6ed SRV |
8457 | other_pids = rcu_dereference_protected(tr->function_no_pids, |
8458 | lockdep_is_held(&ftrace_lock)); | |
8459 | break; | |
8460 | case TRACE_NO_PIDS: | |
8461 | filtered_pids = rcu_dereference_protected(tr->function_no_pids, | |
8462 | lockdep_is_held(&ftrace_lock)); | |
8463 | other_pids = rcu_dereference_protected(tr->function_pids, | |
345ddcc8 | 8464 | lockdep_is_held(&ftrace_lock)); |
b3b1e6ed | 8465 | break; |
026bb845 | 8466 | default: |
026bb845 | 8467 | WARN_ON_ONCE(1); |
1d95fd9d | 8468 | return -EINVAL; |
b3b1e6ed | 8469 | } |
345ddcc8 SRRH |
8470 | |
8471 | ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt); | |
8472 | if (ret < 0) | |
1d95fd9d | 8473 | return ret; |
df4fc315 | 8474 | |
b3b1e6ed SRV |
8475 | switch (type) { |
8476 | case TRACE_PIDS: | |
8477 | rcu_assign_pointer(tr->function_pids, pid_list); | |
8478 | break; | |
8479 | case TRACE_NO_PIDS: | |
8480 | rcu_assign_pointer(tr->function_no_pids, pid_list); | |
8481 | break; | |
8482 | } | |
8483 | ||
df4fc315 | 8484 | |
345ddcc8 | 8485 | if (filtered_pids) { |
74401729 | 8486 | synchronize_rcu(); |
6954e415 | 8487 | trace_pid_list_free(filtered_pids); |
b3b1e6ed | 8488 | } else if (pid_list && !other_pids) { |
345ddcc8 SRRH |
8489 | /* Register a probe to set whether to ignore the tracing of a task */ |
8490 | register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr); | |
8491 | } | |
df4fc315 | 8492 | |
756d17ee | 8493 | /* |
345ddcc8 SRRH |
8494 | * Ignoring of pids is done at task switch. But we have to |
8495 | * check for those tasks that are currently running. | |
8496 | * Always do this in case a pid was appended or removed. | |
756d17ee | 8497 | */ |
345ddcc8 | 8498 | on_each_cpu(ignore_task_cpu, tr, 1); |
756d17ee | 8499 | |
345ddcc8 SRRH |
8500 | ftrace_update_pid_func(); |
8501 | ftrace_startup_all(0); | |
df4fc315 | 8502 | |
1d95fd9d | 8503 | *ppos += ret; |
df4fc315 | 8504 | |
345ddcc8 | 8505 | return ret; |
756d17ee | 8506 | } |
df4fc315 | 8507 | |
b3b1e6ed SRV |
8508 | static ssize_t |
8509 | ftrace_pid_write(struct file *filp, const char __user *ubuf, | |
8510 | size_t cnt, loff_t *ppos) | |
8511 | { | |
8512 | return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS); | |
8513 | } | |
8514 | ||
8515 | static ssize_t | |
8516 | ftrace_no_pid_write(struct file *filp, const char __user *ubuf, | |
8517 | size_t cnt, loff_t *ppos) | |
8518 | { | |
8519 | return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS); | |
8520 | } | |
8521 | ||
756d17ee | 8522 | static int |
8523 | ftrace_pid_release(struct inode *inode, struct file *file) | |
8524 | { | |
345ddcc8 | 8525 | struct trace_array *tr = inode->i_private; |
df4fc315 | 8526 | |
345ddcc8 SRRH |
8527 | trace_array_put(tr); |
8528 | ||
8529 | return seq_release(inode, file); | |
df4fc315 SR |
8530 | } |
8531 | ||
5e2336a0 | 8532 | static const struct file_operations ftrace_pid_fops = { |
756d17ee | 8533 | .open = ftrace_pid_open, |
8534 | .write = ftrace_pid_write, | |
8535 | .read = seq_read, | |
098c879e | 8536 | .llseek = tracing_lseek, |
756d17ee | 8537 | .release = ftrace_pid_release, |
df4fc315 SR |
8538 | }; |
8539 | ||
b3b1e6ed SRV |
8540 | static const struct file_operations ftrace_no_pid_fops = { |
8541 | .open = ftrace_no_pid_open, | |
8542 | .write = ftrace_no_pid_write, | |
8543 | .read = seq_read, | |
8544 | .llseek = tracing_lseek, | |
8545 | .release = ftrace_pid_release, | |
8546 | }; | |
8547 | ||
345ddcc8 | 8548 | void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer) |
df4fc315 | 8549 | { |
21ccc9cd | 8550 | trace_create_file("set_ftrace_pid", TRACE_MODE_WRITE, d_tracer, |
345ddcc8 | 8551 | tr, &ftrace_pid_fops); |
21ccc9cd SRV |
8552 | trace_create_file("set_ftrace_notrace_pid", TRACE_MODE_WRITE, |
8553 | d_tracer, tr, &ftrace_no_pid_fops); | |
df4fc315 | 8554 | } |
df4fc315 | 8555 | |
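/*
 * Editor's note (usage illustration, not part of the kernel source): the two
 * files created above appear in each trace instance's directory. Assuming
 * tracefs is mounted at /sys/kernel/tracing:
 *
 *	# echo 1234 > /sys/kernel/tracing/set_ftrace_pid	  (trace only pid 1234)
 *	# echo 5678 > /sys/kernel/tracing/set_ftrace_notrace_pid (trace all but 5678)
 *	# echo > /sys/kernel/tracing/set_ftrace_pid		  (O_TRUNC clears the list)
 *
 * Writes funnel through pid_write() above, and the sched_switch probe then
 * updates each CPU's ftrace_ignore_pid state at every context switch.
 */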
501c2375 SRRH |
8556 | void __init ftrace_init_tracefs_toplevel(struct trace_array *tr, |
8557 | struct dentry *d_tracer) | |
8558 | { | |
8559 | /* Only the top level directory has the dyn_tracefs and profile */ | |
8560 | WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL)); | |
8561 | ||
8562 | ftrace_init_dyn_tracefs(d_tracer); | |
8563 | ftrace_profile_tracefs(d_tracer); | |
8564 | } | |
8565 | ||
a2bb6a3d | 8566 | /** |
81adbdc0 | 8567 | * ftrace_kill - kill ftrace |
a2bb6a3d SR |
8568 | * |
8569 | * This function should be used by panic code. It stops ftrace
8570 | * but in a not so nice way. Unlike a clean shutdown via
8571 | * unregister_ftrace_function(), it is safe to call from atomic context.
8572 | */ | |
81adbdc0 | 8573 | void ftrace_kill(void) |
a2bb6a3d SR |
8574 | { |
8575 | ftrace_disabled = 1; | |
8576 | ftrace_enabled = 0; | |
5ccba64a | 8577 | ftrace_trace_function = ftrace_stub; |
1a7d0890 | 8578 | kprobe_ftrace_kill(); |
a2bb6a3d SR |
8579 | } |
8580 | ||
e0a413f6 | 8581 | /** |
6130722f SRV |
8582 | * ftrace_is_dead - Test if ftrace is dead or not. |
8583 | * | |
d1530413 | 8584 | * Returns: 1 if ftrace is "dead", zero otherwise. |
e0a413f6 SR |
8585 | */ |
8586 | int ftrace_is_dead(void) | |
8587 | { | |
8588 | return ftrace_disabled; | |
8589 | } | |
8590 | ||
53cd885b SL |
8591 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS |
8592 | /* | |
8593 | * When registering ftrace_ops with IPMODIFY, it is necessary to make sure | |
8594 | * it doesn't conflict with any direct ftrace_ops. If there is an existing
8595 | * direct ftrace_ops on a kernel function being patched, call | |
8596 | * FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER on it to enable sharing. | |
8597 | * | |
8598 | * @ops: ftrace_ops being registered. | |
8599 | * | |
8600 | * Returns: | |
8601 | * 0 on success; | |
8602 | * Negative on failure. | |
8603 | */ | |
8604 | static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops) | |
8605 | { | |
8606 | struct ftrace_func_entry *entry; | |
8607 | struct ftrace_hash *hash; | |
8608 | struct ftrace_ops *op; | |
8609 | int size, i, ret; | |
8610 | ||
8611 | lockdep_assert_held_once(&direct_mutex); | |
8612 | ||
8613 | if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY)) | |
8614 | return 0; | |
8615 | ||
8616 | hash = ops->func_hash->filter_hash; | |
8617 | size = 1 << hash->size_bits; | |
8618 | for (i = 0; i < size; i++) { | |
8619 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { | |
8620 | unsigned long ip = entry->ip; | |
8621 | bool found_op = false; | |
8622 | ||
8623 | mutex_lock(&ftrace_lock); | |
8624 | do_for_each_ftrace_op(op, ftrace_ops_list) { | |
8625 | if (!(op->flags & FTRACE_OPS_FL_DIRECT)) | |
8626 | continue; | |
8627 | if (ops_references_ip(op, ip)) { | |
8628 | found_op = true; | |
8629 | break; | |
8630 | } | |
8631 | } while_for_each_ftrace_op(op); | |
8632 | mutex_unlock(&ftrace_lock); | |
8633 | ||
8634 | if (found_op) { | |
8635 | if (!op->ops_func) | |
8636 | return -EBUSY; | |
8637 | ||
8638 | ret = op->ops_func(op, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER); | |
8639 | if (ret) | |
8640 | return ret; | |
8641 | } | |
8642 | } | |
8643 | } | |
8644 | ||
8645 | return 0; | |
8646 | } | |
8647 | ||
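/*
 * Editor's sketch (hypothetical, not part of the kernel source): a DIRECT
 * ops that wants to cooperate with an IPMODIFY user would install a handler
 * like this in its ->ops_func member. The function name is made up; the
 * command values are the ones used above.
 */
static int my_direct_ops_func(struct ftrace_ops *op, enum ftrace_ops_cmd cmd)
{
	switch (cmd) {
	case FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER:
		/* switch to a trampoline that honors a modified regs->ip */
		return 0;
	case FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER:
		/* optionally switch back to the faster private trampoline */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}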
8648 | /* | |
8649 | * Similar to prepare_direct_functions_for_ipmodify, this cleans up after
8650 | * an ops with IPMODIFY is unregistered. The cleanup is optional for most
8651 | * DIRECT ops.
8652 | */ | |
8653 | static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops) | |
8654 | { | |
8655 | struct ftrace_func_entry *entry; | |
8656 | struct ftrace_hash *hash; | |
8657 | struct ftrace_ops *op; | |
8658 | int size, i; | |
8659 | ||
8660 | if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY)) | |
8661 | return; | |
8662 | ||
8663 | mutex_lock(&direct_mutex); | |
8664 | ||
8665 | hash = ops->func_hash->filter_hash; | |
8666 | size = 1 << hash->size_bits; | |
8667 | for (i = 0; i < size; i++) { | |
8668 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { | |
8669 | unsigned long ip = entry->ip; | |
8670 | bool found_op = false; | |
8671 | ||
8672 | mutex_lock(&ftrace_lock); | |
8673 | do_for_each_ftrace_op(op, ftrace_ops_list) { | |
8674 | if (!(op->flags & FTRACE_OPS_FL_DIRECT)) | |
8675 | continue; | |
8676 | if (ops_references_ip(op, ip)) { | |
8677 | found_op = true; | |
8678 | break; | |
8679 | } | |
8680 | } while_for_each_ftrace_op(op); | |
8681 | mutex_unlock(&ftrace_lock); | |
8682 | ||
8683 | /* The cleanup is optional, ignore any errors */ | |
8684 | if (found_op && op->ops_func) | |
8685 | op->ops_func(op, FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER); | |
8686 | } | |
8687 | } | |
8688 | mutex_unlock(&direct_mutex); | |
8689 | } | |
8690 | ||
8691 | #define lock_direct_mutex() mutex_lock(&direct_mutex) | |
8692 | #define unlock_direct_mutex() mutex_unlock(&direct_mutex) | |
8693 | ||
8694 | #else /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ | |
8695 | ||
8696 | static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops) | |
8697 | { | |
8698 | return 0; | |
8699 | } | |
8700 | ||
8701 | static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops) | |
8702 | { | |
8703 | } | |
8704 | ||
8705 | #define lock_direct_mutex() do { } while (0) | |
8706 | #define unlock_direct_mutex() do { } while (0) | |
8707 | ||
8708 | #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ | |
8709 | ||
8710 | /* | |
8711 | * Similar to register_ftrace_function, except we don't lock direct_mutex. | |
8712 | */ | |
8713 | static int register_ftrace_function_nolock(struct ftrace_ops *ops) | |
8714 | { | |
8715 | int ret; | |
8716 | ||
8717 | ftrace_ops_init(ops); | |
8718 | ||
8719 | mutex_lock(&ftrace_lock); | |
8720 | ||
8721 | ret = ftrace_startup(ops, 0); | |
8722 | ||
8723 | mutex_unlock(&ftrace_lock); | |
8724 | ||
8725 | return ret; | |
8726 | } | |
8727 | ||
16444a8a | 8728 | /** |
3d083395 | 8729 | * register_ftrace_function - register a function for profiling |
78cbc651 | 8730 | * @ops: ops structure that holds the function for profiling. |
16444a8a | 8731 | * |
3d083395 SR |
8732 | * Register a function to be called when any traceable function
8733 | * in the kernel is entered.
8734 | * | |
8735 | * Note: @ops->func and all the functions it calls must be labeled | |
8736 | * with "notrace", otherwise it will go into a | |
8737 | * recursive loop. | |
16444a8a | 8738 | */ |
3d083395 | 8739 | int register_ftrace_function(struct ftrace_ops *ops) |
16444a8a | 8740 | { |
3b1a8f45 | 8741 | int ret; |
4eebcc81 | 8742 | |
53cd885b SL |
8743 | lock_direct_mutex(); |
8744 | ret = prepare_direct_functions_for_ipmodify(ops); | |
8745 | if (ret < 0) | |
8746 | goto out_unlock; | |
b848914c | 8747 | |
53cd885b | 8748 | ret = register_ftrace_function_nolock(ops); |
8d240dd8 | 8749 | |
53cd885b SL |
8750 | out_unlock: |
8751 | unlock_direct_mutex(); | |
b0fc494f | 8752 | return ret; |
3d083395 | 8753 | } |
cdbe61bf | 8754 | EXPORT_SYMBOL_GPL(register_ftrace_function); |
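/*
 * Editor's example (hypothetical module code, not part of the kernel
 * source; assumes <linux/ftrace.h> and <linux/module.h>): the typical
 * registration sequence. The callback matches the four-argument signature
 * used by the list function above, the filter is set before registering,
 * and FTRACE_OPS_FL_RECURSION asks ftrace to supply the recursion guard.
 * "do_sys_open" is just an arbitrary example target.
 */
static void my_trace_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* keep this (and everything it calls) notrace and minimal */
}

static struct ftrace_ops my_trace_ops = {
	.func	= my_trace_func,
	.flags	= FTRACE_OPS_FL_RECURSION,
};

static int __init my_tracer_init(void)
{
	int ret;

	ret = ftrace_set_filter(&my_trace_ops, "do_sys_open",
				strlen("do_sys_open"), 0);
	if (ret)
		return ret;

	return register_ftrace_function(&my_trace_ops);
}
module_init(my_tracer_init);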
3d083395 SR |
8755 | |
8756 | /** | |
32632920 | 8757 | * unregister_ftrace_function - unregister a function for profiling. |
78cbc651 | 8758 | * @ops: ops structure that holds the function to unregister |
3d083395 SR |
8759 | * |
8760 | * Unregister a function that was added to be called by ftrace profiling. | |
8761 | */ | |
8762 | int unregister_ftrace_function(struct ftrace_ops *ops) | |
8763 | { | |
8764 | int ret; | |
8765 | ||
e6ea44e9 | 8766 | mutex_lock(&ftrace_lock); |
8a56d776 | 8767 | ret = ftrace_shutdown(ops, 0); |
e6ea44e9 | 8768 | mutex_unlock(&ftrace_lock); |
b0fc494f | 8769 | |
53cd885b | 8770 | cleanup_direct_functions_after_ipmodify(ops); |
b0fc494f SR |
8771 | return ret; |
8772 | } | |
cdbe61bf | 8773 | EXPORT_SYMBOL_GPL(unregister_ftrace_function); |
b0fc494f | 8774 | |
bed0d9a5 JO |
8775 | static int symbols_cmp(const void *a, const void *b) |
8776 | { | |
8777 | const char **str_a = (const char **) a; | |
8778 | const char **str_b = (const char **) b; | |
8779 | ||
8780 | return strcmp(*str_a, *str_b); | |
8781 | } | |
8782 | ||
8783 | struct kallsyms_data { | |
8784 | unsigned long *addrs; | |
8785 | const char **syms; | |
8786 | size_t cnt; | |
8787 | size_t found; | |
8788 | }; | |
8789 | ||
3640bf85 JO |
8790 | /* This function gets called for all kernel and module symbols |
8791 | * and returns 1 once all the requested symbols have been resolved,
8792 | * 0 otherwise. | |
8793 | */ | |
3703bd54 | 8794 | static int kallsyms_callback(void *data, const char *name, unsigned long addr) |
bed0d9a5 JO |
8795 | { |
8796 | struct kallsyms_data *args = data; | |
eb1b2985 JO |
8797 | const char **sym; |
8798 | int idx; | |
bed0d9a5 | 8799 | |
eb1b2985 JO |
8800 | sym = bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp); |
8801 | if (!sym) | |
8802 | return 0; | |
8803 | ||
8804 | idx = sym - args->syms; | |
8805 | if (args->addrs[idx]) | |
bed0d9a5 JO |
8806 | return 0; |
8807 | ||
9d68c19c | 8808 | if (!ftrace_location(addr)) |
bed0d9a5 JO |
8809 | return 0; |
8810 | ||
eb1b2985 JO |
8811 | args->addrs[idx] = addr; |
8812 | args->found++; | |
bed0d9a5 JO |
8813 | return args->found == args->cnt ? 1 : 0; |
8814 | } | |
8815 | ||
8816 | /** | |
8817 | * ftrace_lookup_symbols - Lookup addresses for array of symbols | |
8818 | * | |
8819 | * @sorted_syms: array of pointers to the symbol names to resolve,
8820 | * must be alphabetically sorted
8821 | * @cnt: number of symbols/addresses in @sorted_syms/@addrs arrays
8822 | * @addrs: array for storing resulting addresses | |
8823 | * | |
8824 | * This function looks up addresses for the array of symbols provided in
8825 | * the @sorted_syms array (must be alphabetically sorted) and stores them in
8826 | * @addrs array, which needs to be big enough to store at least @cnt | |
8827 | * addresses. | |
8828 | * | |
d1530413 | 8829 | * Returns: 0 if all provided symbols are found, -ESRCH otherwise. |
bed0d9a5 JO |
8830 | */ |
8831 | int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs) | |
8832 | { | |
8833 | struct kallsyms_data args; | |
3640bf85 | 8834 | int found_all; |
bed0d9a5 | 8835 | |
eb1b2985 | 8836 | memset(addrs, 0, sizeof(*addrs) * cnt); |
bed0d9a5 JO |
8837 | args.addrs = addrs; |
8838 | args.syms = sorted_syms; | |
8839 | args.cnt = cnt; | |
8840 | args.found = 0; | |
3640bf85 JO |
8841 | |
8842 | found_all = kallsyms_on_each_symbol(kallsyms_callback, &args); | |
8843 | if (found_all) | |
8844 | return 0; | |
07cc2c93 | 8845 | found_all = module_kallsyms_on_each_symbol(NULL, kallsyms_callback, &args); |
3640bf85 | 8846 | return found_all ? 0 : -ESRCH; |
bed0d9a5 | 8847 | } |
44d35720 | 8848 | |
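/*
 * Editor's example (hypothetical caller, not part of the kernel source):
 * resolving a pre-sorted batch of names. The symbol names are made up; real
 * callers (the BPF kprobe-multi attach path, for one) sort user-supplied
 * arrays with sort() and a symbols_cmp()-style comparator first.
 */
static int resolve_my_symbols(void)
{
	static const char *my_syms[] = { "func_a", "func_b", "func_c" };
	unsigned long my_addrs[ARRAY_SIZE(my_syms)];

	/* returns 0 only if every name resolved to a patchable location */
	return ftrace_lookup_symbols(my_syms, ARRAY_SIZE(my_syms), my_addrs);
}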
5d79fa0d | 8849 | #ifdef CONFIG_SYSCTL |
8fd7c214 LC |
8850 | |
8851 | #ifdef CONFIG_DYNAMIC_FTRACE | |
f8b7d2b4 LC |
8852 | static void ftrace_startup_sysctl(void) |
8853 | { | |
8854 | int command; | |
8855 | ||
8856 | if (unlikely(ftrace_disabled)) | |
8857 | return; | |
8858 | ||
8859 | /* Force update next time */ | |
8860 | saved_ftrace_func = NULL; | |
8861 | /* ftrace_start_up is true if we want ftrace running */ | |
8862 | if (ftrace_start_up) { | |
8863 | command = FTRACE_UPDATE_CALLS; | |
8864 | if (ftrace_graph_active) | |
8865 | command |= FTRACE_START_FUNC_RET; | |
8866 | ftrace_startup_enable(command); | |
8867 | } | |
8868 | } | |
8869 | ||
8870 | static void ftrace_shutdown_sysctl(void) | |
8871 | { | |
8872 | int command; | |
8873 | ||
8874 | if (unlikely(ftrace_disabled)) | |
8875 | return; | |
8876 | ||
8877 | /* ftrace_start_up is true if ftrace is running */ | |
8878 | if (ftrace_start_up) { | |
8879 | command = FTRACE_DISABLE_CALLS; | |
8880 | if (ftrace_graph_active) | |
8881 | command |= FTRACE_STOP_FUNC_RET; | |
8882 | ftrace_run_update_code(command); | |
8883 | } | |
8884 | } | |
8fd7c214 LC |
8885 | #else |
8886 | # define ftrace_startup_sysctl() do { } while (0) | |
8887 | # define ftrace_shutdown_sysctl() do { } while (0) | |
8888 | #endif /* CONFIG_DYNAMIC_FTRACE */ | |
f8b7d2b4 | 8889 | |
7162431d MB |
8890 | static bool is_permanent_ops_registered(void) |
8891 | { | |
8892 | struct ftrace_ops *op; | |
8893 | ||
8894 | do_for_each_ftrace_op(op, ftrace_ops_list) { | |
8895 | if (op->flags & FTRACE_OPS_FL_PERMANENT) | |
8896 | return true; | |
8897 | } while_for_each_ftrace_op(op); | |
8898 | ||
8899 | return false; | |
8900 | } | |
8901 | ||
8e4e83b2 | 8902 | static int |
78eb4ea2 | 8903 | ftrace_enable_sysctl(const struct ctl_table *table, int write, |
54fa9ba5 | 8904 | void *buffer, size_t *lenp, loff_t *ppos) |
b0fc494f | 8905 | { |
1d95fd9d | 8906 | int ret; |
4eebcc81 | 8907 | |
1d95fd9d | 8908 | guard(mutex)(&ftrace_lock); |
b0fc494f | 8909 | |
45a4a237 | 8910 | if (unlikely(ftrace_disabled)) |
1d95fd9d | 8911 | return -ENODEV; |
45a4a237 SR |
8912 | |
8913 | ret = proc_dointvec(table, write, buffer, lenp, ppos); | |
b0fc494f | 8914 | |
a32c7765 | 8915 | if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled)) |
1d95fd9d | 8916 | return ret; |
b0fc494f | 8917 | |
b0fc494f SR |
8918 | if (ftrace_enabled) { |
8919 | ||
b0fc494f | 8920 | /* we are starting ftrace again */ |
f86f4180 CZ |
8921 | if (rcu_dereference_protected(ftrace_ops_list, |
8922 | lockdep_is_held(&ftrace_lock)) != &ftrace_list_end) | |
5000c418 | 8923 | update_ftrace_function(); |
b0fc494f | 8924 | |
524a3868 SRRH |
8925 | ftrace_startup_sysctl(); |
8926 | ||
b0fc494f | 8927 | } else { |
7162431d MB |
8928 | if (is_permanent_ops_registered()) { |
8929 | ftrace_enabled = true; | |
1d95fd9d | 8930 | return -EBUSY; |
7162431d MB |
8931 | } |
8932 | ||
b0fc494f SR |
8933 | /* stopping ftrace calls (just send to ftrace_stub) */ |
8934 | ftrace_trace_function = ftrace_stub; | |
8935 | ||
8936 | ftrace_shutdown_sysctl(); | |
8937 | } | |
8938 | ||
7162431d | 8939 | last_ftrace_enabled = !!ftrace_enabled; |
1d95fd9d | 8940 | return 0; |
16444a8a | 8941 | } |
8e4e83b2 | 8942 | |
1751f872 | 8943 | static const struct ctl_table ftrace_sysctls[] = { |
8e4e83b2 WX |
8944 | { |
8945 | .procname = "ftrace_enabled", | |
8946 | .data = &ftrace_enabled, | |
8947 | .maxlen = sizeof(int), | |
8948 | .mode = 0644, | |
8949 | .proc_handler = ftrace_enable_sysctl, | |
8950 | }, | |
8e4e83b2 WX |
8951 | }; |
8952 | ||
8953 | static int __init ftrace_sysctl_init(void) | |
8954 | { | |
8955 | register_sysctl_init("kernel", ftrace_sysctls); | |
8956 | return 0; | |
8957 | } | |
8958 | late_initcall(ftrace_sysctl_init); | |
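/*
 * Editor's note (usage illustration, not part of the kernel source): the
 * knob registered above is the runtime kill switch for function tracing:
 *
 *	# sysctl kernel.ftrace_enabled=0
 *	# echo 0 > /proc/sys/kernel/ftrace_enabled	(equivalent)
 *
 * Disabling patches every call site back to a nop via
 * ftrace_shutdown_sysctl(); the write is refused with -EBUSY while an ops
 * with FTRACE_OPS_FL_PERMANENT is registered, as the handler above shows.
 */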
8959 | #endif |