tracing/perf: Fix double put of trace event when init fails
[linux-2.6-block.git] / kernel/trace/ftrace.c
bcea3f96 1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Infrastructure for profiling code inserted by 'gcc -pg'.
4 *
5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
7 *
8 * Originally ported from the -rt patch by:
9 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
10 *
11 * Based on code in the latency_tracer, that is:
12 *
13 * Copyright (C) 2004-2006 Ingo Molnar
6d49e352 14 * Copyright (C) 2004 Nadia Yvette Chambers
15 */
16
17#include <linux/stop_machine.h>
18#include <linux/clocksource.h>
29930025 19#include <linux/sched/task.h>
3d083395 20#include <linux/kallsyms.h>
17911ff3 21#include <linux/security.h>
5072c59f 22#include <linux/seq_file.h>
8434dc93 23#include <linux/tracefs.h>
3d083395 24#include <linux/hardirq.h>
2d8b820b 25#include <linux/kthread.h>
5072c59f 26#include <linux/uaccess.h>
5855fead 27#include <linux/bsearch.h>
56d82e00 28#include <linux/module.h>
2d8b820b 29#include <linux/ftrace.h>
b0fc494f 30#include <linux/sysctl.h>
5a0e3ad6 31#include <linux/slab.h>
5072c59f 32#include <linux/ctype.h>
68950619 33#include <linux/sort.h>
3d083395 34#include <linux/list.h>
59df055f 35#include <linux/hash.h>
3f379b03 36#include <linux/rcupdate.h>
fabe38ab 37#include <linux/kprobes.h>
3d083395 38
ad8d75ff 39#include <trace/events/sched.h>
8aef2d28 40
b80f0f6c 41#include <asm/sections.h>
2af15d6a 42#include <asm/setup.h>
395a59d0 43
3306fc4a 44#include "ftrace_internal.h"
0706f1c4 45#include "trace_output.h"
bac429f0 46#include "trace_stat.h"
16444a8a 47
48#define FTRACE_INVALID_FUNCTION "__ftrace_invalid_address__"
49
6912896e 50#define FTRACE_WARN_ON(cond) \
51 ({ \
52 int ___r = cond; \
53 if (WARN_ON(___r)) \
6912896e 54 ftrace_kill(); \
55 ___r; \
56 })
57
58#define FTRACE_WARN_ON_ONCE(cond) \
59 ({ \
60 int ___r = cond; \
61 if (WARN_ON_ONCE(___r)) \
6912896e 62 ftrace_kill(); \
63 ___r; \
64 })
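/*
 * Note: both macros above evaluate to the tested condition, so they can
 * gate an early return, as done later in this file, e.g. (sketch):
 *
 *	if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
 *		return false;
 *
 * When the condition is true, ftrace_kill() shuts ftrace down to avoid
 * doing further damage.
 */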
6912896e 65
8fc0c701 66/* hash bits for specific function selection */
67#define FTRACE_HASH_DEFAULT_BITS 10
68#define FTRACE_HASH_MAX_BITS 12
8fc0c701 69
f04f24fb 70#ifdef CONFIG_DYNAMIC_FTRACE
71#define INIT_OPS_HASH(opsname) \
72 .func_hash = &opsname.local_hash, \
73 .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
f04f24fb 74#else
33b7f99c 75#define INIT_OPS_HASH(opsname)
76#endif
77
78enum {
79 FTRACE_MODIFY_ENABLE_FL = (1 << 0),
80 FTRACE_MODIFY_MAY_SLEEP_FL = (1 << 1),
81};
82
3306fc4a 83struct ftrace_ops ftrace_list_end __read_mostly = {
2f5f6ad9 84 .func = ftrace_stub,
a25d036d 85 .flags = FTRACE_OPS_FL_STUB,
33b7f99c 86 INIT_OPS_HASH(ftrace_list_end)
87};
88
89/* ftrace_enabled is a method to turn ftrace on or off */
90int ftrace_enabled __read_mostly;
5d79fa0d 91static int __maybe_unused last_ftrace_enabled;
b0fc494f 92
93/* Current function tracing op */
94struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
95/* What to set function_trace_op to */
96static struct ftrace_ops *set_function_trace_op;
60a7ecf4 97
345ddcc8 98static bool ftrace_pids_enabled(struct ftrace_ops *ops)
e3eea140 99{
100 struct trace_array *tr;
101
102 if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
103 return false;
104
105 tr = ops->private;
106
b3b1e6ed 107 return tr->function_pids != NULL || tr->function_no_pids != NULL;
108}
109
110static void ftrace_update_trampoline(struct ftrace_ops *ops);
111
112/*
113 * ftrace_disabled is set when an anomaly is discovered.
114 * ftrace_disabled is much stronger than ftrace_enabled.
115 */
116static int ftrace_disabled __read_mostly;
117
3306fc4a 118DEFINE_MUTEX(ftrace_lock);
b0fc494f 119
3306fc4a 120struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
16444a8a 121ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
3306fc4a 122struct ftrace_ops global_ops;
16444a8a 123
50c69781 124/* Defined by vmlinux.lds.h see the comment above arch_ftrace_ops_list_func for details */
125void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
126 struct ftrace_ops *op, struct ftrace_regs *fregs);
b848914c 127
128static inline void ftrace_ops_init(struct ftrace_ops *ops)
129{
130#ifdef CONFIG_DYNAMIC_FTRACE
131 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
132 mutex_init(&ops->local_hash.regex_lock);
133 ops->func_hash = &ops->local_hash;
134 ops->flags |= FTRACE_OPS_FL_INITIALIZED;
135 }
136#endif
137}
138
2f5f6ad9 139static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
d19ad077 140 struct ftrace_ops *op, struct ftrace_regs *fregs)
df4fc315 141{
345ddcc8 142 struct trace_array *tr = op->private;
717e3f5e 143 int pid;
345ddcc8 144
145 if (tr) {
146 pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
147 if (pid == FTRACE_PID_IGNORE)
148 return;
149 if (pid != FTRACE_PID_TRACE &&
150 pid != current->pid)
151 return;
152 }
df4fc315 153
d19ad077 154 op->saved_func(ip, parent_ip, op, fregs);
155}
156
157static void ftrace_sync_ipi(void *data)
158{
159 /* Probably not needed, but do it anyway */
160 smp_rmb();
161}
162
163static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
164{
165 /*
ba27f2bc 166 * If this is a dynamic, RCU, or per CPU ops, or we force list func,
167 * then it needs to call the list anyway.
168 */
169 if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
170 FTRACE_FORCE_LIST_FUNC)
171 return ftrace_ops_list_func;
172
173 return ftrace_ops_get_func(ops);
174}
175
176static void update_ftrace_function(void)
177{
178 ftrace_func_t func;
179
180 /*
181 * Prepare the ftrace_ops that the arch callback will use.
182 * If there's only one ftrace_ops registered, the ftrace_ops_list
183 * will point to the ops we want.
184 */
185 set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
186 lockdep_is_held(&ftrace_lock));
187
188 /* If there's no ftrace_ops registered, just call the stub function */
f86f4180 189 if (set_function_trace_op == &ftrace_list_end) {
190 func = ftrace_stub;
191
192 /*
193 * If we are at the end of the list and this ops is
194 * recursion safe and not dynamic and the arch supports passing ops,
195 * then have the mcount trampoline call the function directly.
cdbe61bf 196 */
197 } else if (rcu_dereference_protected(ftrace_ops_list->next,
198 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
00ccbf2f 199 func = ftrace_ops_get_list_func(ftrace_ops_list);
f7aad4e1 200
201 } else {
202 /* Just use the default ftrace_ops */
405e1d83 203 set_function_trace_op = &ftrace_list_end;
b848914c 204 func = ftrace_ops_list_func;
2f5f6ad9 205 }
2b499381 206
207 update_function_graph_func();
208
209 /* If there's no change, then do nothing more here */
210 if (ftrace_trace_function == func)
211 return;
212
213 /*
214 * If we are using the list function, it doesn't care
215 * about the function_trace_ops.
216 */
217 if (func == ftrace_ops_list_func) {
218 ftrace_trace_function = func;
219 /*
220 * Don't even bother setting function_trace_ops,
221 * it would be racy to do so anyway.
222 */
223 return;
224 }
225
226#ifndef CONFIG_DYNAMIC_FTRACE
227 /*
228 * For static tracing, we need to be a bit more careful.
 229 * The function change takes effect immediately. Thus,
fdda88d3 230 * we need to coordinate the setting of the function_trace_ops
231 * with the setting of the ftrace_trace_function.
232 *
233 * Set the function to the list ops, which will call the
234 * function we want, albeit indirectly, but it handles the
235 * ftrace_ops and doesn't depend on function_trace_op.
236 */
237 ftrace_trace_function = ftrace_ops_list_func;
238 /*
239 * Make sure all CPUs see this. Yes this is slow, but static
240 * tracing is slow and nasty to have enabled.
241 */
e5a971d7 242 synchronize_rcu_tasks_rude();
243 /* Now all cpus are using the list ops. */
244 function_trace_op = set_function_trace_op;
245 /* Make sure the function_trace_op is visible on all CPUs */
246 smp_wmb();
247 /* Nasty way to force a rmb on all cpus */
248 smp_call_function(ftrace_sync_ipi, NULL, 1);
249 /* OK, we are all set to update the ftrace_trace_function now! */
250#endif /* !CONFIG_DYNAMIC_FTRACE */
251
491d0dcf 252 ftrace_trace_function = func;
253}
254
255static void add_ftrace_ops(struct ftrace_ops __rcu **list,
256 struct ftrace_ops *ops)
3d083395 257{
258 rcu_assign_pointer(ops->next, *list);
259
16444a8a 260 /*
b848914c 261 * We are entering ops into the list but another
262 * CPU might be walking that list. We need to make sure
263 * the ops->next pointer is valid before another CPU sees
b848914c 264 * the ops pointer included into the list.
16444a8a 265 */
2b499381 266 rcu_assign_pointer(*list, ops);
267}
268
269static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
270 struct ftrace_ops *ops)
16444a8a 271{
16444a8a 272 struct ftrace_ops **p;
273
274 /*
275 * If we are removing the last function, then simply point
276 * to the ftrace_stub.
16444a8a 277 */
278 if (rcu_dereference_protected(*list,
279 lockdep_is_held(&ftrace_lock)) == ops &&
280 rcu_dereference_protected(ops->next,
281 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
2b499381 282 *list = &ftrace_list_end;
e6ea44e9 283 return 0;
284 }
285
2b499381 286 for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
287 if (*p == ops)
288 break;
289
290 if (*p != ops)
291 return -1;
292
293 *p = (*p)->next;
294 return 0;
295}
16444a8a 296
297static void ftrace_update_trampoline(struct ftrace_ops *ops);
298
3306fc4a 299int __register_ftrace_function(struct ftrace_ops *ops)
2b499381 300{
301 if (ops->flags & FTRACE_OPS_FL_DELETED)
302 return -EINVAL;
303
304 if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
305 return -EBUSY;
306
06aeaaea 307#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
308 /*
309 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
310 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
311 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
312 */
313 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
314 !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
315 return -EINVAL;
316
317 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
318 ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
319#endif
320 if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
321 return -EBUSY;
08f6fba5 322
a20deb3a 323 if (!is_kernel_core_data((unsigned long)ops))
324 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
325
ba27f2bc 326 add_ftrace_ops(&ftrace_ops_list, ops);
b848914c 327
328 /* Always save the function, and reset at unregistering */
329 ops->saved_func = ops->func;
330
345ddcc8 331 if (ftrace_pids_enabled(ops))
332 ops->func = ftrace_pid_func;
333
334 ftrace_update_trampoline(ops);
335
336 if (ftrace_enabled)
337 update_ftrace_function();
338
339 return 0;
340}
341
3306fc4a 342int __unregister_ftrace_function(struct ftrace_ops *ops)
343{
344 int ret;
345
346 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
347 return -EBUSY;
348
ba27f2bc 349 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
b848914c 350
351 if (ret < 0)
352 return ret;
b848914c 353
354 if (ftrace_enabled)
355 update_ftrace_function();
16444a8a 356
357 ops->func = ops->saved_func;
358
e6ea44e9 359 return 0;
360}
361
362static void ftrace_update_pid_func(void)
363{
364 struct ftrace_ops *op;
365
491d0dcf 366 /* Only do something if we are tracing something */
df4fc315 367 if (ftrace_trace_function == ftrace_stub)
10dd3ebe 368 return;
df4fc315 369
370 do_for_each_ftrace_op(op, ftrace_ops_list) {
371 if (op->flags & FTRACE_OPS_FL_PID) {
372 op->func = ftrace_pids_enabled(op) ?
373 ftrace_pid_func : op->saved_func;
374 ftrace_update_trampoline(op);
375 }
376 } while_for_each_ftrace_op(op);
377
491d0dcf 378 update_ftrace_function();
379}
380
381#ifdef CONFIG_FUNCTION_PROFILER
382struct ftrace_profile {
383 struct hlist_node node;
384 unsigned long ip;
385 unsigned long counter;
386#ifdef CONFIG_FUNCTION_GRAPH_TRACER
387 unsigned long long time;
e330b3bc 388 unsigned long long time_squared;
0706f1c4 389#endif
390};
391
392struct ftrace_profile_page {
393 struct ftrace_profile_page *next;
394 unsigned long index;
395 struct ftrace_profile records[];
396};
397
398struct ftrace_profile_stat {
399 atomic_t disabled;
400 struct hlist_head *hash;
401 struct ftrace_profile_page *pages;
402 struct ftrace_profile_page *start;
403 struct tracer_stat stat;
404};
405
406#define PROFILE_RECORDS_SIZE \
407 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
5072c59f 408
409#define PROFILES_PER_PAGE \
410 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
3d083395 411
412static int ftrace_profile_enabled __read_mostly;
413
414/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
415static DEFINE_MUTEX(ftrace_profile_lock);
416
cafb168a 417static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
493762fc 418
419#define FTRACE_PROFILE_HASH_BITS 10
420#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
493762fc 421
422static void *
423function_stat_next(void *v, int idx)
424{
425 struct ftrace_profile *rec = v;
426 struct ftrace_profile_page *pg;
bac429f0 427
493762fc 428 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
429
430 again:
431 if (idx != 0)
432 rec++;
433
434 if ((void *)rec >= (void *)&pg->records[pg->index]) {
435 pg = pg->next;
436 if (!pg)
437 return NULL;
438 rec = &pg->records[0];
439 if (!rec->counter)
440 goto again;
441 }
442
443 return rec;
444}
445
446static void *function_stat_start(struct tracer_stat *trace)
447{
448 struct ftrace_profile_stat *stat =
449 container_of(trace, struct ftrace_profile_stat, stat);
450
451 if (!stat || !stat->start)
452 return NULL;
453
454 return function_stat_next(&stat->start->records[0], 0);
455}
456
457#ifdef CONFIG_FUNCTION_GRAPH_TRACER
458/* function graph compares on total time */
80042c8f 459static int function_stat_cmp(const void *p1, const void *p2)
0706f1c4 460{
461 const struct ftrace_profile *a = p1;
462 const struct ftrace_profile *b = p2;
463
464 if (a->time < b->time)
465 return -1;
466 if (a->time > b->time)
467 return 1;
468 else
469 return 0;
470}
471#else
472/* not function graph compares against hits */
80042c8f 473static int function_stat_cmp(const void *p1, const void *p2)
bac429f0 474{
475 const struct ftrace_profile *a = p1;
476 const struct ftrace_profile *b = p2;
477
478 if (a->counter < b->counter)
479 return -1;
480 if (a->counter > b->counter)
481 return 1;
482 else
483 return 0;
484}
0706f1c4 485#endif
486
487static int function_stat_headers(struct seq_file *m)
488{
0706f1c4 489#ifdef CONFIG_FUNCTION_GRAPH_TRACER
490 seq_puts(m, " Function "
491 "Hit Time Avg s^2\n"
492 " -------- "
493 "--- ---- --- ---\n");
0706f1c4 494#else
495 seq_puts(m, " Function Hit\n"
496 " -------- ---\n");
0706f1c4 497#endif
498 return 0;
499}
500
501static int function_stat_show(struct seq_file *m, void *v)
502{
493762fc 503 struct ftrace_profile *rec = v;
bac429f0 504 char str[KSYM_SYMBOL_LEN];
3aaba20f 505 int ret = 0;
0706f1c4 506#ifdef CONFIG_FUNCTION_GRAPH_TRACER
507 static struct trace_seq s;
508 unsigned long long avg;
e330b3bc 509 unsigned long long stddev;
0706f1c4 510#endif
511 mutex_lock(&ftrace_profile_lock);
512
513 /* we raced with function_profile_reset() */
514 if (unlikely(rec->counter == 0)) {
515 ret = -EBUSY;
516 goto out;
517 }
bac429f0 518
8e436ca0 519#ifdef CONFIG_FUNCTION_GRAPH_TRACER
e31f7939 520 avg = div64_ul(rec->time, rec->counter);
521 if (tracing_thresh && (avg < tracing_thresh))
522 goto out;
523#endif
524
bac429f0 525 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
526 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
527
528#ifdef CONFIG_FUNCTION_GRAPH_TRACER
fa6f0cc7 529 seq_puts(m, " ");
34886c8b 530
531 /* Sample standard deviation (s^2) */
532 if (rec->counter <= 1)
533 stddev = 0;
534 else {
535 /*
536 * Apply Welford's method:
537 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
538 */
539 stddev = rec->counter * rec->time_squared -
540 rec->time * rec->time;
541
542 /*
543 * Divide only 1000 for ns^2 -> us^2 conversion.
544 * trace_print_graph_duration will divide 1000 again.
545 */
546 stddev = div64_ul(stddev,
547 rec->counter * (rec->counter - 1) * 1000);
548 }
549
550 trace_seq_init(&s);
551 trace_print_graph_duration(rec->time, &s);
552 trace_seq_puts(&s, " ");
553 trace_print_graph_duration(avg, &s);
554 trace_seq_puts(&s, " ");
555 trace_print_graph_duration(stddev, &s);
0706f1c4 556 trace_print_seq(m, &s);
557#endif
558 seq_putc(m, '\n');
559out:
560 mutex_unlock(&ftrace_profile_lock);
bac429f0 561
3aaba20f 562 return ret;
563}
564
cafb168a 565static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
bac429f0 566{
493762fc 567 struct ftrace_profile_page *pg;
bac429f0 568
cafb168a 569 pg = stat->pages = stat->start;
bac429f0 570
571 while (pg) {
572 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
573 pg->index = 0;
574 pg = pg->next;
575 }
576
cafb168a 577 memset(stat->hash, 0,
578 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
579}
bac429f0 580
172f7ba9 581static int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
582{
583 struct ftrace_profile_page *pg;
584 int functions;
585 int pages;
493762fc 586 int i;
bac429f0 587
493762fc 588 /* If we already allocated, do nothing */
cafb168a 589 if (stat->pages)
493762fc 590 return 0;
bac429f0 591
592 stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
593 if (!stat->pages)
493762fc 594 return -ENOMEM;
bac429f0 595
596#ifdef CONFIG_DYNAMIC_FTRACE
597 functions = ftrace_update_tot_cnt;
598#else
599 /*
600 * We do not know the number of functions that exist because
601 * dynamic tracing is what counts them. With past experience
602 * we have around 20K functions. That should be more than enough.
603 * It is highly unlikely we will execute every function in
604 * the kernel.
605 */
606 functions = 20000;
607#endif
608
cafb168a 609 pg = stat->start = stat->pages;
bac429f0 610
611 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
612
39e30cd1 613 for (i = 1; i < pages; i++) {
493762fc 614 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
493762fc 615 if (!pg->next)
318e0a73 616 goto out_free;
617 pg = pg->next;
618 }
619
620 return 0;
621
622 out_free:
623 pg = stat->start;
624 while (pg) {
625 unsigned long tmp = (unsigned long)pg;
626
627 pg = pg->next;
628 free_page(tmp);
629 }
630
631 stat->pages = NULL;
632 stat->start = NULL;
633
634 return -ENOMEM;
635}
636
cafb168a 637static int ftrace_profile_init_cpu(int cpu)
bac429f0 638{
cafb168a 639 struct ftrace_profile_stat *stat;
493762fc 640 int size;
bac429f0 641
642 stat = &per_cpu(ftrace_profile_stats, cpu);
643
644 if (stat->hash) {
493762fc 645 /* If the profile is already created, simply reset it */
cafb168a 646 ftrace_profile_reset(stat);
647 return 0;
648 }
bac429f0 649
650 /*
651 * We are profiling all functions, but usually only a few thousand
652 * functions are hit. We'll make a hash of 1024 items.
653 */
654 size = FTRACE_PROFILE_HASH_SIZE;
bac429f0 655
6396bb22 656 stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);
493762fc 657
cafb168a 658 if (!stat->hash)
659 return -ENOMEM;
660
318e0a73 661 /* Preallocate the function profiling pages */
662 if (ftrace_profile_pages_init(stat) < 0) {
663 kfree(stat->hash);
664 stat->hash = NULL;
665 return -ENOMEM;
666 }
667
668 return 0;
669}
670
671static int ftrace_profile_init(void)
672{
673 int cpu;
674 int ret = 0;
675
c4602c1c 676 for_each_possible_cpu(cpu) {
677 ret = ftrace_profile_init_cpu(cpu);
678 if (ret)
679 break;
680 }
681
682 return ret;
683}
684
493762fc 685/* interrupts must be disabled */
686static struct ftrace_profile *
687ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
bac429f0 688{
493762fc 689 struct ftrace_profile *rec;
bac429f0 690 struct hlist_head *hhd;
691 unsigned long key;
692
20079ebe 693 key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
cafb168a 694 hhd = &stat->hash[key];
695
696 if (hlist_empty(hhd))
697 return NULL;
698
1bb539ca 699 hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
bac429f0 700 if (rec->ip == ip)
701 return rec;
702 }
703
704 return NULL;
705}
706
707static void ftrace_add_profile(struct ftrace_profile_stat *stat,
708 struct ftrace_profile *rec)
709{
710 unsigned long key;
711
20079ebe 712 key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
cafb168a 713 hlist_add_head_rcu(&rec->node, &stat->hash[key]);
714}
715
716/*
717 * The memory is already allocated, this simply finds a new record to use.
718 */
493762fc 719static struct ftrace_profile *
318e0a73 720ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
721{
722 struct ftrace_profile *rec = NULL;
723
318e0a73 724 /* prevent recursion (from NMIs) */
cafb168a 725 if (atomic_inc_return(&stat->disabled) != 1)
726 goto out;
727
493762fc 728 /*
729 * Try to find the function again since an NMI
730 * could have added it
493762fc 731 */
cafb168a 732 rec = ftrace_find_profiled_func(stat, ip);
493762fc 733 if (rec)
cafb168a 734 goto out;
493762fc 735
736 if (stat->pages->index == PROFILES_PER_PAGE) {
737 if (!stat->pages->next)
738 goto out;
739 stat->pages = stat->pages->next;
bac429f0 740 }
493762fc 741
cafb168a 742 rec = &stat->pages->records[stat->pages->index++];
493762fc 743 rec->ip = ip;
cafb168a 744 ftrace_add_profile(stat, rec);
493762fc 745
bac429f0 746 out:
cafb168a 747 atomic_dec(&stat->disabled);
748
749 return rec;
750}
751
752static void
2f5f6ad9 753function_profile_call(unsigned long ip, unsigned long parent_ip,
d19ad077 754 struct ftrace_ops *ops, struct ftrace_regs *fregs)
bac429f0 755{
cafb168a 756 struct ftrace_profile_stat *stat;
493762fc 757 struct ftrace_profile *rec;
758 unsigned long flags;
759
760 if (!ftrace_profile_enabled)
761 return;
762
763 local_irq_save(flags);
cafb168a 764
bdffd893 765 stat = this_cpu_ptr(&ftrace_profile_stats);
0f6ce3de 766 if (!stat->hash || !ftrace_profile_enabled)
767 goto out;
768
769 rec = ftrace_find_profiled_func(stat, ip);
493762fc 770 if (!rec) {
318e0a73 771 rec = ftrace_profile_alloc(stat, ip);
772 if (!rec)
773 goto out;
774 }
775
776 rec->counter++;
777 out:
778 local_irq_restore(flags);
779}
780
0706f1c4 781#ifdef CONFIG_FUNCTION_GRAPH_TRACER
782static bool fgraph_graph_time = true;
783
784void ftrace_graph_graph_time_control(bool enable)
785{
786 fgraph_graph_time = enable;
787}
788
789static int profile_graph_entry(struct ftrace_graph_ent *trace)
790{
b0e21a61 791 struct ftrace_ret_stack *ret_stack;
8861dd30 792
a1e2e31d 793 function_profile_call(trace->func, 0, NULL, NULL);
8861dd30 794
795 /* If function graph is shutting down, ret_stack can be NULL */
796 if (!current->ret_stack)
797 return 0;
798
799 ret_stack = ftrace_graph_get_ret_stack(current, 0);
800 if (ret_stack)
801 ret_stack->subtime = 0;
8861dd30 802
803 return 1;
804}
805
806static void profile_graph_return(struct ftrace_graph_ret *trace)
807{
b0e21a61 808 struct ftrace_ret_stack *ret_stack;
cafb168a 809 struct ftrace_profile_stat *stat;
a2a16d6a 810 unsigned long long calltime;
0706f1c4 811 struct ftrace_profile *rec;
cafb168a 812 unsigned long flags;
813
814 local_irq_save(flags);
bdffd893 815 stat = this_cpu_ptr(&ftrace_profile_stats);
0f6ce3de 816 if (!stat->hash || !ftrace_profile_enabled)
817 goto out;
818
819 /* If the calltime was zero'd ignore it */
820 if (!trace->calltime)
821 goto out;
822
823 calltime = trace->rettime - trace->calltime;
824
55577204 825 if (!fgraph_graph_time) {
826
827 /* Append this call time to the parent time to subtract */
828 ret_stack = ftrace_graph_get_ret_stack(current, 1);
829 if (ret_stack)
830 ret_stack->subtime += calltime;
a2a16d6a 831
832 ret_stack = ftrace_graph_get_ret_stack(current, 0);
833 if (ret_stack && ret_stack->subtime < calltime)
834 calltime -= ret_stack->subtime;
835 else
836 calltime = 0;
837 }
838
cafb168a 839 rec = ftrace_find_profiled_func(stat, trace->func);
e330b3bc 840 if (rec) {
a2a16d6a 841 rec->time += calltime;
842 rec->time_squared += calltime * calltime;
843 }
a2a16d6a 844
cafb168a 845 out:
846 local_irq_restore(flags);
847}
848
849static struct fgraph_ops fprofiler_ops = {
850 .entryfunc = &profile_graph_entry,
851 .retfunc = &profile_graph_return,
852};
853
854static int register_ftrace_profiler(void)
855{
688f7089 856 return register_ftrace_graph(&fprofiler_ops);
857}
858
859static void unregister_ftrace_profiler(void)
860{
688f7089 861 unregister_ftrace_graph(&fprofiler_ops);
862}
863#else
bd38c0e6 864static struct ftrace_ops ftrace_profile_ops __read_mostly = {
fb9fb015 865 .func = function_profile_call,
a25d036d 866 .flags = FTRACE_OPS_FL_INITIALIZED,
33b7f99c 867 INIT_OPS_HASH(ftrace_profile_ops)
868};
869
870static int register_ftrace_profiler(void)
871{
872 return register_ftrace_function(&ftrace_profile_ops);
873}
874
875static void unregister_ftrace_profiler(void)
876{
877 unregister_ftrace_function(&ftrace_profile_ops);
878}
879#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
880
881static ssize_t
882ftrace_profile_write(struct file *filp, const char __user *ubuf,
883 size_t cnt, loff_t *ppos)
884{
885 unsigned long val;
886 int ret;
887
888 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
889 if (ret)
890 return ret;
891
892 val = !!val;
893
894 mutex_lock(&ftrace_profile_lock);
895 if (ftrace_profile_enabled ^ val) {
896 if (val) {
897 ret = ftrace_profile_init();
898 if (ret < 0) {
899 cnt = ret;
900 goto out;
901 }
902
903 ret = register_ftrace_profiler();
904 if (ret < 0) {
905 cnt = ret;
906 goto out;
907 }
908 ftrace_profile_enabled = 1;
909 } else {
910 ftrace_profile_enabled = 0;
911 /*
912 * unregister_ftrace_profiler calls stop_machine
 74401729 913 * so this acts like a synchronize_rcu.
0f6ce3de 914 */
0706f1c4 915 unregister_ftrace_profiler();
916 }
917 }
493762fc 918 out:
919 mutex_unlock(&ftrace_profile_lock);
920
cf8517cf 921 *ppos += cnt;
922
923 return cnt;
924}
925
926static ssize_t
927ftrace_profile_read(struct file *filp, char __user *ubuf,
928 size_t cnt, loff_t *ppos)
929{
fb9fb015 930 char buf[64]; /* big enough to hold a number */
931 int r;
932
933 r = sprintf(buf, "%u\n", ftrace_profile_enabled);
934 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
935}
936
937static const struct file_operations ftrace_profile_fops = {
938 .open = tracing_open_generic,
939 .read = ftrace_profile_read,
940 .write = ftrace_profile_write,
6038f373 941 .llseek = default_llseek,
942};
943
944/* used to initialize the real stat files */
945static struct tracer_stat function_stats __initdata = {
946 .name = "functions",
947 .stat_start = function_stat_start,
948 .stat_next = function_stat_next,
949 .stat_cmp = function_stat_cmp,
950 .stat_headers = function_stat_headers,
951 .stat_show = function_stat_show
952};
953
8434dc93 954static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
bac429f0 955{
cafb168a 956 struct ftrace_profile_stat *stat;
cafb168a 957 char *name;
bac429f0 958 int ret;
959 int cpu;
960
961 for_each_possible_cpu(cpu) {
962 stat = &per_cpu(ftrace_profile_stats, cpu);
963
6363c6b5 964 name = kasprintf(GFP_KERNEL, "function%d", cpu);
965 if (!name) {
966 /*
967 * The files created are permanent, if something happens
968 * we still do not free memory.
969 */
970 WARN(1,
971 "Could not allocate stat file for cpu %d\n",
972 cpu);
973 return;
974 }
975 stat->stat = function_stats;
976 stat->stat.name = name;
977 ret = register_stat_tracer(&stat->stat);
978 if (ret) {
979 WARN(1,
980 "Could not register function stat for cpu %d\n",
981 cpu);
982 kfree(name);
983 return;
984 }
985 }
986
987 trace_create_file("function_profile_enabled",
988 TRACE_MODE_WRITE, d_tracer, NULL,
989 &ftrace_profile_fops);
990}
991
bac429f0 992#else /* CONFIG_FUNCTION_PROFILER */
8434dc93 993static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
994{
995}
996#endif /* CONFIG_FUNCTION_PROFILER */
997
998#ifdef CONFIG_DYNAMIC_FTRACE
999
1000static struct ftrace_ops *removed_ops;
1001
1002/*
1003 * Set when doing a global update, like enabling all recs or disabling them.
1004 * It is not set when just updating a single ftrace_ops.
1005 */
1006static bool update_all_ops;
1007
1008#ifndef CONFIG_FTRACE_MCOUNT_RECORD
1009# error Dynamic ftrace depends on MCOUNT_RECORD
1010#endif
1011
1012struct ftrace_func_probe {
1013 struct ftrace_probe_ops *probe_ops;
1014 struct ftrace_ops ops;
1015 struct trace_array *tr;
1016 struct list_head list;
6e444319 1017 void *data;
1018 int ref;
1019};
1020
1021/*
1022 * We make these constant because no one should touch them,
1023 * but they are used as the default "empty hash", to avoid allocating
1024 * it all the time. These are in a read only section such that if
1025 * anyone does try to modify it, it will cause an exception.
1026 */
1027static const struct hlist_head empty_buckets[1];
1028static const struct ftrace_hash empty_hash = {
1029 .buckets = (struct hlist_head *)empty_buckets,
1cf41dd7 1030};
33dc9b12 1031#define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
493762fc 1032
3306fc4a 1033struct ftrace_ops global_ops = {
1034 .func = ftrace_stub,
1035 .local_hash.notrace_hash = EMPTY_HASH,
1036 .local_hash.filter_hash = EMPTY_HASH,
1037 INIT_OPS_HASH(global_ops)
a25d036d 1038 .flags = FTRACE_OPS_FL_INITIALIZED |
e3eea140 1039 FTRACE_OPS_FL_PID,
1040};
1041
aec0be2d 1042/*
f2cc020d 1043 * Used by the stack unwinder to know about dynamic ftrace trampolines.
aec0be2d 1044 */
6be7fa3c 1045struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
aec0be2d 1046{
6be7fa3c 1047 struct ftrace_ops *op = NULL;
1048
1049 /*
1050 * Some of the ops may be dynamically allocated,
74401729 1051 * they are freed after a synchronize_rcu().
1052 */
1053 preempt_disable_notrace();
1054
1055 do_for_each_ftrace_op(op, ftrace_ops_list) {
1056 /*
1057 * This is to check for dynamically allocated trampolines.
1058 * Trampolines that are in kernel text will have
1059 * core_kernel_text() return true.
1060 */
1061 if (op->trampoline && op->trampoline_size)
1062 if (addr >= op->trampoline &&
1063 addr < op->trampoline + op->trampoline_size) {
1064 preempt_enable_notrace();
1065 return op;
1066 }
1067 } while_for_each_ftrace_op(op);
1068 preempt_enable_notrace();
1069
1070 return NULL;
1071}
1072
1073/*
1074 * This is used by __kernel_text_address() to return true if the
1075 * address is on a dynamically allocated trampoline that would
1076 * not return true for either core_kernel_text() or
1077 * is_module_text_address().
1078 */
1079bool is_ftrace_trampoline(unsigned long addr)
1080{
1081 return ftrace_ops_trampoline(addr) != NULL;
1082}
1083
1084struct ftrace_page {
1085 struct ftrace_page *next;
a7900875 1086 struct dyn_ftrace *records;
493762fc 1087 int index;
db42523b 1088 int order;
1089};
1090
1091#define ENTRY_SIZE sizeof(struct dyn_ftrace)
1092#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
493762fc 1093
1094static struct ftrace_page *ftrace_pages_start;
1095static struct ftrace_page *ftrace_pages;
1096
1097static __always_inline unsigned long
1098ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
1099{
1100 if (hash->size_bits > 0)
1101 return hash_long(ip, hash->size_bits);
1102
1103 return 0;
1104}
1105
1106/* Only use this function if ftrace_hash_empty() has already been tested */
1107static __always_inline struct ftrace_func_entry *
1108__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1109{
1110 unsigned long key;
1111 struct ftrace_func_entry *entry;
1112 struct hlist_head *hhd;
b448c4e3 1113
2b0cce0e 1114 key = ftrace_hash_key(hash, ip);
1115 hhd = &hash->buckets[key];
1116
1bb539ca 1117 hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1118 if (entry->ip == ip)
1119 return entry;
1120 }
1121 return NULL;
1122}
1123
1124/**
1125 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
1126 * @hash: The hash to look at
1127 * @ip: The instruction pointer to test
1128 *
1129 * Search a given @hash to see if a given instruction pointer (@ip)
1130 * exists in it.
1131 *
1132 * Returns the entry that holds the @ip if found. NULL otherwise.
1133 */
1134struct ftrace_func_entry *
1135ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1136{
1137 if (ftrace_hash_empty(hash))
1138 return NULL;
1139
1140 return __ftrace_lookup_ip(hash, ip);
1141}
1142
1143static void __add_hash_entry(struct ftrace_hash *hash,
1144 struct ftrace_func_entry *entry)
b448c4e3 1145{
1146 struct hlist_head *hhd;
1147 unsigned long key;
1148
2b0cce0e 1149 key = ftrace_hash_key(hash, entry->ip);
1150 hhd = &hash->buckets[key];
1151 hlist_add_head(&entry->hlist, hhd);
1152 hash->count++;
1153}
1154
1155static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1156{
1157 struct ftrace_func_entry *entry;
1158
1159 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1160 if (!entry)
1161 return -ENOMEM;
1162
1163 entry->ip = ip;
1164 __add_hash_entry(hash, entry);
1165
1166 return 0;
1167}
1168
1169static void
33dc9b12 1170free_hash_entry(struct ftrace_hash *hash,
1171 struct ftrace_func_entry *entry)
1172{
1173 hlist_del(&entry->hlist);
1174 kfree(entry);
1175 hash->count--;
1176}
1177
1178static void
1179remove_hash_entry(struct ftrace_hash *hash,
1180 struct ftrace_func_entry *entry)
1181{
eee8ded1 1182 hlist_del_rcu(&entry->hlist);
1183 hash->count--;
1184}
1185
1186static void ftrace_hash_clear(struct ftrace_hash *hash)
1187{
1188 struct hlist_head *hhd;
b67bfe0d 1189 struct hlist_node *tn;
1190 struct ftrace_func_entry *entry;
1191 int size = 1 << hash->size_bits;
1192 int i;
1193
1194 if (!hash->count)
1195 return;
1196
1197 for (i = 0; i < size; i++) {
1198 hhd = &hash->buckets[i];
b67bfe0d 1199 hlist_for_each_entry_safe(entry, tn, hhd, hlist)
33dc9b12 1200 free_hash_entry(hash, entry);
1201 }
1202 FTRACE_WARN_ON(hash->count);
1203}
1204
1205static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
1206{
1207 list_del(&ftrace_mod->list);
1208 kfree(ftrace_mod->module);
1209 kfree(ftrace_mod->func);
1210 kfree(ftrace_mod);
1211}
1212
1213static void clear_ftrace_mod_list(struct list_head *head)
1214{
1215 struct ftrace_mod_load *p, *n;
1216
1217 /* stack tracer isn't supported yet */
1218 if (!head)
1219 return;
1220
1221 mutex_lock(&ftrace_lock);
1222 list_for_each_entry_safe(p, n, head, list)
1223 free_ftrace_mod(p);
1224 mutex_unlock(&ftrace_lock);
1225}
1226
1227static void free_ftrace_hash(struct ftrace_hash *hash)
1228{
1229 if (!hash || hash == EMPTY_HASH)
1230 return;
1231 ftrace_hash_clear(hash);
1232 kfree(hash->buckets);
1233 kfree(hash);
1234}
1235
1236static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1237{
1238 struct ftrace_hash *hash;
1239
1240 hash = container_of(rcu, struct ftrace_hash, rcu);
1241 free_ftrace_hash(hash);
1242}
1243
1244static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1245{
1246 if (!hash || hash == EMPTY_HASH)
1247 return;
74401729 1248 call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
1249}
1250
1251void ftrace_free_filter(struct ftrace_ops *ops)
1252{
f04f24fb 1253 ftrace_ops_init(ops);
1254 free_ftrace_hash(ops->func_hash->filter_hash);
1255 free_ftrace_hash(ops->func_hash->notrace_hash);
1256}
1257
1258static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1259{
1260 struct ftrace_hash *hash;
1261 int size;
1262
1263 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1264 if (!hash)
1265 return NULL;
1266
1267 size = 1 << size_bits;
47b0edcb 1268 hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1269
1270 if (!hash->buckets) {
1271 kfree(hash);
1272 return NULL;
1273 }
1274
1275 hash->size_bits = size_bits;
1276
1277 return hash;
1278}
1279
1280
1281static int ftrace_add_mod(struct trace_array *tr,
1282 const char *func, const char *module,
1283 int enable)
1284{
1285 struct ftrace_mod_load *ftrace_mod;
1286 struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;
1287
1288 ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
1289 if (!ftrace_mod)
1290 return -ENOMEM;
1291
1292 ftrace_mod->func = kstrdup(func, GFP_KERNEL);
1293 ftrace_mod->module = kstrdup(module, GFP_KERNEL);
1294 ftrace_mod->enable = enable;
1295
1296 if (!ftrace_mod->func || !ftrace_mod->module)
1297 goto out_free;
1298
1299 list_add(&ftrace_mod->list, mod_head);
1300
1301 return 0;
1302
1303 out_free:
1304 free_ftrace_mod(ftrace_mod);
1305
1306 return -ENOMEM;
1307}
1308
1309static struct ftrace_hash *
1310alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1311{
1312 struct ftrace_func_entry *entry;
1313 struct ftrace_hash *new_hash;
1314 int size;
1315 int ret;
1316 int i;
1317
1318 new_hash = alloc_ftrace_hash(size_bits);
1319 if (!new_hash)
1320 return NULL;
1321
1322 if (hash)
1323 new_hash->flags = hash->flags;
1324
33dc9b12 1325 /* Empty hash? */
06a51d93 1326 if (ftrace_hash_empty(hash))
1327 return new_hash;
1328
1329 size = 1 << hash->size_bits;
1330 for (i = 0; i < size; i++) {
b67bfe0d 1331 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1332 ret = add_hash_entry(new_hash, entry->ip);
1333 if (ret < 0)
1334 goto free_hash;
1335 }
1336 }
1337
1338 FTRACE_WARN_ON(new_hash->count != hash->count);
1339
1340 return new_hash;
1341
1342 free_hash:
1343 free_ftrace_hash(new_hash);
1344 return NULL;
1345}
1346
41fb61c2 1347static void
84261912 1348ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
41fb61c2 1349static void
84261912 1350ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
41fb61c2 1351
1352static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1353 struct ftrace_hash *new_hash);
1354
714641c3 1355static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size)
1356{
1357 struct ftrace_func_entry *entry;
07fd5515 1358 struct ftrace_hash *new_hash;
1359 struct hlist_head *hhd;
1360 struct hlist_node *tn;
1361 int bits = 0;
1362 int i;
1363
33dc9b12 1364 /*
1365 * Use around half the size (max bit of it), but
1366 * a minimum of 2 is fine (as size of 0 or 1 both give 1 for bits).
33dc9b12 1367 */
be493132 1368 bits = fls(size / 2);
1369
1370 /* Don't allocate too much */
1371 if (bits > FTRACE_HASH_MAX_BITS)
1372 bits = FTRACE_HASH_MAX_BITS;
1373
1374 new_hash = alloc_ftrace_hash(bits);
1375 if (!new_hash)
3e278c0d 1376 return NULL;
33dc9b12 1377
1378 new_hash->flags = src->flags;
1379
1380 size = 1 << src->size_bits;
1381 for (i = 0; i < size; i++) {
1382 hhd = &src->buckets[i];
b67bfe0d 1383 hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
33dc9b12 1384 remove_hash_entry(src, entry);
07fd5515 1385 __add_hash_entry(new_hash, entry);
1386 }
1387 }
1388 return new_hash;
1389}
1390
1391static struct ftrace_hash *
1392__ftrace_hash_move(struct ftrace_hash *src)
1393{
1394 int size = src->count;
1395
1396 /*
1397 * If the new source is empty, just return the empty_hash.
1398 */
1399 if (ftrace_hash_empty(src))
1400 return EMPTY_HASH;
1401
1402 return dup_hash(src, size);
1403}
1404
1405static int
1406ftrace_hash_move(struct ftrace_ops *ops, int enable,
1407 struct ftrace_hash **dst, struct ftrace_hash *src)
1408{
1409 struct ftrace_hash *new_hash;
1410 int ret;
1411
1412 /* Reject setting notrace hash on IPMODIFY ftrace_ops */
1413 if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
1414 return -EINVAL;
1415
1416 new_hash = __ftrace_hash_move(src);
1417 if (!new_hash)
1418 return -ENOMEM;
1419
1420 /* Make sure this can be applied if it is IPMODIFY ftrace_ops */
1421 if (enable) {
1422 /* IPMODIFY should be updated only when filter_hash updating */
1423 ret = ftrace_hash_ipmodify_update(ops, new_hash);
1424 if (ret < 0) {
1425 free_ftrace_hash(new_hash);
1426 return ret;
1427 }
1428 }
1429
1430 /*
1431 * Remove the current set, update the hash and add
1432 * them back.
1433 */
84261912 1434 ftrace_hash_rec_disable_modify(ops, enable);
5c27c775 1435
07fd5515 1436 rcu_assign_pointer(*dst, new_hash);
07fd5515 1437
84261912 1438 ftrace_hash_rec_enable_modify(ops, enable);
41fb61c2 1439
5c27c775 1440 return 0;
1441}
1442
1443static bool hash_contains_ip(unsigned long ip,
1444 struct ftrace_ops_hash *hash)
1445{
1446 /*
1447 * The function record is a match if it exists in the filter
fdda88d3 1448 * hash and not in the notrace hash. Note, an empty hash is
1449 * considered a match for the filter hash, but an empty
1450 * notrace hash is considered not in the notrace hash.
1451 */
1452 return (ftrace_hash_empty(hash->filter_hash) ||
2b2c279c 1453 __ftrace_lookup_ip(hash->filter_hash, ip)) &&
fef5aeee 1454 (ftrace_hash_empty(hash->notrace_hash) ||
2b2c279c 1455 !__ftrace_lookup_ip(hash->notrace_hash, ip));
1456}
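/*
 * For example: an empty filter_hash together with a non-empty notrace_hash
 * means "every function matches except those listed in notrace"; an empty
 * notrace_hash never excludes anything.
 */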
1457
1458/*
1459 * Test the hashes for this ops to see if we want to call
1460 * the ops->func or not.
1461 *
1462 * It's a match if the ip is in the ops->filter_hash or
1463 * the filter_hash does not exist or is empty,
1464 * AND
1465 * the ip is not in the ops->notrace_hash.
1466 *
1467 * This needs to be called with preemption disabled as
74401729 1468 * the hashes are freed with call_rcu().
b848914c 1469 */
3306fc4a 1470int
195a8afc 1471ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
b848914c 1472{
fef5aeee 1473 struct ftrace_ops_hash hash;
1474 int ret;
1475
1476#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1477 /*
1478 * There's a small race when adding ops that the ftrace handler
1479 * that wants regs, may be called without them. We can not
1480 * allow that handler to be called if regs is NULL.
1481 */
1482 if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1483 return 0;
1484#endif
1485
1486 rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
1487 rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);
b848914c 1488
fef5aeee 1489 if (hash_contains_ip(ip, &hash))
1490 ret = 1;
1491 else
1492 ret = 0;
1493
1494 return ret;
1495}
1496
1497/*
1498 * This is a double for. Do not use 'break' to break out of the loop,
1499 * you must use a goto.
1500 */
1501#define do_for_each_ftrace_rec(pg, rec) \
1502 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
1503 int _____i; \
1504 for (_____i = 0; _____i < pg->index; _____i++) { \
1505 rec = &pg->records[_____i];
1506
1507#define while_for_each_ftrace_rec() \
1508 } \
1509 }
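/*
 * A minimal usage sketch of the iterator pair above, following the pattern
 * used throughout this file (see for instance __ftrace_hash_rec_update()):
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->flags & FTRACE_FL_DISABLED)
 *			continue;
 *		...	(act on rec; leave early with a goto, never break)
 *	} while_for_each_ftrace_rec();
 */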
1510
1511
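/*
 * Comparison callback for bsearch(). Callers (see lookup_rec() below)
 * overload key->flags with the end of the address range being searched,
 * so a record matches when [key->ip, key->flags] overlaps
 * [rec->ip, rec->ip + MCOUNT_INSN_SIZE).
 */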
1512static int ftrace_cmp_recs(const void *a, const void *b)
1513{
1514 const struct dyn_ftrace *key = a;
1515 const struct dyn_ftrace *rec = b;
5855fead 1516
a650e02a 1517 if (key->flags < rec->ip)
5855fead 1518 return -1;
1519 if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1520 return 1;
1521 return 0;
1522}
1523
1524static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
1525{
1526 struct ftrace_page *pg;
1527 struct dyn_ftrace *rec = NULL;
1528 struct dyn_ftrace key;
1529
1530 key.ip = start;
1531 key.flags = end; /* overload flags, as it is unsigned long */
1532
1533 for (pg = ftrace_pages_start; pg; pg = pg->next) {
1534 if (end < pg->records[0].ip ||
1535 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1536 continue;
1537 rec = bsearch(&key, pg->records, pg->index,
1538 sizeof(struct dyn_ftrace),
1539 ftrace_cmp_recs);
1540 if (rec)
1541 break;
1542 }
1543 return rec;
1544}
1545
1546/**
1547 * ftrace_location_range - return the first address of a traced location
1548 * if it touches the given ip range
1549 * @start: start of range to search.
1550 * @end: end of range to search (inclusive). @end points to the last byte
1551 * to check.
1552 *
1553 * Returns rec->ip if the related ftrace location is a least partly within
1554 * the given address range. That is, the first address of the instruction
1555 * that is either a NOP or call to the function tracer. It checks the ftrace
1556 * internal tables to determine if the address belongs or not.
1557 */
1558unsigned long ftrace_location_range(unsigned long start, unsigned long end)
c88fd863 1559{
c88fd863 1560 struct dyn_ftrace *rec;
5855fead 1561
1562 rec = lookup_rec(start, end);
1563 if (rec)
1564 return rec->ip;
1565
1566 return 0;
1567}
1568
a650e02a 1569/**
aebfd125 1570 * ftrace_location - return the ftrace location
1571 * @ip: the instruction pointer to check
1572 *
1573 * If @ip matches the ftrace location, return @ip.
1574 * If @ip matches sym+0, return sym's ftrace location.
1575 * Otherwise, return 0.
a650e02a 1576 */
f0cf973a 1577unsigned long ftrace_location(unsigned long ip)
a650e02a 1578{
1579 struct dyn_ftrace *rec;
1580 unsigned long offset;
1581 unsigned long size;
1582
1583 rec = lookup_rec(ip, ip);
1584 if (!rec) {
1585 if (!kallsyms_lookup_size_offset(ip, &size, &offset))
1586 goto out;
1587
1588 /* map sym+0 to __fentry__ */
1589 if (!offset)
1590 rec = lookup_rec(ip, ip + size - 1);
1591 }
1592
1593 if (rec)
1594 return rec->ip;
1595
1596out:
1597 return 0;
1598}
1599
1600/**
1601 * ftrace_text_reserved - return true if range contains an ftrace location
1602 * @start: start of range to search
1603 * @end: end of range to search (inclusive). @end points to the last byte to check.
1604 *
 1605 * Returns 1 if @start and @end contain an ftrace location.
1606 * That is, the instruction that is either a NOP or call to
1607 * the function tracer. It checks the ftrace internal tables to
1608 * determine if the address belongs or not.
1609 */
d88471cb 1610int ftrace_text_reserved(const void *start, const void *end)
a650e02a 1611{
f0cf973a
SR
1612 unsigned long ret;
1613
1614 ret = ftrace_location_range((unsigned long)start,
1615 (unsigned long)end);
1616
1617 return (int)!!ret;
a650e02a
SR
1618}
1619
4fbb48cb
SRRH
1620/* Test if ops registered to this rec needs regs */
1621static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
1622{
1623 struct ftrace_ops *ops;
1624 bool keep_regs = false;
1625
1626 for (ops = ftrace_ops_list;
1627 ops != &ftrace_list_end; ops = ops->next) {
1628 /* pass rec in as regs to have non-NULL val */
1629 if (ftrace_ops_test(ops, rec->ip, rec)) {
1630 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1631 keep_regs = true;
1632 break;
1633 }
1634 }
1635 }
1636
1637 return keep_regs;
1638}
1639
1640static struct ftrace_ops *
1641ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
1642static struct ftrace_ops *
1643ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
1644static struct ftrace_ops *
1645ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
1646
84b6d3e6 1647static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
1648 int filter_hash,
1649 bool inc)
1650{
1651 struct ftrace_hash *hash;
1652 struct ftrace_hash *other_hash;
1653 struct ftrace_page *pg;
1654 struct dyn_ftrace *rec;
84b6d3e6 1655 bool update = false;
ed926f9b 1656 int count = 0;
8c08f0d5 1657 int all = false;
1658
1659 /* Only update if the ops has been registered */
1660 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
84b6d3e6 1661 return false;
1662
1663 /*
1664 * In the filter_hash case:
1665 * If the count is zero, we update all records.
1666 * Otherwise we just update the items in the hash.
1667 *
1668 * In the notrace_hash case:
1669 * We enable the update in the hash.
1670 * As disabling notrace means enabling the tracing,
1671 * and enabling notrace means disabling, the inc variable
1672 * gets inversed.
1673 */
1674 if (filter_hash) {
1675 hash = ops->func_hash->filter_hash;
1676 other_hash = ops->func_hash->notrace_hash;
06a51d93 1677 if (ftrace_hash_empty(hash))
8c08f0d5 1678 all = true;
1679 } else {
1680 inc = !inc;
1681 hash = ops->func_hash->notrace_hash;
1682 other_hash = ops->func_hash->filter_hash;
1683 /*
1684 * If the notrace hash has no items,
1685 * then there's nothing to do.
1686 */
06a51d93 1687 if (ftrace_hash_empty(hash))
84b6d3e6 1688 return false;
1689 }
1690
1691 do_for_each_ftrace_rec(pg, rec) {
1692 int in_other_hash = 0;
1693 int in_hash = 0;
1694 int match = 0;
1695
1696 if (rec->flags & FTRACE_FL_DISABLED)
1697 continue;
1698
1699 if (all) {
1700 /*
1701 * Only the filter_hash affects all records.
1702 * Update if the record is not in the notrace hash.
1703 */
b848914c 1704 if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1705 match = 1;
1706 } else {
1707 in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1708 in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1709
1710 /*
1711 * If filter_hash is set, we want to match all functions
1712 * that are in the hash but not in the other hash.
ed926f9b 1713 *
1714 * If filter_hash is not set, then we are decrementing.
1715 * That means we match anything that is in the hash
1716 * and also in the other_hash. That is, we need to turn
1717 * off functions in the other hash because they are disabled
1718 * by this hash.
1719 */
1720 if (filter_hash && in_hash && !in_other_hash)
1721 match = 1;
1722 else if (!filter_hash && in_hash &&
06a51d93 1723 (in_other_hash || ftrace_hash_empty(other_hash)))
1724 match = 1;
1725 }
1726 if (!match)
1727 continue;
1728
1729 if (inc) {
1730 rec->flags++;
0376bde1 1731 if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
84b6d3e6 1732 return false;
79922b80 1733
1734 if (ops->flags & FTRACE_OPS_FL_DIRECT)
1735 rec->flags |= FTRACE_FL_DIRECT;
1736
1737 /*
1738 * If there's only a single callback registered to a
1739 * function, and the ops has a trampoline registered
1740 * for it, then we can call it directly.
1741 */
fef5aeee 1742 if (ftrace_rec_count(rec) == 1 && ops->trampoline)
79922b80 1743 rec->flags |= FTRACE_FL_TRAMP;
fef5aeee 1744 else
79922b80
SRRH
1745 /*
1746 * If we are adding another function callback
1747 * to this function, and the previous had a
1748 * custom trampoline in use, then we need to go
1749 * back to the default trampoline.
79922b80 1750 */
fef5aeee 1751 rec->flags &= ~FTRACE_FL_TRAMP;
79922b80 1752
1753 /*
1754 * If any ops wants regs saved for this function
1755 * then all ops will get saved regs.
1756 */
1757 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1758 rec->flags |= FTRACE_FL_REGS;
ed926f9b 1759 } else {
0376bde1 1760 if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
84b6d3e6 1761 return false;
ed926f9b 1762 rec->flags--;
79922b80 1763
763e34e7
SRV
1764 /*
1765 * Only the internal direct_ops should have the
1766 * DIRECT flag set. Thus, if it is removing a
1767 * function, then that function should no longer
1768 * be direct.
1769 */
1770 if (ops->flags & FTRACE_OPS_FL_DIRECT)
1771 rec->flags &= ~FTRACE_FL_DIRECT;
1772
4fbb48cb
SRRH
1773 /*
1774 * If the rec had REGS enabled and the ops that is
1775 * being removed had REGS set, then see if there is
1776 * still any ops for this record that wants regs.
1777 * If not, we can stop recording them.
1778 */
0376bde1 1779 if (ftrace_rec_count(rec) > 0 &&
4fbb48cb
SRRH
1780 rec->flags & FTRACE_FL_REGS &&
1781 ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1782 if (!test_rec_ops_needs_regs(rec))
1783 rec->flags &= ~FTRACE_FL_REGS;
1784 }
79922b80 1785
fef5aeee 1786 /*
a124692b
CJ
1787 * The TRAMP needs to be set only if rec count
1788 * is decremented to one, and the ops that is
1789 * left has a trampoline. As TRAMP can only be
1790 * enabled if there is only a single ops attached
1791 * to it.
fef5aeee 1792 */
a124692b 1793 if (ftrace_rec_count(rec) == 1 &&
4c75b0ff 1794 ftrace_find_tramp_ops_any_other(rec, ops))
a124692b
CJ
1795 rec->flags |= FTRACE_FL_TRAMP;
1796 else
1797 rec->flags &= ~FTRACE_FL_TRAMP;
fef5aeee 1798
79922b80
SRRH
1799 /*
1800 * flags will be cleared in ftrace_check_record()
1801 * if rec count is zero.
1802 */
ed926f9b
SR
1803 }
1804 count++;
84b6d3e6
JO
1805
1806 /* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
7375dca1 1807 update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE;
84b6d3e6 1808
1809 /* Shortcut, if we handled all records, we are done. */
1810 if (!all && count == hash->count)
84b6d3e6 1811 return update;
ed926f9b 1812 } while_for_each_ftrace_rec();
1813
1814 return update;
1815}
1816
84b6d3e6 1817static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
1818 int filter_hash)
1819{
84b6d3e6 1820 return __ftrace_hash_rec_update(ops, filter_hash, 0);
1821}
1822
84b6d3e6 1823static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
1824 int filter_hash)
1825{
84b6d3e6 1826 return __ftrace_hash_rec_update(ops, filter_hash, 1);
1827}
1828
1829static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
1830 int filter_hash, int inc)
1831{
1832 struct ftrace_ops *op;
1833
1834 __ftrace_hash_rec_update(ops, filter_hash, inc);
1835
1836 if (ops->func_hash != &global_ops.local_hash)
1837 return;
1838
1839 /*
1840 * If the ops shares the global_ops hash, then we need to update
1841 * all ops that are enabled and use this hash.
1842 */
1843 do_for_each_ftrace_op(op, ftrace_ops_list) {
1844 /* Already done */
1845 if (op == ops)
1846 continue;
1847 if (op->func_hash == &global_ops.local_hash)
1848 __ftrace_hash_rec_update(op, filter_hash, inc);
1849 } while_for_each_ftrace_op(op);
1850}
1851
1852static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
1853 int filter_hash)
1854{
1855 ftrace_hash_rec_update_modify(ops, filter_hash, 0);
1856}
1857
1858static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
1859 int filter_hash)
1860{
1861 ftrace_hash_rec_update_modify(ops, filter_hash, 1);
1862}
1863
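/*
 * A short scenario sketch for the *_modify helpers above: when the
 * filter of an ops that shares global_ops.local_hash is changed (for
 * instance through the set_ftrace_filter interface), the update
 * cannot stop at that one ops; every other registered ops whose
 * func_hash points at the same global hash must have its records
 * re-accounted as well, which is what ftrace_hash_rec_update_modify()
 * does by walking ftrace_ops_list.
 */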
f8b8be8a
MH
1864/*
1865 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
1866 * or there is nothing to update, -EBUSY if it detects a conflict of the flag
1867 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
1868 * Note that old_hash and new_hash have the following meanings:
1869 * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
1870 * - If the hash is EMPTY_HASH, it hits nothing
1871 * - Anything else hits the recs which match the hash entries.
1872 */
1873static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
1874 struct ftrace_hash *old_hash,
1875 struct ftrace_hash *new_hash)
1876{
1877 struct ftrace_page *pg;
1878 struct dyn_ftrace *rec, *end = NULL;
1879 int in_old, in_new;
1880
1881 /* Only update if the ops has been registered */
1882 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1883 return 0;
1884
1885 if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
1886 return 0;
1887
1888 /*
1889 * Since IPMODIFY is a very address-sensitive action, we do not
1890 * allow ftrace_ops to set all functions to new hash.
1891 */
1892 if (!new_hash || !old_hash)
1893 return -EINVAL;
1894
1895 /* Update rec->flags */
1896 do_for_each_ftrace_rec(pg, rec) {
546fece4
SRRH
1897
1898 if (rec->flags & FTRACE_FL_DISABLED)
1899 continue;
1900
f8b8be8a
MH
1901 /* We need to update only differences of filter_hash */
1902 in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1903 in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1904 if (in_old == in_new)
1905 continue;
1906
1907 if (in_new) {
1908 /* New entries must ensure no others are using it */
1909 if (rec->flags & FTRACE_FL_IPMODIFY)
1910 goto rollback;
1911 rec->flags |= FTRACE_FL_IPMODIFY;
1912 } else /* Removed entry */
1913 rec->flags &= ~FTRACE_FL_IPMODIFY;
1914 } while_for_each_ftrace_rec();
1915
1916 return 0;
1917
1918rollback:
1919 end = rec;
1920
1921 /* Roll back what we did above */
1922 do_for_each_ftrace_rec(pg, rec) {
546fece4
SRRH
1923
1924 if (rec->flags & FTRACE_FL_DISABLED)
1925 continue;
1926
f8b8be8a
MH
1927 if (rec == end)
1928 goto err_out;
1929
1930 in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1931 in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1932 if (in_old == in_new)
1933 continue;
1934
1935 if (in_new)
1936 rec->flags &= ~FTRACE_FL_IPMODIFY;
1937 else
1938 rec->flags |= FTRACE_FL_IPMODIFY;
1939 } while_for_each_ftrace_rec();
1940
1941err_out:
1942 return -EBUSY;
1943}
1944
1945static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
1946{
1947 struct ftrace_hash *hash = ops->func_hash->filter_hash;
1948
1949 if (ftrace_hash_empty(hash))
1950 hash = NULL;
1951
1952 return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
1953}
1954
1955/* Disabling always succeeds */
1956static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
1957{
1958 struct ftrace_hash *hash = ops->func_hash->filter_hash;
1959
1960 if (ftrace_hash_empty(hash))
1961 hash = NULL;
1962
1963 __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
1964}
1965
1966static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1967 struct ftrace_hash *new_hash)
1968{
1969 struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
1970
1971 if (ftrace_hash_empty(old_hash))
1972 old_hash = NULL;
1973
1974 if (ftrace_hash_empty(new_hash))
1975 new_hash = NULL;
1976
1977 return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
1978}
1979
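/*
 * A summary sketch of how the three helpers above drive
 * __ftrace_hash_update_ipmodify() (recall: EMPTY_HASH hits nothing,
 * NULL hits every record):
 *
 *   enable:  EMPTY_HASH  -> filter_hash   claim IPMODIFY on matching recs
 *   disable: filter_hash -> EMPTY_HASH    release IPMODIFY (never fails)
 *   update:  old filter  -> new filter    only the differences change
 *
 * An empty filter hash is passed in as NULL ("trace everything"),
 * which __ftrace_hash_update_ipmodify() rejects with -EINVAL when
 * enabling, because an ip-modifying ops is not allowed to hook every
 * function.
 */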
b05086c7 1980static void print_ip_ins(const char *fmt, const unsigned char *p)
b17e8a37 1981{
6c14133d 1982 char ins[MCOUNT_INSN_SIZE];
b17e8a37
SR
1983 int i;
1984
6c14133d
SRV
1985 if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) {
1986 printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
1987 return;
1988 }
1989
b17e8a37
SR
1990 printk(KERN_CONT "%s", fmt);
1991
1992 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
6c14133d 1993 printk(KERN_CONT "%s%02x", i ? ":" : "", ins[i]);
b17e8a37
SR
1994}
1995
02a392a0 1996enum ftrace_bug_type ftrace_bug_type;
b05086c7 1997const void *ftrace_expected;
02a392a0
SRRH
1998
1999static void print_bug_type(void)
2000{
2001 switch (ftrace_bug_type) {
2002 case FTRACE_BUG_UNKNOWN:
2003 break;
2004 case FTRACE_BUG_INIT:
2005 pr_info("Initializing ftrace call sites\n");
2006 break;
2007 case FTRACE_BUG_NOP:
2008 pr_info("Setting ftrace call site to NOP\n");
2009 break;
2010 case FTRACE_BUG_CALL:
2011 pr_info("Setting ftrace call site to call ftrace function\n");
2012 break;
2013 case FTRACE_BUG_UPDATE:
2014 pr_info("Updating ftrace call site to call a different ftrace function\n");
2015 break;
2016 }
2017}
2018
c88fd863
SR
2019/**
2020 * ftrace_bug - report and shutdown function tracer
2021 * @failed: The failed type (EFAULT, EINVAL, EPERM)
4fd3279b 2022 * @rec: The record that failed
c88fd863
SR
2023 *
2024 * The arch code that enables or disables the function tracing
2025 * can call ftrace_bug() when it has detected a problem in
2026 * modifying the code. @failed should be one of either:
2027 * EFAULT - if the problem happens on reading the @ip address
2028 * EINVAL - if what is read at @ip is not what was expected
9efb85c5 2029 * EPERM - if the problem happens on writing to the @ip address
c88fd863 2030 */
4fd3279b 2031void ftrace_bug(int failed, struct dyn_ftrace *rec)
b17e8a37 2032{
4fd3279b
SRRH
2033 unsigned long ip = rec ? rec->ip : 0;
2034
c143b775
CJ
2035 pr_info("------------[ ftrace bug ]------------\n");
2036
b17e8a37
SR
2037 switch (failed) {
2038 case -EFAULT:
b17e8a37 2039 pr_info("ftrace faulted on modifying ");
2062a4e8 2040 print_ip_sym(KERN_INFO, ip);
b17e8a37
SR
2041 break;
2042 case -EINVAL:
b17e8a37 2043 pr_info("ftrace failed to modify ");
2062a4e8 2044 print_ip_sym(KERN_INFO, ip);
b05086c7 2045 print_ip_ins(" actual: ", (unsigned char *)ip);
4fd3279b 2046 pr_cont("\n");
b05086c7
SRRH
2047 if (ftrace_expected) {
2048 print_ip_ins(" expected: ", ftrace_expected);
2049 pr_cont("\n");
2050 }
b17e8a37
SR
2051 break;
2052 case -EPERM:
b17e8a37 2053 pr_info("ftrace faulted on writing ");
2062a4e8 2054 print_ip_sym(KERN_INFO, ip);
b17e8a37
SR
2055 break;
2056 default:
b17e8a37 2057 pr_info("ftrace faulted on unknown error ");
2062a4e8 2058 print_ip_sym(KERN_INFO, ip);
b17e8a37 2059 }
02a392a0 2060 print_bug_type();
4fd3279b
SRRH
2061 if (rec) {
2062 struct ftrace_ops *ops = NULL;
2063
2064 pr_info("ftrace record flags: %lx\n", rec->flags);
2065 pr_cont(" (%ld)%s", ftrace_rec_count(rec),
2066 rec->flags & FTRACE_FL_REGS ? " R" : " ");
2067 if (rec->flags & FTRACE_FL_TRAMP_EN) {
2068 ops = ftrace_find_tramp_ops_any(rec);
39daa7b9
SRRH
2069 if (ops) {
2070 do {
2071 pr_cont("\ttramp: %pS (%pS)",
2072 (void *)ops->trampoline,
2073 (void *)ops->func);
2074 ops = ftrace_find_tramp_ops_next(rec, ops);
2075 } while (ops);
2076 } else
4fd3279b
SRRH
2077 pr_cont("\ttramp: ERROR!");
2078
2079 }
2080 ip = ftrace_get_addr_curr(rec);
39daa7b9 2081 pr_cont("\n expected tramp: %lx\n", ip);
4fd3279b 2082 }
c143b775
CJ
2083
2084 FTRACE_WARN_ON_ONCE(1);
b17e8a37
SR
2085}
2086
7375dca1 2087static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
5072c59f 2088{
64fbcd16 2089 unsigned long flag = 0UL;
e7d3737e 2090
02a392a0
SRRH
2091 ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2092
b7ffffbb
SRRH
2093 if (rec->flags & FTRACE_FL_DISABLED)
2094 return FTRACE_UPDATE_IGNORE;
2095
982c350b 2096 /*
30fb6aa7 2097 * If we are updating calls:
982c350b 2098 *
ed926f9b
SR
2099 * If the record has a ref count, then we need to enable it
2100 * because someone is using it.
982c350b 2101 *
ed926f9b
SR
2102 * Otherwise we make sure it's disabled.
2103 *
30fb6aa7 2104 * If we are disabling calls, then disable all records that
ed926f9b 2105 * are enabled.
982c350b 2106 */
0376bde1 2107 if (enable && ftrace_rec_count(rec))
ed926f9b 2108 flag = FTRACE_FL_ENABLED;
982c350b 2109
08f6fba5 2110 /*
79922b80
SRRH
2111 * If enabling and the REGS flag does not match the REGS_EN, or
2112 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
2113 * this record. Set flags to fail the compare against ENABLED.
763e34e7 2114 * Same for direct calls.
08f6fba5 2115 */
79922b80 2116 if (flag) {
763e34e7 2117 if (!(rec->flags & FTRACE_FL_REGS) !=
79922b80
SRRH
2118 !(rec->flags & FTRACE_FL_REGS_EN))
2119 flag |= FTRACE_FL_REGS;
2120
763e34e7 2121 if (!(rec->flags & FTRACE_FL_TRAMP) !=
79922b80
SRRH
2122 !(rec->flags & FTRACE_FL_TRAMP_EN))
2123 flag |= FTRACE_FL_TRAMP;
763e34e7
SRV
2124
2125 /*
2126 * Direct calls are special, as count matters.
2127 * We must test the record for direct, if the
2128 * DIRECT and DIRECT_EN do not match, but only
2129 * if the count is 1. That's because, if the
2130 * count is something other than one, we do not
2131 * want the direct enabled (it will be done via the
2132 * direct helper). But if DIRECT_EN is set, and
2133 * the count is not one, we need to clear it.
2134 */
2135 if (ftrace_rec_count(rec) == 1) {
2136 if (!(rec->flags & FTRACE_FL_DIRECT) !=
2137 !(rec->flags & FTRACE_FL_DIRECT_EN))
2138 flag |= FTRACE_FL_DIRECT;
2139 } else if (rec->flags & FTRACE_FL_DIRECT_EN) {
2140 flag |= FTRACE_FL_DIRECT;
2141 }
79922b80 2142 }
08f6fba5 2143
64fbcd16
XG
2144 /* If the state of this record hasn't changed, then do nothing */
2145 if ((rec->flags & FTRACE_FL_ENABLED) == flag)
c88fd863 2146 return FTRACE_UPDATE_IGNORE;
982c350b 2147
64fbcd16 2148 if (flag) {
08f6fba5
SR
2149 /* Save off if rec is being enabled (for return value) */
2150 flag ^= rec->flags & FTRACE_FL_ENABLED;
2151
2152 if (update) {
c88fd863 2153 rec->flags |= FTRACE_FL_ENABLED;
08f6fba5
SR
2154 if (flag & FTRACE_FL_REGS) {
2155 if (rec->flags & FTRACE_FL_REGS)
2156 rec->flags |= FTRACE_FL_REGS_EN;
2157 else
2158 rec->flags &= ~FTRACE_FL_REGS_EN;
2159 }
79922b80
SRRH
2160 if (flag & FTRACE_FL_TRAMP) {
2161 if (rec->flags & FTRACE_FL_TRAMP)
2162 rec->flags |= FTRACE_FL_TRAMP_EN;
2163 else
2164 rec->flags &= ~FTRACE_FL_TRAMP_EN;
2165 }
d19ad077 2166
763e34e7
SRV
2167 if (flag & FTRACE_FL_DIRECT) {
2168 /*
2169 * If there's only one user (direct_ops helper)
2170 * then we can call the direct function
2171 * directly (no ftrace trampoline).
2172 */
2173 if (ftrace_rec_count(rec) == 1) {
2174 if (rec->flags & FTRACE_FL_DIRECT)
2175 rec->flags |= FTRACE_FL_DIRECT_EN;
2176 else
2177 rec->flags &= ~FTRACE_FL_DIRECT_EN;
2178 } else {
2179 /*
2180 * Can only call directly if there's
2181 * only one callback to the function.
2182 */
2183 rec->flags &= ~FTRACE_FL_DIRECT_EN;
2184 }
2185 }
08f6fba5
SR
2186 }
2187
2188 /*
2189 * If this record is being updated from a nop, then
2190 * return UPDATE_MAKE_CALL.
08f6fba5
SR
2191 * Otherwise,
2192 * return UPDATE_MODIFY_CALL to tell the caller to convert
f1b2f2bd 2193 * from the save-regs to a non-save-regs function, or
79922b80 2194 * vice versa, or from a trampoline call.
08f6fba5 2195 */
02a392a0
SRRH
2196 if (flag & FTRACE_FL_ENABLED) {
2197 ftrace_bug_type = FTRACE_BUG_CALL;
08f6fba5 2198 return FTRACE_UPDATE_MAKE_CALL;
02a392a0 2199 }
f1b2f2bd 2200
02a392a0 2201 ftrace_bug_type = FTRACE_BUG_UPDATE;
f1b2f2bd 2202 return FTRACE_UPDATE_MODIFY_CALL;
c88fd863
SR
2203 }
2204
08f6fba5
SR
2205 if (update) {
2206 /* If there's no more users, clear all flags */
0376bde1 2207 if (!ftrace_rec_count(rec))
08f6fba5
SR
2208 rec->flags = 0;
2209 else
b24d443b
SRRH
2210 /*
2211 * Just disable the record, but keep the ops TRAMP
2212 * and REGS states. The _EN flags must be disabled though.
2213 */
2214 rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
763e34e7 2215 FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN);
08f6fba5 2216 }
c88fd863 2217
02a392a0 2218 ftrace_bug_type = FTRACE_BUG_NOP;
c88fd863
SR
2219 return FTRACE_UPDATE_MAKE_NOP;
2220}
2221
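/*
 * A worked illustration (not additional logic) of the
 * "flag ^= rec->flags & FTRACE_FL_ENABLED" step above: after the XOR,
 * the ENABLED bit survives in 'flag' only when the record is currently
 * a nop, so the two outcomes are:
 *
 *   rec disabled, being enabled:     ENABLED ^ 0       -> FTRACE_UPDATE_MAKE_CALL
 *   rec enabled, only REGS/TRAMP/
 *   DIRECT state changing:           ENABLED ^ ENABLED -> FTRACE_UPDATE_MODIFY_CALL
 */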
2222/**
6130722f 2223 * ftrace_update_record - set a record that now is tracing or not
c88fd863 2224 * @rec: the record to update
7375dca1 2225 * @enable: set to true if the record is tracing, false to force disable
c88fd863
SR
2226 *
2227 * The records that represent all functions that can be traced need
2228 * to be updated when tracing has been enabled.
2229 */
7375dca1 2230int ftrace_update_record(struct dyn_ftrace *rec, bool enable)
c88fd863 2231{
7375dca1 2232 return ftrace_check_record(rec, enable, true);
c88fd863
SR
2233}
2234
2235/**
6130722f 2236 * ftrace_test_record - check if the record has been enabled or not
c88fd863 2237 * @rec: the record to test
7375dca1 2238 * @enable: set to true to check if enabled, false if it is disabled
c88fd863
SR
2239 *
2240 * The arch code may need to test if a record is already set to
2241 * tracing to determine how to modify the function code that it
2242 * represents.
2243 */
7375dca1 2244int ftrace_test_record(struct dyn_ftrace *rec, bool enable)
c88fd863 2245{
7375dca1 2246 return ftrace_check_record(rec, enable, false);
c88fd863
SR
2247}
2248
5fecaa04
SRRH
2249static struct ftrace_ops *
2250ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
2251{
2252 struct ftrace_ops *op;
fef5aeee 2253 unsigned long ip = rec->ip;
5fecaa04
SRRH
2254
2255 do_for_each_ftrace_op(op, ftrace_ops_list) {
2256
2257 if (!op->trampoline)
2258 continue;
2259
fef5aeee 2260 if (hash_contains_ip(ip, op->func_hash))
5fecaa04
SRRH
2261 return op;
2262 } while_for_each_ftrace_op(op);
2263
2264 return NULL;
2265}
2266
4c75b0ff
NR
2267static struct ftrace_ops *
2268ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude)
2269{
2270 struct ftrace_ops *op;
2271 unsigned long ip = rec->ip;
2272
2273 do_for_each_ftrace_op(op, ftrace_ops_list) {
2274
2275 if (op == op_exclude || !op->trampoline)
2276 continue;
2277
2278 if (hash_contains_ip(ip, op->func_hash))
2279 return op;
2280 } while_for_each_ftrace_op(op);
2281
2282 return NULL;
2283}
2284
39daa7b9
SRRH
2285static struct ftrace_ops *
2286ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
2287 struct ftrace_ops *op)
2288{
2289 unsigned long ip = rec->ip;
2290
2291 while_for_each_ftrace_op(op) {
2292
2293 if (!op->trampoline)
2294 continue;
2295
2296 if (hash_contains_ip(ip, op->func_hash))
2297 return op;
026bb845 2298 }
39daa7b9
SRRH
2299
2300 return NULL;
2301}
2302
79922b80
SRRH
2303static struct ftrace_ops *
2304ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
2305{
2306 struct ftrace_ops *op;
fef5aeee 2307 unsigned long ip = rec->ip;
79922b80 2308
fef5aeee
SRRH
2309 /*
2310 * Need to check removed ops first.
2311 * If they are being removed, and this rec has a tramp,
2312 * and this rec is in the ops list, then it would be the
2313 * one with the tramp.
2314 */
2315 if (removed_ops) {
2316 if (hash_contains_ip(ip, &removed_ops->old_hash))
79922b80
SRRH
2317 return removed_ops;
2318 }
2319
fef5aeee
SRRH
2320 /*
2321 * Need to find the current trampoline for a rec.
2322 * Now, a trampoline is only attached to a rec if there
2323 * was a single 'ops' attached to it. But this can be called
2324 * when we are adding another op to the rec or removing the
2325 * current one. Thus, if the op is being added, we can
2326 * ignore it because it hasn't attached itself to the rec
4fc40904
SRRH
2327 * yet.
2328 *
2329 * If an ops is being modified (hooking to different functions)
2330 * then we don't care about the new functions that are being
2331 * added, just the old ones (that are probably being removed).
2332 *
2333 * If we are adding an ops to a function that already uses
2334 * a trampoline, the trampoline needs to be removed (trampolines
2335 * are only used when a single ops is attached), so an ops that
2336 * is not being modified also needs to be checked.
fef5aeee 2337 */
79922b80 2338 do_for_each_ftrace_op(op, ftrace_ops_list) {
fef5aeee
SRRH
2339
2340 if (!op->trampoline)
2341 continue;
2342
2343 /*
2344 * If the ops is being added, it hasn't gotten to
2345 * the point to be removed from this tree yet.
2346 */
2347 if (op->flags & FTRACE_OPS_FL_ADDING)
79922b80
SRRH
2348 continue;
2349
4fc40904 2350
fef5aeee 2351 /*
4fc40904
SRRH
2352 * If the ops is being modified and is in the old
2353 * hash, then it is probably being removed from this
2354 * function.
fef5aeee 2355 */
fef5aeee
SRRH
2356 if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
2357 hash_contains_ip(ip, &op->old_hash))
79922b80 2358 return op;
4fc40904
SRRH
2359 /*
2360 * If the ops is not being added or modified, and it's
2361 * in its normal filter hash, then this must be the one
2362 * we want!
2363 */
2364 if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
2365 hash_contains_ip(ip, op->func_hash))
2366 return op;
79922b80
SRRH
2367
2368 } while_for_each_ftrace_op(op);
2369
2370 return NULL;
2371}
2372
2373static struct ftrace_ops *
2374ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
2375{
2376 struct ftrace_ops *op;
fef5aeee 2377 unsigned long ip = rec->ip;
79922b80
SRRH
2378
2379 do_for_each_ftrace_op(op, ftrace_ops_list) {
2380 /* pass rec in as regs to have non-NULL val */
fef5aeee 2381 if (hash_contains_ip(ip, op->func_hash))
79922b80
SRRH
2382 return op;
2383 } while_for_each_ftrace_op(op);
2384
2385 return NULL;
2386}
2387
763e34e7
SRV
2388#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
2389/* Protected by rcu_tasks for reading, and direct_mutex for writing */
2390static struct ftrace_hash *direct_functions = EMPTY_HASH;
2391static DEFINE_MUTEX(direct_mutex);
a3ad1a7e 2392int ftrace_direct_func_count;
763e34e7
SRV
2393
2394/*
2395 * Search the direct_functions hash to see if the given instruction pointer
2396 * has a direct caller attached to it.
2397 */
ff205766 2398unsigned long ftrace_find_rec_direct(unsigned long ip)
763e34e7
SRV
2399{
2400 struct ftrace_func_entry *entry;
2401
2402 entry = __ftrace_lookup_ip(direct_functions, ip);
2403 if (!entry)
2404 return 0;
2405
2406 return entry->direct;
2407}
2408
1904a814
JO
2409static struct ftrace_func_entry*
2410ftrace_add_rec_direct(unsigned long ip, unsigned long addr,
2411 struct ftrace_hash **free_hash)
2412{
2413 struct ftrace_func_entry *entry;
2414
2415 if (ftrace_hash_empty(direct_functions) ||
2416 direct_functions->count > 2 * (1 << direct_functions->size_bits)) {
2417 struct ftrace_hash *new_hash;
2418 int size = ftrace_hash_empty(direct_functions) ? 0 :
2419 direct_functions->count + 1;
2420
2421 if (size < 32)
2422 size = 32;
2423
2424 new_hash = dup_hash(direct_functions, size);
2425 if (!new_hash)
2426 return NULL;
2427
2428 *free_hash = direct_functions;
2429 direct_functions = new_hash;
2430 }
2431
2432 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2433 if (!entry)
2434 return NULL;
2435
2436 entry->ip = ip;
2437 entry->direct = addr;
2438 __add_hash_entry(direct_functions, entry);
2439 return entry;
2440}
2441
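/*
 * A small worked example (numbers for illustration only) of the
 * resize check above: with size_bits = 5 the direct_functions hash
 * has 32 buckets, so dup_hash() is called once it holds more than
 * 64 entries (count > 2 * (1 << 5)), keeping the average bucket
 * depth at roughly two entries.
 */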
763e34e7 2442static void call_direct_funcs(unsigned long ip, unsigned long pip,
d19ad077 2443 struct ftrace_ops *ops, struct ftrace_regs *fregs)
763e34e7 2444{
d19ad077 2445 struct pt_regs *regs = ftrace_get_regs(fregs);
763e34e7
SRV
2446 unsigned long addr;
2447
ff205766 2448 addr = ftrace_find_rec_direct(ip);
763e34e7
SRV
2449 if (!addr)
2450 return;
2451
2452 arch_ftrace_set_direct_caller(regs, addr);
2453}
2454
2455struct ftrace_ops direct_ops = {
2456 .func = call_direct_funcs,
a25d036d 2457 .flags = FTRACE_OPS_FL_IPMODIFY
763e34e7
SRV
2458 | FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS
2459 | FTRACE_OPS_FL_PERMANENT,
5da7cd11
SRV
2460 /*
2461 * By declaring the main trampoline as this ops' trampoline,
2462 * one will never be allocated for it. Allocated
2463 * trampolines should not call direct functions.
2464 * The direct_ops should only be called by the builtin
2465 * ftrace_regs_caller trampoline.
2466 */
2467 .trampoline = FTRACE_REGS_ADDR,
763e34e7 2468};
763e34e7
SRV
2469#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
2470
7413af1f
SRRH
2471/**
2472 * ftrace_get_addr_new - Get the call address to set to
2473 * @rec: The ftrace record descriptor
2474 *
2475 * If the record has the FTRACE_FL_REGS set, that means that it
2476 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
5c8c206e 2477 * is not set, then it wants to convert to the normal callback.
7413af1f
SRRH
2478 *
2479 * Returns the address of the trampoline to set to
2480 */
2481unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
2482{
79922b80 2483 struct ftrace_ops *ops;
763e34e7
SRV
2484 unsigned long addr;
2485
2486 if ((rec->flags & FTRACE_FL_DIRECT) &&
2487 (ftrace_rec_count(rec) == 1)) {
ff205766 2488 addr = ftrace_find_rec_direct(rec->ip);
763e34e7
SRV
2489 if (addr)
2490 return addr;
2491 WARN_ON_ONCE(1);
2492 }
79922b80
SRRH
2493
2494 /* Trampolines take precedence over regs */
2495 if (rec->flags & FTRACE_FL_TRAMP) {
2496 ops = ftrace_find_tramp_ops_new(rec);
2497 if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
bce0b6c5
SRRH
2498 pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2499 (void *)rec->ip, (void *)rec->ip, rec->flags);
79922b80
SRRH
2500 /* Ftrace is shutting down, return anything */
2501 return (unsigned long)FTRACE_ADDR;
2502 }
2503 return ops->trampoline;
2504 }
2505
7413af1f
SRRH
2506 if (rec->flags & FTRACE_FL_REGS)
2507 return (unsigned long)FTRACE_REGS_ADDR;
2508 else
2509 return (unsigned long)FTRACE_ADDR;
2510}
2511
2512/**
2513 * ftrace_get_addr_curr - Get the call address that is already there
2514 * @rec: The ftrace record descriptor
2515 *
2516 * The FTRACE_FL_REGS_EN is set when the record already points to
2517 * a function that saves all the regs. Basically the '_EN' version
2518 * represents the current state of the function.
2519 *
2520 * Returns the address of the trampoline that is currently being called
2521 */
2522unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2523{
79922b80 2524 struct ftrace_ops *ops;
763e34e7
SRV
2525 unsigned long addr;
2526
2527 /* Direct calls take precedence over trampolines */
2528 if (rec->flags & FTRACE_FL_DIRECT_EN) {
ff205766 2529 addr = ftrace_find_rec_direct(rec->ip);
763e34e7
SRV
2530 if (addr)
2531 return addr;
2532 WARN_ON_ONCE(1);
2533 }
79922b80
SRRH
2534
2535 /* Trampolines take precedence over regs */
2536 if (rec->flags & FTRACE_FL_TRAMP_EN) {
2537 ops = ftrace_find_tramp_ops_curr(rec);
2538 if (FTRACE_WARN_ON(!ops)) {
a395d6a7
JP
2539 pr_warn("Bad trampoline accounting at: %p (%pS)\n",
2540 (void *)rec->ip, (void *)rec->ip);
79922b80
SRRH
2541 /* Ftrace is shutting down, return anything */
2542 return (unsigned long)FTRACE_ADDR;
2543 }
2544 return ops->trampoline;
2545 }
2546
7413af1f
SRRH
2547 if (rec->flags & FTRACE_FL_REGS_EN)
2548 return (unsigned long)FTRACE_REGS_ADDR;
2549 else
2550 return (unsigned long)FTRACE_ADDR;
2551}
2552
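/*
 * Both helpers above resolve the call target with the same
 * precedence; ftrace_get_addr_new() looks at the wanted flags while
 * ftrace_get_addr_curr() looks at the installed *_EN flags:
 *
 *   DIRECT/DIRECT_EN (single user) -> address from the direct_functions hash
 *   TRAMP/TRAMP_EN                 -> trampoline of the single attached ops
 *   REGS/REGS_EN                   -> FTRACE_REGS_ADDR (ftrace_regs_caller)
 *   otherwise                      -> FTRACE_ADDR (ftrace_caller)
 */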
c88fd863 2553static int
7375dca1 2554__ftrace_replace_code(struct dyn_ftrace *rec, bool enable)
c88fd863 2555{
08f6fba5 2556 unsigned long ftrace_old_addr;
c88fd863
SR
2557 unsigned long ftrace_addr;
2558 int ret;
2559
7c0868e0 2560 ftrace_addr = ftrace_get_addr_new(rec);
c88fd863 2561
7c0868e0
SRRH
2562 /* This needs to be done before we call ftrace_update_record */
2563 ftrace_old_addr = ftrace_get_addr_curr(rec);
2564
2565 ret = ftrace_update_record(rec, enable);
08f6fba5 2566
02a392a0
SRRH
2567 ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2568
c88fd863
SR
2569 switch (ret) {
2570 case FTRACE_UPDATE_IGNORE:
2571 return 0;
2572
2573 case FTRACE_UPDATE_MAKE_CALL:
02a392a0 2574 ftrace_bug_type = FTRACE_BUG_CALL;
64fbcd16 2575 return ftrace_make_call(rec, ftrace_addr);
c88fd863
SR
2576
2577 case FTRACE_UPDATE_MAKE_NOP:
02a392a0 2578 ftrace_bug_type = FTRACE_BUG_NOP;
39b5552c 2579 return ftrace_make_nop(NULL, rec, ftrace_old_addr);
08f6fba5 2580
08f6fba5 2581 case FTRACE_UPDATE_MODIFY_CALL:
02a392a0 2582 ftrace_bug_type = FTRACE_BUG_UPDATE;
08f6fba5 2583 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
5072c59f
SR
2584 }
2585
9efb85c5 2586 return -1; /* unknown ftrace bug */
5072c59f
SR
2587}
2588
a0572f68 2589void __weak ftrace_replace_code(int mod_flags)
3c1720f0 2590{
3c1720f0
SR
2591 struct dyn_ftrace *rec;
2592 struct ftrace_page *pg;
7375dca1 2593 bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
a0572f68 2594 int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
6a24a244 2595 int failed;
3c1720f0 2596
45a4a237
SR
2597 if (unlikely(ftrace_disabled))
2598 return;
2599
265c831c 2600 do_for_each_ftrace_rec(pg, rec) {
546fece4
SRRH
2601
2602 if (rec->flags & FTRACE_FL_DISABLED)
2603 continue;
2604
e4f5d544 2605 failed = __ftrace_replace_code(rec, enable);
fa9d13cf 2606 if (failed) {
4fd3279b 2607 ftrace_bug(failed, rec);
3279ba37
SR
2608 /* Stop processing */
2609 return;
3c1720f0 2610 }
a0572f68
SRV
2611 if (schedulable)
2612 cond_resched();
265c831c 2613 } while_for_each_ftrace_rec();
3c1720f0
SR
2614}
2615
c88fd863
SR
2616struct ftrace_rec_iter {
2617 struct ftrace_page *pg;
2618 int index;
2619};
2620
2621/**
6130722f 2622 * ftrace_rec_iter_start - start up iterating over traced functions
c88fd863
SR
2623 *
2624 * Returns an iterator handle that is used to iterate over all
2625 * the records that represent address locations where functions
2626 * are traced.
2627 *
2628 * May return NULL if no records are available.
2629 */
2630struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2631{
2632 /*
2633 * We only use a single iterator.
2634 * Protected by the ftrace_lock mutex.
2635 */
2636 static struct ftrace_rec_iter ftrace_rec_iter;
2637 struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2638
2639 iter->pg = ftrace_pages_start;
2640 iter->index = 0;
2641
2642 /* Could have empty pages */
2643 while (iter->pg && !iter->pg->index)
2644 iter->pg = iter->pg->next;
2645
2646 if (!iter->pg)
2647 return NULL;
2648
2649 return iter;
2650}
2651
2652/**
6130722f 2653 * ftrace_rec_iter_next - get the next record to process.
c88fd863
SR
2654 * @iter: The handle to the iterator.
2655 *
2656 * Returns the next iterator after the given iterator @iter.
2657 */
2658struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2659{
2660 iter->index++;
2661
2662 if (iter->index >= iter->pg->index) {
2663 iter->pg = iter->pg->next;
2664 iter->index = 0;
2665
2666 /* Could have empty pages */
2667 while (iter->pg && !iter->pg->index)
2668 iter->pg = iter->pg->next;
2669 }
2670
2671 if (!iter->pg)
2672 return NULL;
2673
2674 return iter;
2675}
2676
2677/**
6130722f 2678 * ftrace_rec_iter_record - get the record at the iterator location
c88fd863
SR
2679 * @iter: The current iterator location
2680 *
2681 * Returns the record that the current @iter is at.
2682 */
2683struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2684{
2685 return &iter->pg->records[iter->index];
2686}
2687
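/*
 * A minimal usage sketch of the iterator API above, matching how
 * arch code typically walks every record (the loop body here is
 * illustrative only):
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		... patch the call site at rec->ip ...
 *	}
 */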
492a7ea5 2688static int
fbf6c73c 2689ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec)
3c1720f0 2690{
593eb8a2 2691 int ret;
3c1720f0 2692
45a4a237
SR
2693 if (unlikely(ftrace_disabled))
2694 return 0;
2695
fbf6c73c 2696 ret = ftrace_init_nop(mod, rec);
593eb8a2 2697 if (ret) {
02a392a0 2698 ftrace_bug_type = FTRACE_BUG_INIT;
4fd3279b 2699 ftrace_bug(ret, rec);
492a7ea5 2700 return 0;
37ad5084 2701 }
492a7ea5 2702 return 1;
3c1720f0
SR
2703}
2704
000ab691
SR
2705/*
2706 * archs can override this function if they must do something
2707 * before the modifying code is performed.
2708 */
3a2bfec0 2709void __weak ftrace_arch_code_modify_prepare(void)
000ab691 2710{
000ab691
SR
2711}
2712
2713/*
2714 * archs can override this function if they must do something
2715 * after the modifying code is performed.
2716 */
3a2bfec0 2717void __weak ftrace_arch_code_modify_post_process(void)
000ab691 2718{
000ab691
SR
2719}
2720
8ed3e2cf 2721void ftrace_modify_all_code(int command)
3d083395 2722{
59338f75 2723 int update = command & FTRACE_UPDATE_TRACE_FUNC;
a0572f68 2724 int mod_flags = 0;
cd21067f 2725 int err = 0;
59338f75 2726
a0572f68
SRV
2727 if (command & FTRACE_MAY_SLEEP)
2728 mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL;
2729
59338f75
SRRH
2730 /*
2731 * If the ftrace_caller calls a ftrace_ops func directly,
2732 * we need to make sure that it only traces functions it
2733 * expects to trace. When doing the switch of functions,
2734 * we need to update to the ftrace_ops_list_func first
2735 * before the transition between old and new calls are set,
2736 * as the ftrace_ops_list_func will check the ops hashes
2737 * to make sure the ops are having the right functions
2738 * traced.
2739 */
cd21067f
PM
2740 if (update) {
2741 err = ftrace_update_ftrace_func(ftrace_ops_list_func);
2742 if (FTRACE_WARN_ON(err))
2743 return;
2744 }
59338f75 2745
8ed3e2cf 2746 if (command & FTRACE_UPDATE_CALLS)
a0572f68 2747 ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
8ed3e2cf 2748 else if (command & FTRACE_DISABLE_CALLS)
a0572f68 2749 ftrace_replace_code(mod_flags);
d61f82d0 2750
405e1d83
SRRH
2751 if (update && ftrace_trace_function != ftrace_ops_list_func) {
2752 function_trace_op = set_function_trace_op;
2753 smp_wmb();
2754 /* If irqs are disabled, we are in stop machine */
2755 if (!irqs_disabled())
2756 smp_call_function(ftrace_sync_ipi, NULL, 1);
cd21067f
PM
2757 err = ftrace_update_ftrace_func(ftrace_trace_function);
2758 if (FTRACE_WARN_ON(err))
2759 return;
405e1d83 2760 }
d61f82d0 2761
8ed3e2cf 2762 if (command & FTRACE_START_FUNC_RET)
cd21067f 2763 err = ftrace_enable_ftrace_graph_caller();
8ed3e2cf 2764 else if (command & FTRACE_STOP_FUNC_RET)
cd21067f
PM
2765 err = ftrace_disable_ftrace_graph_caller();
2766 FTRACE_WARN_ON(err);
8ed3e2cf
SR
2767}
2768
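/*
 * An illustrative example (assuming the usual start-up path, see
 * ftrace_startup_enable()) of how the command bits handled above are
 * combined: enabling the call sites while also switching the traced
 * function is requested as a single call such as
 *
 *	ftrace_modify_all_code(FTRACE_UPDATE_CALLS |
 *			       FTRACE_UPDATE_TRACE_FUNC);
 *
 * which installs ftrace_ops_list_func before the call sites
 * transition, as the comment above describes.
 */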
2769static int __ftrace_modify_code(void *data)
2770{
2771 int *command = data;
2772
2773 ftrace_modify_all_code(*command);
5a45cfe1 2774
d61f82d0 2775 return 0;
3d083395
SR
2776}
2777
c88fd863 2778/**
6130722f 2779 * ftrace_run_stop_machine - go back to the stop machine method
c88fd863
SR
2780 * @command: The command to tell ftrace what to do
2781 *
2782 * If an arch needs to fall back to the stop machine method, then
2783 * it can call this function.
2784 */
2785void ftrace_run_stop_machine(int command)
2786{
2787 stop_machine(__ftrace_modify_code, &command, NULL);
2788}
2789
2790/**
6130722f 2791 * arch_ftrace_update_code - modify the code to trace or not trace
c88fd863
SR
2792 * @command: The command that needs to be done
2793 *
2794 * Archs can override this function if they do not need to
2795 * run stop_machine() to modify code.
2796 */
2797void __weak arch_ftrace_update_code(int command)
2798{
2799 ftrace_run_stop_machine(command);
2800}
2801
e309b41d 2802static void ftrace_run_update_code(int command)
3d083395 2803{
3a2bfec0 2804 ftrace_arch_code_modify_prepare();
000ab691 2805
c88fd863
SR
2806 /*
2807 * By default we use stop_machine() to modify the code.
2808 * But archs can do what ever they want as long as it
2809 * is safe. The stop_machine() is the safest, but also
2810 * produces the most overhead.
2811 */
2812 arch_ftrace_update_code(command);
2813
3a2bfec0 2814 ftrace_arch_code_modify_post_process();
3d083395
SR
2815}
2816
8252ecf3 2817static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
7485058e 2818 struct ftrace_ops_hash *old_hash)
e1effa01
SRRH
2819{
2820 ops->flags |= FTRACE_OPS_FL_MODIFYING;
7485058e
SRRH
2821 ops->old_hash.filter_hash = old_hash->filter_hash;
2822 ops->old_hash.notrace_hash = old_hash->notrace_hash;
e1effa01 2823 ftrace_run_update_code(command);
8252ecf3 2824 ops->old_hash.filter_hash = NULL;
7485058e 2825 ops->old_hash.notrace_hash = NULL;
e1effa01
SRRH
2826 ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2827}
2828
d61f82d0 2829static ftrace_func_t saved_ftrace_func;
60a7ecf4 2830static int ftrace_start_up;
df4fc315 2831
12cce594
SRRH
2832void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
2833{
2834}
2835
fc0ea795
AH
2836/* List of trace_ops that have allocated trampolines */
2837static LIST_HEAD(ftrace_ops_trampoline_list);
2838
2839static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops)
2840{
2841 lockdep_assert_held(&ftrace_lock);
2842 list_add_rcu(&ops->list, &ftrace_ops_trampoline_list);
2843}
2844
2845static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops)
2846{
2847 lockdep_assert_held(&ftrace_lock);
2848 list_del_rcu(&ops->list);
478ece95 2849 synchronize_rcu();
fc0ea795
AH
2850}
2851
2852/*
2853 * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols
2854 * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is
2855 * not a module.
2856 */
2857#define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace"
2858#define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline"
2859
2860static void ftrace_trampoline_free(struct ftrace_ops *ops)
2861{
2862 if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) &&
dd9ddf46 2863 ops->trampoline) {
548e1f6c
AH
2864 /*
2865 * Record the text poke event before the ksymbol unregister
2866 * event.
2867 */
2868 perf_event_text_poke((void *)ops->trampoline,
2869 (void *)ops->trampoline,
2870 ops->trampoline_size, NULL, 0);
dd9ddf46
AH
2871 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
2872 ops->trampoline, ops->trampoline_size,
2873 true, FTRACE_TRAMPOLINE_SYM);
2874 /* Remove from kallsyms after the perf events */
fc0ea795 2875 ftrace_remove_trampoline_from_kallsyms(ops);
dd9ddf46 2876 }
fc0ea795
AH
2877
2878 arch_ftrace_trampoline_free(ops);
2879}
2880
df4fc315
SR
2881static void ftrace_startup_enable(int command)
2882{
2883 if (saved_ftrace_func != ftrace_trace_function) {
2884 saved_ftrace_func = ftrace_trace_function;
2885 command |= FTRACE_UPDATE_TRACE_FUNC;
2886 }
2887
2888 if (!command || !ftrace_enabled)
2889 return;
2890
2891 ftrace_run_update_code(command);
2892}
d61f82d0 2893
e1effa01
SRRH
2894static void ftrace_startup_all(int command)
2895{
2896 update_all_ops = true;
2897 ftrace_startup_enable(command);
2898 update_all_ops = false;
2899}
2900
3306fc4a 2901int ftrace_startup(struct ftrace_ops *ops, int command)
3d083395 2902{
8a56d776 2903 int ret;
b848914c 2904
4eebcc81 2905 if (unlikely(ftrace_disabled))
a1cd6173 2906 return -ENODEV;
4eebcc81 2907
8a56d776
SRRH
2908 ret = __register_ftrace_function(ops);
2909 if (ret)
2910 return ret;
2911
60a7ecf4 2912 ftrace_start_up++;
d61f82d0 2913
e1effa01
SRRH
2914 /*
2915 * Note that ftrace probes use this to start up
2916 * and modify functions it will probe. But we still
2917 * set the ADDING flag for modification, as probes
2918 * do not have trampolines. If they add them in the
2919 * future, then the probes will need to distinguish
2920 * between adding and updating probes.
2921 */
2922 ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
66209a5b 2923
f8b8be8a
MH
2924 ret = ftrace_hash_ipmodify_enable(ops);
2925 if (ret < 0) {
2926 /* Rollback registration process */
2927 __unregister_ftrace_function(ops);
2928 ftrace_start_up--;
2929 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
d5e47505
MB
2930 if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
2931 ftrace_trampoline_free(ops);
f8b8be8a
MH
2932 return ret;
2933 }
2934
7f50d06b
JO
2935 if (ftrace_hash_rec_enable(ops, 1))
2936 command |= FTRACE_UPDATE_CALLS;
ed926f9b 2937
df4fc315 2938 ftrace_startup_enable(command);
a1cd6173 2939
e1effa01
SRRH
2940 ops->flags &= ~FTRACE_OPS_FL_ADDING;
2941
a1cd6173 2942 return 0;
3d083395
SR
2943}
2944
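/*
 * A minimal sketch (illustrative names, no error handling) of the
 * path that ends up in ftrace_startup() above: a caller fills in an
 * ftrace_ops and registers it, and the hash accounting then decides
 * whether the FTRACE_UPDATE_CALLS command needs to run.
 *
 *	static void my_trace_func(unsigned long ip, unsigned long parent_ip,
 *				  struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		... react to every traced function entry ...
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */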
3306fc4a 2945int ftrace_shutdown(struct ftrace_ops *ops, int command)
3d083395 2946{
8a56d776 2947 int ret;
b848914c 2948
4eebcc81 2949 if (unlikely(ftrace_disabled))
8a56d776
SRRH
2950 return -ENODEV;
2951
2952 ret = __unregister_ftrace_function(ops);
2953 if (ret)
2954 return ret;
4eebcc81 2955
60a7ecf4 2956 ftrace_start_up--;
9ea1a153
FW
2957 /*
2958 * Just warn in case of imbalance; no need to kill ftrace, it's not
2959 * critical, but the ftrace_call callers may never be nopped again after
2960 * further ftrace uses.
2961 */
2962 WARN_ON_ONCE(ftrace_start_up < 0);
2963
f8b8be8a
MH
2964 /* Disabling ipmodify never fails */
2965 ftrace_hash_ipmodify_disable(ops);
ed926f9b 2966
7f50d06b
JO
2967 if (ftrace_hash_rec_disable(ops, 1))
2968 command |= FTRACE_UPDATE_CALLS;
b848914c 2969
7f50d06b 2970 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
3d083395 2971
d61f82d0
SR
2972 if (saved_ftrace_func != ftrace_trace_function) {
2973 saved_ftrace_func = ftrace_trace_function;
2974 command |= FTRACE_UPDATE_TRACE_FUNC;
2975 }
3d083395 2976
a4c35ed2
SRRH
2977 if (!command || !ftrace_enabled) {
2978 /*
edb096e0
SRV
2979 * If these are dynamic or per_cpu ops, they still
2980 * need their data freed. Since, function tracing is
a4c35ed2
SRRH
2981 * not currently active, we can just free them
2982 * without synchronizing all CPUs.
2983 */
b3a88803 2984 if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
edb096e0
SRV
2985 goto free_ops;
2986
8a56d776 2987 return 0;
a4c35ed2 2988 }
d61f82d0 2989
79922b80
SRRH
2990 /*
2991 * If the ops uses a trampoline, then it needs to be
2992 * tested first on update.
2993 */
e1effa01 2994 ops->flags |= FTRACE_OPS_FL_REMOVING;
79922b80
SRRH
2995 removed_ops = ops;
2996
fef5aeee
SRRH
2997 /* The trampoline logic checks the old hashes */
2998 ops->old_hash.filter_hash = ops->func_hash->filter_hash;
2999 ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
3000
d61f82d0 3001 ftrace_run_update_code(command);
a4c35ed2 3002
84bde62c
SRRH
3003 /*
3004 * If there's no more ops registered with ftrace, run a
3005 * sanity check to make sure all rec flags are cleared.
3006 */
f86f4180
CZ
3007 if (rcu_dereference_protected(ftrace_ops_list,
3008 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
84bde62c
SRRH
3009 struct ftrace_page *pg;
3010 struct dyn_ftrace *rec;
3011
3012 do_for_each_ftrace_rec(pg, rec) {
977c1f9c 3013 if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
84bde62c
SRRH
3014 pr_warn(" %pS flags:%lx\n",
3015 (void *)rec->ip, rec->flags);
3016 } while_for_each_ftrace_rec();
3017 }
3018
fef5aeee
SRRH
3019 ops->old_hash.filter_hash = NULL;
3020 ops->old_hash.notrace_hash = NULL;
3021
3022 removed_ops = NULL;
e1effa01 3023 ops->flags &= ~FTRACE_OPS_FL_REMOVING;
79922b80 3024
a4c35ed2
SRRH
3025 /*
3026 * Dynamic ops may be freed, we must make sure that all
3027 * callers are done before leaving this function.
ba27f2bc 3028 * The same goes for freeing the per_cpu data of the per_cpu
a4c35ed2 3029 * ops.
a4c35ed2 3030 */
b3a88803 3031 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
0598e4f0
SRV
3032 /*
3033 * We need to do a hard force of sched synchronization.
3034 * This is because we use preempt_disable() to do RCU, but
3035 * the function tracers can be called where RCU is not watching
3036 * (like before user_exit()). We cannot rely on the RCU
3037 * infrastructure to do the synchronization, thus we must do it
3038 * ourselves.
3039 */
e5a971d7 3040 synchronize_rcu_tasks_rude();
a4c35ed2 3041
0598e4f0 3042 /*
fdda88d3 3043 * When the kernel is preemptible, tasks can be preempted
0598e4f0
SRV
3044 * while on a ftrace trampoline. Just scheduling a task on
3045 * a CPU is not good enough to flush them. Calling
f2cc020d 3046 * synchronize_rcu_tasks() will wait for those tasks to
0598e4f0
SRV
3047 * execute and either schedule voluntarily or enter user space.
3048 */
30c93704 3049 if (IS_ENABLED(CONFIG_PREEMPTION))
0598e4f0
SRV
3050 synchronize_rcu_tasks();
3051
edb096e0 3052 free_ops:
fc0ea795 3053 ftrace_trampoline_free(ops);
a4c35ed2
SRRH
3054 }
3055
8a56d776 3056 return 0;
3d083395
SR
3057}
3058
a5a1d1c2 3059static u64 ftrace_update_time;
3d083395 3060unsigned long ftrace_update_tot_cnt;
da537f0a
SRV
3061unsigned long ftrace_number_of_pages;
3062unsigned long ftrace_number_of_groups;
3d083395 3063
8c4f3c3f 3064static inline int ops_traces_mod(struct ftrace_ops *ops)
f7bc8b61 3065{
8c4f3c3f
SRRH
3066 /*
3067 * Filter_hash being empty will default to trace module.
3068 * But notrace hash requires a test of individual module functions.
3069 */
33b7f99c
SRRH
3070 return ftrace_hash_empty(ops->func_hash->filter_hash) &&
3071 ftrace_hash_empty(ops->func_hash->notrace_hash);
8c4f3c3f
SRRH
3072}
3073
3074/*
3075 * Check if the current ops references the record.
3076 *
3077 * If the ops traces all functions, then it was already accounted for.
3078 * If the ops does not trace the current record function, skip it.
3079 * If the ops ignores the function via notrace filter, skip it.
3080 */
3081static inline bool
3082ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3083{
3084 /* If ops isn't enabled, ignore it */
3085 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
44ec3ec0 3086 return false;
8c4f3c3f 3087
b7ffffbb 3088 /* If ops traces all then it includes this function */
8c4f3c3f 3089 if (ops_traces_mod(ops))
44ec3ec0 3090 return true;
8c4f3c3f
SRRH
3091
3092 /* The function must be in the filter */
33b7f99c 3093 if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
2b2c279c 3094 !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
44ec3ec0 3095 return false;
f7bc8b61 3096
8c4f3c3f 3097 /* If in notrace hash, we ignore it too */
33b7f99c 3098 if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
44ec3ec0 3099 return false;
8c4f3c3f 3100
44ec3ec0 3101 return true;
8c4f3c3f
SRRH
3102}
3103
1dc43cf0 3104static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
3d083395 3105{
67ccddf8 3106 bool init_nop = ftrace_need_init_nop();
85ae32ae 3107 struct ftrace_page *pg;
e94142a6 3108 struct dyn_ftrace *p;
a5a1d1c2 3109 u64 start, stop;
1dc43cf0 3110 unsigned long update_cnt = 0;
b7ffffbb 3111 unsigned long rec_flags = 0;
85ae32ae 3112 int i;
f7bc8b61 3113
b7ffffbb
SRRH
3114 start = ftrace_now(raw_smp_processor_id());
3115
f7bc8b61 3116 /*
b7ffffbb
SRRH
3117 * When a module is loaded, this function is called to convert
3118 * the calls to mcount in its text to nops, and also to create
3119 * an entry in the ftrace data. Now, if ftrace is activated
3120 * after this call, but before the module sets its text to
3122 * read-only, the modification to enable ftrace can fail if
3123 * the text is made read-only while ftrace is converting the calls.
3123 * To prevent this, the module's records are set as disabled
3124 * and will be enabled after the call to set the module's text
3125 * to read-only.
f7bc8b61 3126 */
b7ffffbb
SRRH
3127 if (mod)
3128 rec_flags |= FTRACE_FL_DISABLED;
3d083395 3129
1dc43cf0 3130 for (pg = new_pgs; pg; pg = pg->next) {
3d083395 3131
85ae32ae 3132 for (i = 0; i < pg->index; i++) {
8c4f3c3f 3133
85ae32ae
SR
3134 /* If something went wrong, bail without enabling anything */
3135 if (unlikely(ftrace_disabled))
3136 return -1;
f22f9a89 3137
85ae32ae 3138 p = &pg->records[i];
b7ffffbb 3139 p->flags = rec_flags;
f22f9a89 3140
85ae32ae
SR
3141 /*
3142 * Do the initial record conversion from mcount jump
3143 * to the NOP instructions.
3144 */
67ccddf8 3145 if (init_nop && !ftrace_nop_initialize(mod, p))
85ae32ae 3146 break;
5cb084bb 3147
1dc43cf0 3148 update_cnt++;
5cb084bb 3149 }
3d083395
SR
3150 }
3151
750ed1a4 3152 stop = ftrace_now(raw_smp_processor_id());
3d083395 3153 ftrace_update_time = stop - start;
1dc43cf0 3154 ftrace_update_tot_cnt += update_cnt;
3d083395 3155
16444a8a
ACM
3156 return 0;
3157}
3158
a7900875 3159static int ftrace_allocate_records(struct ftrace_page *pg, int count)
3c1720f0 3160{
a7900875 3161 int order;
7ba031e8 3162 int pages;
3c1720f0 3163 int cnt;
3c1720f0 3164
a7900875
SR
3165 if (WARN_ON(!count))
3166 return -EINVAL;
3167
ceaaa129 3168 /* We want to fill as much as possible, with no empty pages */
b40c6eab 3169 pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
ceaaa129 3170 order = fls(pages) - 1;
3c1720f0 3171
a7900875
SR
3172 again:
3173 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
3c1720f0 3174
a7900875
SR
3175 if (!pg->records) {
3176 /* if we can't allocate this size, try something smaller */
3177 if (!order)
3178 return -ENOMEM;
3179 order >>= 1;
3180 goto again;
3181 }
3c1720f0 3182
da537f0a
SRV
3183 ftrace_number_of_pages += 1 << order;
3184 ftrace_number_of_groups++;
3185
a7900875 3186 cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
db42523b 3187 pg->order = order;
3c1720f0 3188
a7900875
SR
3189 if (cnt > count)
3190 cnt = count;
3191
3192 return cnt;
3193}
3194
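/*
 * A worked example of the sizing above, assuming (for the arithmetic
 * only) a 4K page and a 16 byte struct dyn_ftrace, i.e.
 * ENTRIES_PER_PAGE = 256: a request for count = 1000 gives
 * pages = DIV_ROUND_UP(1000, 256) = 4 and order = fls(4) - 1 = 2,
 * so one 4-page allocation that can hold 1024 records is attempted.
 * If that allocation fails, the order is reduced (order >>= 1) and
 * retried, and any records that no longer fit are picked up by the
 * next ftrace_page from ftrace_allocate_pages().
 */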
3195static struct ftrace_page *
3196ftrace_allocate_pages(unsigned long num_to_init)
3197{
3198 struct ftrace_page *start_pg;
3199 struct ftrace_page *pg;
a7900875
SR
3200 int cnt;
3201
3202 if (!num_to_init)
9efb85c5 3203 return NULL;
a7900875
SR
3204
3205 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
3206 if (!pg)
3207 return NULL;
3208
3209 /*
3210 * Try to allocate as much as possible in one contiguous
3211 * location that fills in all of the space. We want to
3212 * waste as little space as possible.
3213 */
3214 for (;;) {
3215 cnt = ftrace_allocate_records(pg, num_to_init);
3216 if (cnt < 0)
3217 goto free_pages;
3218
3219 num_to_init -= cnt;
3220 if (!num_to_init)
3c1720f0
SR
3221 break;
3222
a7900875
SR
3223 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
3224 if (!pg->next)
3225 goto free_pages;
3226
3c1720f0
SR
3227 pg = pg->next;
3228 }
3229
a7900875
SR
3230 return start_pg;
3231
3232 free_pages:
1f61be00
NK
3233 pg = start_pg;
3234 while (pg) {
db42523b
LT
3235 if (pg->records) {
3236 free_pages((unsigned long)pg->records, pg->order);
3237 ftrace_number_of_pages -= 1 << pg->order;
3238 }
a7900875
SR
3239 start_pg = pg->next;
3240 kfree(pg);
3241 pg = start_pg;
da537f0a 3242 ftrace_number_of_groups--;
a7900875
SR
3243 }
3244 pr_info("ftrace: FAILED to allocate memory for functions\n");
3245 return NULL;
3246}
3247
5072c59f
SR
3248#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
3249
3250struct ftrace_iterator {
98c4fd04 3251 loff_t pos;
4aeb6967 3252 loff_t func_pos;
5985ea8b 3253 loff_t mod_pos;
4aeb6967
SR
3254 struct ftrace_page *pg;
3255 struct dyn_ftrace *func;
3256 struct ftrace_func_probe *probe;
eee8ded1 3257 struct ftrace_func_entry *probe_entry;
4aeb6967 3258 struct trace_parser parser;
1cf41dd7 3259 struct ftrace_hash *hash;
33dc9b12 3260 struct ftrace_ops *ops;
5985ea8b
SRV
3261 struct trace_array *tr;
3262 struct list_head *mod_list;
eee8ded1 3263 int pidx;
4aeb6967
SR
3264 int idx;
3265 unsigned flags;
5072c59f
SR
3266};
3267
8fc0c701 3268static void *
eee8ded1 3269t_probe_next(struct seq_file *m, loff_t *pos)
8fc0c701
SR
3270{
3271 struct ftrace_iterator *iter = m->private;
d2afd57a 3272 struct trace_array *tr = iter->ops->private;
04ec7bb6 3273 struct list_head *func_probes;
eee8ded1
SRV
3274 struct ftrace_hash *hash;
3275 struct list_head *next;
4aeb6967 3276 struct hlist_node *hnd = NULL;
8fc0c701 3277 struct hlist_head *hhd;
eee8ded1 3278 int size;
8fc0c701 3279
8fc0c701 3280 (*pos)++;
98c4fd04 3281 iter->pos = *pos;
8fc0c701 3282
04ec7bb6 3283 if (!tr)
8fc0c701
SR
3284 return NULL;
3285
04ec7bb6
SRV
3286 func_probes = &tr->func_probes;
3287 if (list_empty(func_probes))
8fc0c701
SR
3288 return NULL;
3289
eee8ded1 3290 if (!iter->probe) {
04ec7bb6 3291 next = func_probes->next;
7b60f3d8 3292 iter->probe = list_entry(next, struct ftrace_func_probe, list);
eee8ded1
SRV
3293 }
3294
3295 if (iter->probe_entry)
3296 hnd = &iter->probe_entry->hlist;
3297
3298 hash = iter->probe->ops.func_hash->filter_hash;
7bd46644 3299
372e0d01
SRV
3300 /*
3301 * A probe being registered may temporarily have an empty hash
3302 * and it's at the end of the func_probes list.
3303 */
3304 if (!hash || hash == EMPTY_HASH)
7bd46644
NR
3305 return NULL;
3306
eee8ded1
SRV
3307 size = 1 << hash->size_bits;
3308
3309 retry:
3310 if (iter->pidx >= size) {
04ec7bb6 3311 if (iter->probe->list.next == func_probes)
eee8ded1
SRV
3312 return NULL;
3313 next = iter->probe->list.next;
7b60f3d8 3314 iter->probe = list_entry(next, struct ftrace_func_probe, list);
eee8ded1
SRV
3315 hash = iter->probe->ops.func_hash->filter_hash;
3316 size = 1 << hash->size_bits;
3317 iter->pidx = 0;
3318 }
3319
3320 hhd = &hash->buckets[iter->pidx];
8fc0c701
SR
3321
3322 if (hlist_empty(hhd)) {
eee8ded1 3323 iter->pidx++;
8fc0c701
SR
3324 hnd = NULL;
3325 goto retry;
3326 }
3327
3328 if (!hnd)
3329 hnd = hhd->first;
3330 else {
3331 hnd = hnd->next;
3332 if (!hnd) {
eee8ded1 3333 iter->pidx++;
8fc0c701
SR
3334 goto retry;
3335 }
3336 }
3337
4aeb6967
SR
3338 if (WARN_ON_ONCE(!hnd))
3339 return NULL;
3340
eee8ded1 3341 iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
4aeb6967
SR
3342
3343 return iter;
8fc0c701
SR
3344}
3345
eee8ded1 3346static void *t_probe_start(struct seq_file *m, loff_t *pos)
8fc0c701
SR
3347{
3348 struct ftrace_iterator *iter = m->private;
3349 void *p = NULL;
d82d6244
LZ
3350 loff_t l;
3351
eee8ded1 3352 if (!(iter->flags & FTRACE_ITER_DO_PROBES))
69a3083c
SR
3353 return NULL;
3354
5985ea8b 3355 if (iter->mod_pos > *pos)
2bccfffd 3356 return NULL;
8fc0c701 3357
eee8ded1
SRV
3358 iter->probe = NULL;
3359 iter->probe_entry = NULL;
3360 iter->pidx = 0;
5985ea8b 3361 for (l = 0; l <= (*pos - iter->mod_pos); ) {
eee8ded1 3362 p = t_probe_next(m, &l);
d82d6244
LZ
3363 if (!p)
3364 break;
3365 }
4aeb6967
SR
3366 if (!p)
3367 return NULL;
3368
98c4fd04 3369 /* Only set this if we have an item */
eee8ded1 3370 iter->flags |= FTRACE_ITER_PROBE;
98c4fd04 3371
4aeb6967 3372 return iter;
8fc0c701
SR
3373}
3374
4aeb6967 3375static int
eee8ded1 3376t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
8fc0c701 3377{
eee8ded1 3378 struct ftrace_func_entry *probe_entry;
7b60f3d8
SRV
3379 struct ftrace_probe_ops *probe_ops;
3380 struct ftrace_func_probe *probe;
8fc0c701 3381
eee8ded1
SRV
3382 probe = iter->probe;
3383 probe_entry = iter->probe_entry;
8fc0c701 3384
eee8ded1 3385 if (WARN_ON_ONCE(!probe || !probe_entry))
4aeb6967 3386 return -EIO;
8fc0c701 3387
7b60f3d8 3388 probe_ops = probe->probe_ops;
809dcf29 3389
7b60f3d8 3390 if (probe_ops->print)
6e444319 3391 return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);
8fc0c701 3392
7b60f3d8
SRV
3393 seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
3394 (void *)probe_ops->func);
8fc0c701
SR
3395
3396 return 0;
3397}
3398
5985ea8b
SRV
3399static void *
3400t_mod_next(struct seq_file *m, loff_t *pos)
3401{
3402 struct ftrace_iterator *iter = m->private;
3403 struct trace_array *tr = iter->tr;
3404
3405 (*pos)++;
3406 iter->pos = *pos;
3407
3408 iter->mod_list = iter->mod_list->next;
3409
3410 if (iter->mod_list == &tr->mod_trace ||
3411 iter->mod_list == &tr->mod_notrace) {
3412 iter->flags &= ~FTRACE_ITER_MOD;
3413 return NULL;
3414 }
3415
3416 iter->mod_pos = *pos;
3417
3418 return iter;
3419}
3420
3421static void *t_mod_start(struct seq_file *m, loff_t *pos)
3422{
3423 struct ftrace_iterator *iter = m->private;
3424 void *p = NULL;
3425 loff_t l;
3426
3427 if (iter->func_pos > *pos)
3428 return NULL;
3429
3430 iter->mod_pos = iter->func_pos;
3431
3432 /* probes are only available if tr is set */
3433 if (!iter->tr)
3434 return NULL;
3435
3436 for (l = 0; l <= (*pos - iter->func_pos); ) {
3437 p = t_mod_next(m, &l);
3438 if (!p)
3439 break;
3440 }
3441 if (!p) {
3442 iter->flags &= ~FTRACE_ITER_MOD;
3443 return t_probe_start(m, pos);
3444 }
3445
3446 /* Only set this if we have an item */
3447 iter->flags |= FTRACE_ITER_MOD;
3448
3449 return iter;
3450}
3451
3452static int
3453t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
3454{
3455 struct ftrace_mod_load *ftrace_mod;
3456 struct trace_array *tr = iter->tr;
3457
3458 if (WARN_ON_ONCE(!iter->mod_list) ||
3459 iter->mod_list == &tr->mod_trace ||
3460 iter->mod_list == &tr->mod_notrace)
3461 return -EIO;
3462
3463 ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);
3464
3465 if (ftrace_mod->func)
3466 seq_printf(m, "%s", ftrace_mod->func);
3467 else
3468 seq_putc(m, '*');
3469
3470 seq_printf(m, ":mod:%s\n", ftrace_mod->module);
3471
3472 return 0;
3473}
3474
e309b41d 3475static void *
5bd84629 3476t_func_next(struct seq_file *m, loff_t *pos)
5072c59f
SR
3477{
3478 struct ftrace_iterator *iter = m->private;
3479 struct dyn_ftrace *rec = NULL;
3480
3481 (*pos)++;
0c75a3ed 3482
5072c59f
SR
3483 retry:
3484 if (iter->idx >= iter->pg->index) {
3485 if (iter->pg->next) {
3486 iter->pg = iter->pg->next;
3487 iter->idx = 0;
3488 goto retry;
3489 }
3490 } else {
3491 rec = &iter->pg->records[iter->idx++];
c20489da
SRV
3492 if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3493 !ftrace_lookup_ip(iter->hash, rec->ip)) ||
647bcd03
SR
3494
3495 ((iter->flags & FTRACE_ITER_ENABLED) &&
23ea9c4d 3496 !(rec->flags & FTRACE_FL_ENABLED))) {
647bcd03 3497
5072c59f
SR
3498 rec = NULL;
3499 goto retry;
3500 }
3501 }
3502
4aeb6967 3503 if (!rec)
5bd84629 3504 return NULL;
4aeb6967 3505
5bd84629 3506 iter->pos = iter->func_pos = *pos;
4aeb6967
SR
3507 iter->func = rec;
3508
3509 return iter;
5072c59f
SR
3510}
3511
5bd84629
SRV
3512static void *
3513t_next(struct seq_file *m, void *v, loff_t *pos)
3514{
3515 struct ftrace_iterator *iter = m->private;
5985ea8b 3516 loff_t l = *pos; /* t_probe_start() must use original pos */
5bd84629
SRV
3517 void *ret;
3518
3519 if (unlikely(ftrace_disabled))
3520 return NULL;
3521
eee8ded1
SRV
3522 if (iter->flags & FTRACE_ITER_PROBE)
3523 return t_probe_next(m, pos);
5bd84629 3524
5985ea8b
SRV
3525 if (iter->flags & FTRACE_ITER_MOD)
3526 return t_mod_next(m, pos);
3527
5bd84629 3528 if (iter->flags & FTRACE_ITER_PRINTALL) {
eee8ded1 3529 /* next must increment pos, and t_probe_start does not */
5bd84629 3530 (*pos)++;
5985ea8b 3531 return t_mod_start(m, &l);
5bd84629
SRV
3532 }
3533
3534 ret = t_func_next(m, pos);
3535
3536 if (!ret)
5985ea8b 3537 return t_mod_start(m, &l);
5bd84629
SRV
3538
3539 return ret;
3540}
3541
98c4fd04
SR
3542static void reset_iter_read(struct ftrace_iterator *iter)
3543{
3544 iter->pos = 0;
3545 iter->func_pos = 0;
5985ea8b 3546 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
5072c59f
SR
3547}
3548
3549static void *t_start(struct seq_file *m, loff_t *pos)
3550{
3551 struct ftrace_iterator *iter = m->private;
3552 void *p = NULL;
694ce0a5 3553 loff_t l;
5072c59f 3554
8fc0c701 3555 mutex_lock(&ftrace_lock);
45a4a237
SR
3556
3557 if (unlikely(ftrace_disabled))
3558 return NULL;
3559
98c4fd04
SR
3560 /*
3561 * If an lseek was done, then reset and start from beginning.
3562 */
3563 if (*pos < iter->pos)
3564 reset_iter_read(iter);
3565
0c75a3ed
SR
3566 /*
3567 * For set_ftrace_filter reading, if we have the filter
3568 * off, we can short cut and just print out that all
3569 * functions are enabled.
3570 */
c20489da
SRV
3571 if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3572 ftrace_hash_empty(iter->hash)) {
43ff926a 3573 iter->func_pos = 1; /* Account for the message */
0c75a3ed 3574 if (*pos > 0)
5985ea8b 3575 return t_mod_start(m, pos);
0c75a3ed 3576 iter->flags |= FTRACE_ITER_PRINTALL;
df091625 3577 /* reset in case of seek/pread */
eee8ded1 3578 iter->flags &= ~FTRACE_ITER_PROBE;
0c75a3ed
SR
3579 return iter;
3580 }
3581
5985ea8b
SRV
3582 if (iter->flags & FTRACE_ITER_MOD)
3583 return t_mod_start(m, pos);
8fc0c701 3584
98c4fd04
SR
3585 /*
3586 * Unfortunately, we need to restart at ftrace_pages_start
3587 * every time we let go of the ftrace_mutex. This is because
3588 * those pointers can change without the lock.
3589 */
694ce0a5
LZ
3590 iter->pg = ftrace_pages_start;
3591 iter->idx = 0;
3592 for (l = 0; l <= *pos; ) {
5bd84629 3593 p = t_func_next(m, &l);
694ce0a5
LZ
3594 if (!p)
3595 break;
50cdaf08 3596 }
5821e1b7 3597
69a3083c 3598 if (!p)
5985ea8b 3599 return t_mod_start(m, pos);
4aeb6967
SR
3600
3601 return iter;
5072c59f
SR
3602}
3603
3604static void t_stop(struct seq_file *m, void *p)
3605{
8fc0c701 3606 mutex_unlock(&ftrace_lock);
5072c59f
SR
3607}
3608
15d5b02c
SRRH
3609void * __weak
3610arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3611{
3612 return NULL;
3613}
3614
3615static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
3616 struct dyn_ftrace *rec)
3617{
3618 void *ptr;
3619
3620 ptr = arch_ftrace_trampoline_func(ops, rec);
3621 if (ptr)
3622 seq_printf(m, " ->%pS", ptr);
3623}
3624
b39181f7
SRG
3625#ifdef FTRACE_MCOUNT_MAX_OFFSET
3626/*
3627 * Weak functions can still have an mcount/fentry that is saved in
3628 * the __mcount_loc section. These can be detected by having a
3629 * symbol offset greater than FTRACE_MCOUNT_MAX_OFFSET, as the
3630 * symbol found by kallsyms is not the function that the mcount/fentry
3631 * is part of. The offset is much greater in these cases.
3632 *
3633 * Test the record to make sure that the ip points to a valid kallsyms
3634 * and if not, mark it disabled.
3635 */
3636static int test_for_valid_rec(struct dyn_ftrace *rec)
3637{
3638 char str[KSYM_SYMBOL_LEN];
3639 unsigned long offset;
3640 const char *ret;
3641
3642 ret = kallsyms_lookup(rec->ip, NULL, &offset, NULL, str);
3643
3644 /* Weak functions can cause invalid addresses */
3645 if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
3646 rec->flags |= FTRACE_FL_DISABLED;
3647 return 0;
3648 }
3649 return 1;
3650}
3651
3652static struct workqueue_struct *ftrace_check_wq __initdata;
3653static struct work_struct ftrace_check_work __initdata;
3654
3655/*
3656 * Scan all the mcount/fentry entries to make sure they are valid.
3657 */
3658static __init void ftrace_check_work_func(struct work_struct *work)
3659{
3660 struct ftrace_page *pg;
3661 struct dyn_ftrace *rec;
3662
3663 mutex_lock(&ftrace_lock);
3664 do_for_each_ftrace_rec(pg, rec) {
3665 test_for_valid_rec(rec);
3666 } while_for_each_ftrace_rec();
3667 mutex_unlock(&ftrace_lock);
3668}
3669
3670static int __init ftrace_check_for_weak_functions(void)
3671{
3672 INIT_WORK(&ftrace_check_work, ftrace_check_work_func);
3673
3674 ftrace_check_wq = alloc_workqueue("ftrace_check_wq", WQ_UNBOUND, 0);
3675
3676 queue_work(ftrace_check_wq, &ftrace_check_work);
3677 return 0;
3678}
3679
3680static int __init ftrace_check_sync(void)
3681{
3682 /* Make sure the ftrace_check updates are finished */
3683 if (ftrace_check_wq)
3684 destroy_workqueue(ftrace_check_wq);
3685 return 0;
3686}
3687
3688late_initcall_sync(ftrace_check_sync);
3689subsys_initcall(ftrace_check_for_weak_functions);
3690
3691static int print_rec(struct seq_file *m, unsigned long ip)
3692{
3693 unsigned long offset;
3694 char str[KSYM_SYMBOL_LEN];
3695 char *modname;
3696 const char *ret;
3697
3698 ret = kallsyms_lookup(ip, NULL, &offset, &modname, str);
3699 /* Weak functions can cause invalid addresses */
3700 if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
3701 snprintf(str, KSYM_SYMBOL_LEN, "%s_%ld",
3702 FTRACE_INVALID_FUNCTION, offset);
3703 ret = NULL;
3704 }
3705
3706 seq_puts(m, str);
3707 if (modname)
3708 seq_printf(m, " [%s]", modname);
3709 return ret == NULL ? -1 : 0;
3710}
3711#else
3712static inline int test_for_valid_rec(struct dyn_ftrace *rec)
3713{
3714 return 1;
3715}
3716
3717static inline int print_rec(struct seq_file *m, unsigned long ip)
3718{
3719 seq_printf(m, "%ps", (void *)ip);
3720 return 0;
3721}
3722#endif
3723
5072c59f
SR
3724static int t_show(struct seq_file *m, void *v)
3725{
0c75a3ed 3726 struct ftrace_iterator *iter = m->private;
4aeb6967 3727 struct dyn_ftrace *rec;
5072c59f 3728
eee8ded1
SRV
3729 if (iter->flags & FTRACE_ITER_PROBE)
3730 return t_probe_show(m, iter);
8fc0c701 3731
5985ea8b
SRV
3732 if (iter->flags & FTRACE_ITER_MOD)
3733 return t_mod_show(m, iter);
3734
0c75a3ed 3735 if (iter->flags & FTRACE_ITER_PRINTALL) {
8c006cf7 3736 if (iter->flags & FTRACE_ITER_NOTRACE)
fa6f0cc7 3737 seq_puts(m, "#### no functions disabled ####\n");
8c006cf7 3738 else
fa6f0cc7 3739 seq_puts(m, "#### all functions enabled ####\n");
0c75a3ed
SR
3740 return 0;
3741 }
3742
4aeb6967
SR
3743 rec = iter->func;
3744
5072c59f
SR
3745 if (!rec)
3746 return 0;
3747
b39181f7
SRG
3748 if (print_rec(m, rec->ip)) {
3749 /* This should only happen when a rec is disabled */
3750 WARN_ON_ONCE(!(rec->flags & FTRACE_FL_DISABLED));
3751 seq_putc(m, '\n');
3752 return 0;
3753 }
3754
9674b2fa 3755 if (iter->flags & FTRACE_ITER_ENABLED) {
030f4e1c 3756 struct ftrace_ops *ops;
15d5b02c 3757
763e34e7 3758 seq_printf(m, " (%ld)%s%s%s",
0376bde1 3759 ftrace_rec_count(rec),
f8b8be8a 3760 rec->flags & FTRACE_FL_REGS ? " R" : " ",
763e34e7
SRV
3761 rec->flags & FTRACE_FL_IPMODIFY ? " I" : " ",
3762 rec->flags & FTRACE_FL_DIRECT ? " D" : " ");
9674b2fa 3763 if (rec->flags & FTRACE_FL_TRAMP_EN) {
5fecaa04 3764 ops = ftrace_find_tramp_ops_any(rec);
39daa7b9
SRRH
3765 if (ops) {
3766 do {
3767 seq_printf(m, "\ttramp: %pS (%pS)",
3768 (void *)ops->trampoline,
3769 (void *)ops->func);
030f4e1c 3770 add_trampoline_func(m, ops, rec);
39daa7b9
SRRH
3771 ops = ftrace_find_tramp_ops_next(rec, ops);
3772 } while (ops);
3773 } else
fa6f0cc7 3774 seq_puts(m, "\ttramp: ERROR!");
030f4e1c
SRRH
3775 } else {
3776 add_trampoline_func(m, NULL, rec);
9674b2fa 3777 }
763e34e7
SRV
3778 if (rec->flags & FTRACE_FL_DIRECT) {
3779 unsigned long direct;
3780
ff205766 3781 direct = ftrace_find_rec_direct(rec->ip);
763e34e7
SRV
3782 if (direct)
3783 seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
3784 }
026bb845 3785 }
9674b2fa 3786
fa6f0cc7 3787 seq_putc(m, '\n');
5072c59f
SR
3788
3789 return 0;
3790}
3791
88e9d34c 3792static const struct seq_operations show_ftrace_seq_ops = {
5072c59f
SR
3793 .start = t_start,
3794 .next = t_next,
3795 .stop = t_stop,
3796 .show = t_show,
3797};
3798
e309b41d 3799static int
5072c59f
SR
3800ftrace_avail_open(struct inode *inode, struct file *file)
3801{
3802 struct ftrace_iterator *iter;
17911ff3
SRV
3803 int ret;
3804
3805 ret = security_locked_down(LOCKDOWN_TRACEFS);
3806 if (ret)
3807 return ret;
5072c59f 3808
4eebcc81
SR
3809 if (unlikely(ftrace_disabled))
3810 return -ENODEV;
3811
50e18b94 3812 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
c1bc5919
SRV
3813 if (!iter)
3814 return -ENOMEM;
5072c59f 3815
c1bc5919
SRV
3816 iter->pg = ftrace_pages_start;
3817 iter->ops = &global_ops;
3818
3819 return 0;
5072c59f
SR
3820}
3821
647bcd03
SR
3822static int
3823ftrace_enabled_open(struct inode *inode, struct file *file)
3824{
3825 struct ftrace_iterator *iter;
647bcd03 3826
17911ff3
SRV
3827 /*
3828 * This shows us what functions are currently being
3829 * traced and by what. Not sure if we want lockdown
3830 * to hide such critical information for an admin.
3831 * Although, perhaps it can show information we don't
3832 * want people to see, but if something is tracing
3833 * something, we probably want to know about it.
3834 */
3835
50e18b94 3836 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
c1bc5919
SRV
3837 if (!iter)
3838 return -ENOMEM;
647bcd03 3839
c1bc5919
SRV
3840 iter->pg = ftrace_pages_start;
3841 iter->flags = FTRACE_ITER_ENABLED;
3842 iter->ops = &global_ops;
3843
3844 return 0;
647bcd03
SR
3845}
3846
fc13cb0c
SR
3847/**
3848 * ftrace_regex_open - initialize function tracer filter files
3849 * @ops: The ftrace_ops that hold the hash filters
3850 * @flag: The type of filter to process
3851 * @inode: The inode, usually passed in to your open routine
3852 * @file: The file, usually passed in to your open routine
3853 *
3854 * ftrace_regex_open() initializes the filter files for the
3855 * @ops. Depending on @flag it may process the filter hash or
3856 * the notrace hash of @ops. With this called from the open
3857 * routine, you can use ftrace_filter_write() for the write
3858 * routine if @flag has FTRACE_ITER_FILTER set, or
3859 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
098c879e 3860 * tracing_lseek() should be used as the lseek routine, and
fc13cb0c
SR
3861 * release must call ftrace_regex_release().
3862 */
3863int
f45948e8 3864ftrace_regex_open(struct ftrace_ops *ops, int flag,
1cf41dd7 3865 struct inode *inode, struct file *file)
5072c59f
SR
3866{
3867 struct ftrace_iterator *iter;
f45948e8 3868 struct ftrace_hash *hash;
673feb9d
SRV
3869 struct list_head *mod_head;
3870 struct trace_array *tr = ops->private;
9ef16693 3871 int ret = -ENOMEM;
5072c59f 3872
f04f24fb
MH
3873 ftrace_ops_init(ops);
3874
4eebcc81
SR
3875 if (unlikely(ftrace_disabled))
3876 return -ENODEV;
3877
8530dec6 3878 if (tracing_check_open_get_tr(tr))
9ef16693
SRV
3879 return -ENODEV;
3880
5072c59f
SR
3881 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3882 if (!iter)
9ef16693 3883 goto out;
5072c59f 3884
9ef16693
SRV
3885 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
3886 goto out;
689fd8b6 3887
3f2367ba
MH
3888 iter->ops = ops;
3889 iter->flags = flag;
5985ea8b 3890 iter->tr = tr;
3f2367ba 3891
33b7f99c 3892 mutex_lock(&ops->func_hash->regex_lock);
3f2367ba 3893
673feb9d 3894 if (flag & FTRACE_ITER_NOTRACE) {
33b7f99c 3895 hash = ops->func_hash->notrace_hash;
5985ea8b 3896 mod_head = tr ? &tr->mod_notrace : NULL;
673feb9d 3897 } else {
33b7f99c 3898 hash = ops->func_hash->filter_hash;
5985ea8b 3899 mod_head = tr ? &tr->mod_trace : NULL;
673feb9d 3900 }
f45948e8 3901
5985ea8b
SRV
3902 iter->mod_list = mod_head;
3903
33dc9b12 3904 if (file->f_mode & FMODE_WRITE) {
ef2fbe16
NK
3905 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3906
673feb9d 3907 if (file->f_flags & O_TRUNC) {
ef2fbe16 3908 iter->hash = alloc_ftrace_hash(size_bits);
673feb9d
SRV
3909 clear_ftrace_mod_list(mod_head);
3910 } else {
ef2fbe16 3911 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
673feb9d 3912 }
ef2fbe16 3913
33dc9b12
SR
3914 if (!iter->hash) {
3915 trace_parser_put(&iter->parser);
3f2367ba 3916 goto out_unlock;
33dc9b12 3917 }
c20489da
SRV
3918 } else
3919 iter->hash = hash;
1cf41dd7 3920
9ef16693
SRV
3921 ret = 0;
3922
5072c59f
SR
3923 if (file->f_mode & FMODE_READ) {
3924 iter->pg = ftrace_pages_start;
5072c59f
SR
3925
3926 ret = seq_open(file, &show_ftrace_seq_ops);
3927 if (!ret) {
3928 struct seq_file *m = file->private_data;
3929 m->private = iter;
79fe249c 3930 } else {
33dc9b12
SR
3931 /* Failed */
3932 free_ftrace_hash(iter->hash);
79fe249c 3933 trace_parser_put(&iter->parser);
79fe249c 3934 }
5072c59f
SR
3935 } else
3936 file->private_data = iter;
3f2367ba
MH
3937
3938 out_unlock:
33b7f99c 3939 mutex_unlock(&ops->func_hash->regex_lock);
5072c59f 3940
9ef16693
SRV
3941 out:
3942 if (ret) {
3943 kfree(iter);
3944 if (tr)
3945 trace_array_put(tr);
3946 }
3947
5072c59f
SR
3948 return ret;
3949}
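
/*
 * Editor's sketch (assumption: this mirrors how the in-tree filter files are
 * wired up later in this file; shown here only to make the open/write/lseek/
 * release contract described in the comment above concrete).
 */
static const struct file_operations example_filter_fops = {
	.open    = ftrace_filter_open,		/* calls ftrace_regex_open() */
	.read    = seq_read,
	.write   = ftrace_filter_write,
	.llseek  = tracing_lseek,
	.release = ftrace_regex_release,
};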
3950
41c52c0d
SR
3951static int
3952ftrace_filter_open(struct inode *inode, struct file *file)
3953{
e3b3e2e8
SRRH
3954 struct ftrace_ops *ops = inode->i_private;
3955
17911ff3 3956 /* Checks for tracefs lockdown */
e3b3e2e8 3957 return ftrace_regex_open(ops,
eee8ded1 3958 FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
69a3083c 3959 inode, file);
41c52c0d
SR
3960}
3961
3962static int
3963ftrace_notrace_open(struct inode *inode, struct file *file)
3964{
e3b3e2e8
SRRH
3965 struct ftrace_ops *ops = inode->i_private;
3966
17911ff3 3967 /* Checks for tracefs lockdown */
e3b3e2e8 3968 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
1cf41dd7 3969 inode, file);
41c52c0d
SR
3970}
3971
3ba00929
DS
3972/* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */
3973struct ftrace_glob {
3974 char *search;
3975 unsigned len;
3976 int type;
3977};
3978
7132e2d6
TJB
3979/*
3980 * If symbols in an architecture don't correspond exactly to the user-visible
3981 * name of what they represent, it is possible to define this function to
3982 * perform the necessary adjustments.
3983*/
3984char * __weak arch_ftrace_match_adjust(char *str, const char *search)
3985{
3986 return str;
3987}
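
/*
 * Editor's sketch (based on the powerpc64 ELFv1 case, shown for
 * illustration): dot-prefixed symbols there differ from the names users
 * type, so an architecture override can strip the leading '.' before
 * matching.  An arch would provide this in its own ftrace code, roughly as:
 *
 *	char *arch_ftrace_match_adjust(char *str, const char *search)
 *	{
 *		if (str[0] == '.' && search[0] != '.')
 *			return str + 1;
 *		return str;
 *	}
 */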
3988
3ba00929 3989static int ftrace_match(char *str, struct ftrace_glob *g)
9f4801e3 3990{
9f4801e3 3991 int matched = 0;
751e9983 3992 int slen;
9f4801e3 3993
7132e2d6
TJB
3994 str = arch_ftrace_match_adjust(str, g->search);
3995
3ba00929 3996 switch (g->type) {
9f4801e3 3997 case MATCH_FULL:
3ba00929 3998 if (strcmp(str, g->search) == 0)
9f4801e3
SR
3999 matched = 1;
4000 break;
4001 case MATCH_FRONT_ONLY:
3ba00929 4002 if (strncmp(str, g->search, g->len) == 0)
9f4801e3
SR
4003 matched = 1;
4004 break;
4005 case MATCH_MIDDLE_ONLY:
3ba00929 4006 if (strstr(str, g->search))
9f4801e3
SR
4007 matched = 1;
4008 break;
4009 case MATCH_END_ONLY:
751e9983 4010 slen = strlen(str);
3ba00929
DS
4011 if (slen >= g->len &&
4012 memcmp(str + slen - g->len, g->search, g->len) == 0)
9f4801e3
SR
4013 matched = 1;
4014 break;
60f1d5e3
MH
4015 case MATCH_GLOB:
4016 if (glob_match(g->search, str))
4017 matched = 1;
4018 break;
9f4801e3
SR
4019 }
4020
4021 return matched;
4022}
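
/*
 * Editor's sketch (hypothetical helper, illustrative only): how the glob
 * machinery above is typically driven.  filter_parse_regex() classifies the
 * pattern and strips the wildcards; ftrace_match() then applies it.
 */
static inline bool example_glob_matches(char *pattern, char *sym)
{
	struct ftrace_glob g = { .type = MATCH_FULL };
	int clear = 0;

	/* e.g. "sched_*" becomes MATCH_FRONT_ONLY with g.search = "sched_" */
	g.type = filter_parse_regex(pattern, strlen(pattern), &g.search, &clear);
	g.len = strlen(g.search);

	return ftrace_match(sym, &g) != 0;
}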
4023
b448c4e3 4024static int
f0a3b154 4025enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
996e87be 4026{
b448c4e3 4027 struct ftrace_func_entry *entry;
b448c4e3
SR
4028 int ret = 0;
4029
1cf41dd7 4030 entry = ftrace_lookup_ip(hash, rec->ip);
f0a3b154 4031 if (clear_filter) {
1cf41dd7
SR
4032 /* Do nothing if it doesn't exist */
4033 if (!entry)
4034 return 0;
b448c4e3 4035
33dc9b12 4036 free_hash_entry(hash, entry);
1cf41dd7
SR
4037 } else {
4038 /* Do nothing if it exists */
4039 if (entry)
4040 return 0;
b448c4e3 4041
1cf41dd7 4042 ret = add_hash_entry(hash, rec->ip);
b448c4e3
SR
4043 }
4044 return ret;
996e87be
SR
4045}
4046
f79b3f33
SRV
4047static int
4048add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
4049 int clear_filter)
4050{
4051 long index = simple_strtoul(func_g->search, NULL, 0);
4052 struct ftrace_page *pg;
4053 struct dyn_ftrace *rec;
4054
4055 /* The index starts at 1 */
4056 if (--index < 0)
4057 return 0;
4058
4059 do_for_each_ftrace_rec(pg, rec) {
4060 if (pg->index <= index) {
4061 index -= pg->index;
4062 /* this is a double loop, break goes to the next page */
4063 break;
4064 }
4065 rec = &pg->records[index];
4066 enter_record(hash, rec, clear_filter);
4067 return 1;
4068 } while_for_each_ftrace_rec();
4069 return 0;
4070}
4071
b39181f7
SRG
4072#ifdef FTRACE_MCOUNT_MAX_OFFSET
4073static int lookup_ip(unsigned long ip, char **modname, char *str)
4074{
4075 unsigned long offset;
4076
4077 kallsyms_lookup(ip, NULL, &offset, modname, str);
4078 if (offset > FTRACE_MCOUNT_MAX_OFFSET)
4079 return -1;
4080 return 0;
4081}
4082#else
4083static int lookup_ip(unsigned long ip, char **modname, char *str)
4084{
4085 kallsyms_lookup(ip, NULL, NULL, modname, str);
4086 return 0;
4087}
4088#endif
4089
64e7c440 4090static int
0b507e1e
DS
4091ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
4092 struct ftrace_glob *mod_g, int exclude_mod)
64e7c440
SR
4093{
4094 char str[KSYM_SYMBOL_LEN];
b9df92d2
SR
4095 char *modname;
4096
b39181f7
SRG
4097 if (lookup_ip(rec->ip, &modname, str)) {
4098 /* This should only happen when a rec is disabled */
4099 WARN_ON_ONCE(system_state == SYSTEM_RUNNING &&
4100 !(rec->flags & FTRACE_FL_DISABLED));
4101 return 0;
4102 }
b9df92d2 4103
0b507e1e
DS
4104 if (mod_g) {
4105 int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
4106
4107 /* blank module name to match all modules */
4108 if (!mod_g->len) {
4109 /* blank module globbing: modname xor exclude_mod */
77c0edde 4110 if (!exclude_mod != !modname)
0b507e1e
DS
4111 goto func_match;
4112 return 0;
4113 }
4114
77c0edde
SRV
4115 /*
4116 * exclude_mod is set to trace everything but the given
4117 * module. If it is set and the module matches, then
4118 * return 0. If it is not set, and the module doesn't match
4119 * also return 0. Otherwise, check the function to see if
4120 * that matches.
4121 */
4122 if (!mod_matches == !exclude_mod)
b9df92d2 4123 return 0;
0b507e1e 4124func_match:
b9df92d2 4125 /* blank search means to match all funcs in the mod */
3ba00929 4126 if (!func_g->len)
b9df92d2
SR
4127 return 1;
4128 }
64e7c440 4129
3ba00929 4130 return ftrace_match(str, func_g);
64e7c440
SR
4131}
4132
1cf41dd7 4133static int
3ba00929 4134match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
9f4801e3 4135{
9f4801e3
SR
4136 struct ftrace_page *pg;
4137 struct dyn_ftrace *rec;
3ba00929 4138 struct ftrace_glob func_g = { .type = MATCH_FULL };
0b507e1e
DS
4139 struct ftrace_glob mod_g = { .type = MATCH_FULL };
4140 struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
4141 int exclude_mod = 0;
311d16da 4142 int found = 0;
b448c4e3 4143 int ret;
2e028c4f 4144 int clear_filter = 0;
9f4801e3 4145
0b507e1e 4146 if (func) {
3ba00929
DS
4147 func_g.type = filter_parse_regex(func, len, &func_g.search,
4148 &clear_filter);
4149 func_g.len = strlen(func_g.search);
b9df92d2 4150 }
9f4801e3 4151
0b507e1e
DS
4152 if (mod) {
4153 mod_g.type = filter_parse_regex(mod, strlen(mod),
4154 &mod_g.search, &exclude_mod);
4155 mod_g.len = strlen(mod_g.search);
b9df92d2 4156 }
9f4801e3 4157
52baf119 4158 mutex_lock(&ftrace_lock);
265c831c 4159
b9df92d2
SR
4160 if (unlikely(ftrace_disabled))
4161 goto out_unlock;
9f4801e3 4162
f79b3f33
SRV
4163 if (func_g.type == MATCH_INDEX) {
4164 found = add_rec_by_index(hash, &func_g, clear_filter);
4165 goto out_unlock;
4166 }
4167
265c831c 4168 do_for_each_ftrace_rec(pg, rec) {
546fece4
SRRH
4169
4170 if (rec->flags & FTRACE_FL_DISABLED)
4171 continue;
4172
0b507e1e 4173 if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
f0a3b154 4174 ret = enter_record(hash, rec, clear_filter);
b448c4e3
SR
4175 if (ret < 0) {
4176 found = ret;
4177 goto out_unlock;
4178 }
311d16da 4179 found = 1;
265c831c
SR
4180 }
4181 } while_for_each_ftrace_rec();
b9df92d2 4182 out_unlock:
52baf119 4183 mutex_unlock(&ftrace_lock);
311d16da
LZ
4184
4185 return found;
5072c59f
SR
4186}
4187
64e7c440 4188static int
1cf41dd7 4189ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
64e7c440 4190{
f0a3b154 4191 return match_records(hash, buff, len, NULL);
64e7c440
SR
4192}
4193
e16b35dd
SRV
4194static void ftrace_ops_update_code(struct ftrace_ops *ops,
4195 struct ftrace_ops_hash *old_hash)
4196{
4197 struct ftrace_ops *op;
4198
4199 if (!ftrace_enabled)
4200 return;
4201
4202 if (ops->flags & FTRACE_OPS_FL_ENABLED) {
4203 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
4204 return;
4205 }
4206
4207 /*
4208 * If this is the shared global_ops filter, then we need to
4209 * check if there is another ops that shares it, is enabled.
4210 * If so, we still need to run the modify code.
4211 */
4212 if (ops->func_hash != &global_ops.local_hash)
4213 return;
4214
4215 do_for_each_ftrace_op(op, ftrace_ops_list) {
4216 if (op->func_hash == &global_ops.local_hash &&
4217 op->flags & FTRACE_OPS_FL_ENABLED) {
4218 ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
4219 /* Only need to do this once */
4220 return;
4221 }
4222 } while_for_each_ftrace_op(op);
4223}
4224
4225static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
4226 struct ftrace_hash **orig_hash,
4227 struct ftrace_hash *hash,
4228 int enable)
4229{
4230 struct ftrace_ops_hash old_hash_ops;
4231 struct ftrace_hash *old_hash;
4232 int ret;
4233
4234 old_hash = *orig_hash;
4235 old_hash_ops.filter_hash = ops->func_hash->filter_hash;
4236 old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
4237 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
4238 if (!ret) {
4239 ftrace_ops_update_code(ops, &old_hash_ops);
4240 free_ftrace_hash_rcu(old_hash);
4241 }
4242 return ret;
4243}
64e7c440 4244
673feb9d
SRV
4245static bool module_exists(const char *module)
4246{
4247 /* All modules have the symbol __this_module */
0f5e5a3a 4248 static const char this_mod[] = "__this_module";
419e9fe5 4249 char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
673feb9d
SRV
4250 unsigned long val;
4251 int n;
4252
419e9fe5 4253 n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);
673feb9d 4254
419e9fe5 4255 if (n > sizeof(modname) - 1)
673feb9d
SRV
4256 return false;
4257
4258 val = module_kallsyms_lookup_name(modname);
4259 return val != 0;
4260}
4261
4262static int cache_mod(struct trace_array *tr,
4263 const char *func, char *module, int enable)
4264{
4265 struct ftrace_mod_load *ftrace_mod, *n;
4266 struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
4267 int ret;
4268
4269 mutex_lock(&ftrace_lock);
4270
4271 /* We do not cache inverse filters */
4272 if (func[0] == '!') {
4273 func++;
4274 ret = -EINVAL;
4275
4276 /* Look to remove this hash */
4277 list_for_each_entry_safe(ftrace_mod, n, head, list) {
4278 if (strcmp(ftrace_mod->module, module) != 0)
4279 continue;
4280
4281 /* no func matches all */
44925dff 4282 if (strcmp(func, "*") == 0 ||
673feb9d
SRV
4283 (ftrace_mod->func &&
4284 strcmp(ftrace_mod->func, func) == 0)) {
4285 ret = 0;
4286 free_ftrace_mod(ftrace_mod);
4287 continue;
4288 }
4289 }
4290 goto out;
4291 }
4292
4293 ret = -EINVAL;
4294 /* We only care about modules that have not been loaded yet */
4295 if (module_exists(module))
4296 goto out;
4297
4298 /* Save this string off, and execute it when the module is loaded */
4299 ret = ftrace_add_mod(tr, func, module, enable);
4300 out:
4301 mutex_unlock(&ftrace_lock);
4302
4303 return ret;
4304}
4305
d7fbf8df
SRV
4306static int
4307ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4308 int reset, int enable);
4309
69449bbd 4310#ifdef CONFIG_MODULES
d7fbf8df
SRV
4311static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
4312 char *mod, bool enable)
4313{
4314 struct ftrace_mod_load *ftrace_mod, *n;
4315 struct ftrace_hash **orig_hash, *new_hash;
4316 LIST_HEAD(process_mods);
4317 char *func;
d7fbf8df
SRV
4318
4319 mutex_lock(&ops->func_hash->regex_lock);
4320
4321 if (enable)
4322 orig_hash = &ops->func_hash->filter_hash;
4323 else
4324 orig_hash = &ops->func_hash->notrace_hash;
4325
4326 new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
4327 *orig_hash);
4328 if (!new_hash)
3b58a3c7 4329 goto out; /* warn? */
d7fbf8df
SRV
4330
4331 mutex_lock(&ftrace_lock);
4332
4333 list_for_each_entry_safe(ftrace_mod, n, head, list) {
4334
4335 if (strcmp(ftrace_mod->module, mod) != 0)
4336 continue;
4337
4338 if (ftrace_mod->func)
4339 func = kstrdup(ftrace_mod->func, GFP_KERNEL);
4340 else
4341 func = kstrdup("*", GFP_KERNEL);
4342
4343 if (!func) /* warn? */
4344 continue;
4345
3ecda644 4346 list_move(&ftrace_mod->list, &process_mods);
d7fbf8df
SRV
4347
4348 /* Use the newly allocated func, as it may be "*" */
4349 kfree(ftrace_mod->func);
4350 ftrace_mod->func = func;
4351 }
4352
4353 mutex_unlock(&ftrace_lock);
4354
4355 list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {
4356
4357 func = ftrace_mod->func;
4358
4359 /* Grabs ftrace_lock, which is why we have this extra step */
4360 match_records(new_hash, func, strlen(func), mod);
4361 free_ftrace_mod(ftrace_mod);
4362 }
4363
8c08f0d5
SRV
4364 if (enable && list_empty(head))
4365 new_hash->flags &= ~FTRACE_HASH_FL_MOD;
4366
d7fbf8df
SRV
4367 mutex_lock(&ftrace_lock);
4368
045e269c 4369 ftrace_hash_move_and_update_ops(ops, orig_hash,
d7fbf8df
SRV
4370 new_hash, enable);
4371 mutex_unlock(&ftrace_lock);
4372
3b58a3c7 4373 out:
d7fbf8df
SRV
4374 mutex_unlock(&ops->func_hash->regex_lock);
4375
4376 free_ftrace_hash(new_hash);
4377}
4378
4379static void process_cached_mods(const char *mod_name)
4380{
4381 struct trace_array *tr;
4382 char *mod;
4383
4384 mod = kstrdup(mod_name, GFP_KERNEL);
4385 if (!mod)
4386 return;
4387
4388 mutex_lock(&trace_types_lock);
4389 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
4390 if (!list_empty(&tr->mod_trace))
4391 process_mod_list(&tr->mod_trace, tr->ops, mod, true);
4392 if (!list_empty(&tr->mod_notrace))
4393 process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
4394 }
4395 mutex_unlock(&trace_types_lock);
4396
4397 kfree(mod);
4398}
69449bbd 4399#endif
d7fbf8df 4400
f6180773
SR
4401/*
4402 * We register the module command as a template to show others how
4403 * to register a command as well.
4404 */
4405
4406static int
04ec7bb6 4407ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
673feb9d 4408 char *func_orig, char *cmd, char *module, int enable)
f6180773 4409{
673feb9d 4410 char *func;
5e3949f0 4411 int ret;
f6180773 4412
673feb9d
SRV
4413 /* match_records() modifies func, and we need the original */
4414 func = kstrdup(func_orig, GFP_KERNEL);
4415 if (!func)
4416 return -ENOMEM;
4417
f6180773
SR
4418 /*
4419 * cmd == 'mod' because we only registered this func
4420 * for the 'mod' ftrace_func_command.
4421 * But if you register one func with multiple commands,
4422 * you can tell which command was used by the cmd
4423 * parameter.
4424 */
f0a3b154 4425 ret = match_records(hash, func, strlen(func), module);
673feb9d
SRV
4426 kfree(func);
4427
b448c4e3 4428 if (!ret)
673feb9d 4429 return cache_mod(tr, func_orig, module, enable);
b448c4e3
SR
4430 if (ret < 0)
4431 return ret;
b448c4e3 4432 return 0;
f6180773
SR
4433}
4434
4435static struct ftrace_func_command ftrace_mod_cmd = {
4436 .name = "mod",
4437 .func = ftrace_mod_callback,
4438};
4439
4440static int __init ftrace_mod_cmd_init(void)
4441{
4442 return register_ftrace_command(&ftrace_mod_cmd);
4443}
6f415672 4444core_initcall(ftrace_mod_cmd_init);
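
/*
 * Editor's sketch (hypothetical "mycmd" command, illustrative only): what a
 * second command registered through the same template would look like.  The
 * callback signature matches struct ftrace_func_command::func and would be
 * reached by writing e.g. 'do_page_fault:mycmd:arg' to set_ftrace_filter.
 */
static int my_cmd_func(struct trace_array *tr, struct ftrace_hash *hash,
		       char *func, char *cmd, char *param, int enable)
{
	/* here @func = "do_page_fault", @cmd = "mycmd", @param = "arg" */
	return 0;
}

static struct ftrace_func_command my_cmd = {
	.name = "mycmd",
	.func = my_cmd_func,
};

static int __init my_cmd_init(void)
{
	return register_ftrace_command(&my_cmd);
}
core_initcall(my_cmd_init);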
f6180773 4445
2f5f6ad9 4446static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
d19ad077 4447 struct ftrace_ops *op, struct ftrace_regs *fregs)
59df055f 4448{
eee8ded1 4449 struct ftrace_probe_ops *probe_ops;
7b60f3d8 4450 struct ftrace_func_probe *probe;
59df055f 4451
7b60f3d8
SRV
4452 probe = container_of(op, struct ftrace_func_probe, ops);
4453 probe_ops = probe->probe_ops;
59df055f
SR
4454
4455 /*
4456 * Disable preemption for these calls to prevent an RCU grace
4457 * period. This syncs the hash iteration and freeing of items
4458 * on the hash. rcu_read_lock is too dangerous here.
4459 */
5168ae50 4460 preempt_disable_notrace();
6e444319 4461 probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
5168ae50 4462 preempt_enable_notrace();
59df055f
SR
4463}
4464
41794f19
SRV
4465struct ftrace_func_map {
4466 struct ftrace_func_entry entry;
4467 void *data;
59df055f
SR
4468};
4469
41794f19
SRV
4470struct ftrace_func_mapper {
4471 struct ftrace_hash hash;
4472};
59df055f 4473
41794f19
SRV
4474/**
4475 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
4476 *
4477 * Returns a ftrace_func_mapper descriptor that can be used to map ips to data.
4478 */
4479struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
59df055f 4480{
41794f19 4481 struct ftrace_hash *hash;
59df055f 4482
41794f19
SRV
4483 /*
4484 * The mapper is simply a ftrace_hash, but since the entries
4485 * in the hash are not ftrace_func_entry type, we define it
4486 * as a separate structure.
4487 */
4488 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4489 return (struct ftrace_func_mapper *)hash;
4490}
59df055f 4491
41794f19
SRV
4492/**
4493 * ftrace_func_mapper_find_ip - Find some data mapped to an ip
4494 * @mapper: The mapper that has the ip maps
4495 * @ip: the instruction pointer to find the data for
4496 *
4497 * Returns the data mapped to @ip if found otherwise NULL. The return
4498 * is actually the address of the mapper data pointer. The address is
4499 * returned for use cases where the data is no bigger than a long, and
4500 * the user can use the data pointer as its data instead of having to
4501 * allocate more memory for the reference.
4502 */
4503void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
4504 unsigned long ip)
4505{
4506 struct ftrace_func_entry *entry;
4507 struct ftrace_func_map *map;
59df055f 4508
41794f19
SRV
4509 entry = ftrace_lookup_ip(&mapper->hash, ip);
4510 if (!entry)
4511 return NULL;
b848914c 4512
41794f19
SRV
4513 map = (struct ftrace_func_map *)entry;
4514 return &map->data;
59df055f
SR
4515}
4516
41794f19
SRV
4517/**
4518 * ftrace_func_mapper_add_ip - Map some data to an ip
4519 * @mapper: The mapper that has the ip maps
4520 * @ip: The instruction pointer address to map @data to
4521 * @data: The data to map to @ip
4522 *
fdda88d3 4523 * Returns 0 on success, otherwise an error.
41794f19
SRV
4524 */
4525int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
4526 unsigned long ip, void *data)
59df055f 4527{
41794f19
SRV
4528 struct ftrace_func_entry *entry;
4529 struct ftrace_func_map *map;
59df055f 4530
41794f19
SRV
4531 entry = ftrace_lookup_ip(&mapper->hash, ip);
4532 if (entry)
4533 return -EBUSY;
59df055f 4534
41794f19
SRV
4535 map = kmalloc(sizeof(*map), GFP_KERNEL);
4536 if (!map)
4537 return -ENOMEM;
59df055f 4538
41794f19
SRV
4539 map->entry.ip = ip;
4540 map->data = data;
b848914c 4541
41794f19 4542 __add_hash_entry(&mapper->hash, &map->entry);
59df055f 4543
41794f19
SRV
4544 return 0;
4545}
59df055f 4546
41794f19
SRV
4547/**
4548 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
4549 * @mapper: The mapper that has the ip maps
4550 * @ip: The instruction pointer address to remove the data from
4551 *
4552 * Returns the data if it is found, otherwise NULL.
7d54c15c 4553 * Note, if the data pointer is used as the data itself (see
41794f19
SRV
4554 * ftrace_func_mapper_find_ip()), then the return value may be meaningless
4555 * if the data pointer was set to zero.
4556 */
4557void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
4558 unsigned long ip)
59df055f 4559{
41794f19
SRV
4560 struct ftrace_func_entry *entry;
4561 struct ftrace_func_map *map;
4562 void *data;
4563
4564 entry = ftrace_lookup_ip(&mapper->hash, ip);
4565 if (!entry)
4566 return NULL;
4567
4568 map = (struct ftrace_func_map *)entry;
4569 data = map->data;
4570
4571 remove_hash_entry(&mapper->hash, entry);
59df055f 4572 kfree(entry);
41794f19
SRV
4573
4574 return data;
4575}
4576
4577/**
4578 * free_ftrace_func_mapper - free a mapping of ips and data
4579 * @mapper: The mapper that has the ip maps
4580 * @free_func: A function to be called on each data item.
4581 *
4582 * This is used to free the function mapper. The @free_func is optional
4583 * and can be used if the data needs to be freed as well.
4584 */
4585void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
4586 ftrace_mapper_func free_func)
4587{
4588 struct ftrace_func_entry *entry;
4589 struct ftrace_func_map *map;
4590 struct hlist_head *hhd;
04e03d9a
WL
4591 int size, i;
4592
4593 if (!mapper)
4594 return;
41794f19
SRV
4595
4596 if (free_func && mapper->hash.count) {
04e03d9a 4597 size = 1 << mapper->hash.size_bits;
41794f19
SRV
4598 for (i = 0; i < size; i++) {
4599 hhd = &mapper->hash.buckets[i];
4600 hlist_for_each_entry(entry, hhd, hlist) {
4601 map = (struct ftrace_func_map *)entry;
4602 free_func(map);
4603 }
4604 }
4605 }
4606 free_ftrace_hash(&mapper->hash);
4607}
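
/*
 * Editor's sketch (hypothetical helper, illustrative only): the usual life
 * cycle of a mapper as used by the function-trigger probes.  Here the per-ip
 * "data" is just a counter stored in the pointer slot itself, so the slot
 * returned by ftrace_func_mapper_find_ip() is updated in place.
 */
static __maybe_unused void example_mapper_usage(unsigned long ip)
{
	struct ftrace_func_mapper *mapper;
	long *count;

	mapper = allocate_ftrace_func_mapper();
	if (!mapper)
		return;

	if (ftrace_func_mapper_add_ip(mapper, ip, (void *)1L) < 0)
		goto out;

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	if (count)
		(*count)++;	/* ip now maps to 2 */

	ftrace_func_mapper_remove_ip(mapper, ip);
out:
	free_ftrace_func_mapper(mapper, NULL);
}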
4608
7b60f3d8
SRV
4609static void release_probe(struct ftrace_func_probe *probe)
4610{
4611 struct ftrace_probe_ops *probe_ops;
4612
4613 mutex_lock(&ftrace_lock);
4614
4615 WARN_ON(probe->ref <= 0);
4616
4617 /* Subtract the ref that was used to protect this instance */
4618 probe->ref--;
4619
4620 if (!probe->ref) {
4621 probe_ops = probe->probe_ops;
6e444319
SRV
4622 /*
4623 * Sending zero as ip tells probe_ops to free
4624 * the probe->data itself
4625 */
4626 if (probe_ops->free)
4627 probe_ops->free(probe_ops, probe->tr, 0, probe->data);
7b60f3d8
SRV
4628 list_del(&probe->list);
4629 kfree(probe);
4630 }
4631 mutex_unlock(&ftrace_lock);
4632}
4633
4634static void acquire_probe_locked(struct ftrace_func_probe *probe)
4635{
4636 /*
4637 * Add one ref to keep it from being freed when releasing the
4638 * ftrace_lock mutex.
4639 */
4640 probe->ref++;
59df055f
SR
4641}
4642
59df055f 4643int
04ec7bb6 4644register_ftrace_function_probe(char *glob, struct trace_array *tr,
7b60f3d8
SRV
4645 struct ftrace_probe_ops *probe_ops,
4646 void *data)
59df055f 4647{
ba27d855 4648 struct ftrace_func_probe *probe = NULL, *iter;
1ec3a81a 4649 struct ftrace_func_entry *entry;
1ec3a81a
SRV
4650 struct ftrace_hash **orig_hash;
4651 struct ftrace_hash *old_hash;
e1df4cb6 4652 struct ftrace_hash *hash;
59df055f 4653 int count = 0;
1ec3a81a 4654 int size;
e1df4cb6 4655 int ret;
1ec3a81a 4656 int i;
59df055f 4657
04ec7bb6 4658 if (WARN_ON(!tr))
59df055f
SR
4659 return -EINVAL;
4660
1ec3a81a
SRV
4661 /* We do not support '!' for function probes */
4662 if (WARN_ON(glob[0] == '!'))
59df055f 4663 return -EINVAL;
59df055f 4664
7485058e 4665
7b60f3d8
SRV
4666 mutex_lock(&ftrace_lock);
4667 /* Check if the probe_ops is already registered */
ba27d855
JK
4668 list_for_each_entry(iter, &tr->func_probes, list) {
4669 if (iter->probe_ops == probe_ops) {
4670 probe = iter;
7b60f3d8 4671 break;
ba27d855 4672 }
e1df4cb6 4673 }
ba27d855 4674 if (!probe) {
7b60f3d8
SRV
4675 probe = kzalloc(sizeof(*probe), GFP_KERNEL);
4676 if (!probe) {
4677 mutex_unlock(&ftrace_lock);
4678 return -ENOMEM;
4679 }
4680 probe->probe_ops = probe_ops;
4681 probe->ops.func = function_trace_probe_call;
4682 probe->tr = tr;
4683 ftrace_ops_init(&probe->ops);
4684 list_add(&probe->list, &tr->func_probes);
e1df4cb6 4685 }
59df055f 4686
7b60f3d8 4687 acquire_probe_locked(probe);
5ae0bf59 4688
7b60f3d8 4689 mutex_unlock(&ftrace_lock);
59df055f 4690
372e0d01
SRV
4691 /*
4692 * Note, there's a small window here that the func_hash->filter_hash
fdda88d3 4693 * may be NULL or empty. Need to be careful when reading the loop.
372e0d01 4694 */
7b60f3d8 4695 mutex_lock(&probe->ops.func_hash->regex_lock);
546fece4 4696
7b60f3d8 4697 orig_hash = &probe->ops.func_hash->filter_hash;
1ec3a81a
SRV
4698 old_hash = *orig_hash;
4699 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
59df055f 4700
5b0022dd
NR
4701 if (!hash) {
4702 ret = -ENOMEM;
4703 goto out;
4704 }
4705
1ec3a81a 4706 ret = ftrace_match_records(hash, glob, strlen(glob));
59df055f 4707
1ec3a81a
SRV
4708 /* Nothing found? */
4709 if (!ret)
4710 ret = -EINVAL;
59df055f 4711
1ec3a81a
SRV
4712 if (ret < 0)
4713 goto out;
59df055f 4714
1ec3a81a
SRV
4715 size = 1 << hash->size_bits;
4716 for (i = 0; i < size; i++) {
4717 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4718 if (ftrace_lookup_ip(old_hash, entry->ip))
59df055f 4719 continue;
1ec3a81a
SRV
4720 /*
4721 * The caller might want to do something special
4722 * for each function we find. We call the callback
4723 * to give the caller an opportunity to do so.
4724 */
7b60f3d8
SRV
4725 if (probe_ops->init) {
4726 ret = probe_ops->init(probe_ops, tr,
6e444319
SRV
4727 entry->ip, data,
4728 &probe->data);
4729 if (ret < 0) {
4730 if (probe_ops->free && count)
4731 probe_ops->free(probe_ops, tr,
4732 0, probe->data);
4733 probe->data = NULL;
eee8ded1 4734 goto out;
6e444319 4735 }
59df055f 4736 }
1ec3a81a 4737 count++;
59df055f 4738 }
1ec3a81a 4739 }
59df055f 4740
1ec3a81a 4741 mutex_lock(&ftrace_lock);
59df055f 4742
7b60f3d8
SRV
4743 if (!count) {
4744 /* Nothing was added? */
4745 ret = -EINVAL;
4746 goto out_unlock;
4747 }
e1df4cb6 4748
7b60f3d8
SRV
4749 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4750 hash, 1);
1ec3a81a 4751 if (ret < 0)
8d70725e 4752 goto err_unlock;
8252ecf3 4753
7b60f3d8
SRV
4754 /* One ref for each new function traced */
4755 probe->ref += count;
8252ecf3 4756
7b60f3d8
SRV
4757 if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
4758 ret = ftrace_startup(&probe->ops, 0);
e1df4cb6 4759
59df055f 4760 out_unlock:
5ae0bf59 4761 mutex_unlock(&ftrace_lock);
8252ecf3 4762
3296fc4e 4763 if (!ret)
1ec3a81a 4764 ret = count;
5ae0bf59 4765 out:
7b60f3d8 4766 mutex_unlock(&probe->ops.func_hash->regex_lock);
e1df4cb6 4767 free_ftrace_hash(hash);
59df055f 4768
7b60f3d8 4769 release_probe(probe);
59df055f 4770
1ec3a81a 4771 return ret;
59df055f 4772
8d70725e 4773 err_unlock:
7b60f3d8 4774 if (!probe_ops->free || !count)
8d70725e
SRV
4775 goto out_unlock;
4776
4777 /* Failed to do the move, need to call the free functions */
4778 for (i = 0; i < size; i++) {
4779 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4780 if (ftrace_lookup_ip(old_hash, entry->ip))
4781 continue;
6e444319 4782 probe_ops->free(probe_ops, tr, entry->ip, probe->data);
8d70725e
SRV
4783 }
4784 }
4785 goto out_unlock;
59df055f
SR
4786}
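
/*
 * Editor's sketch (hypothetical names, illustrative only): how a caller such
 * as the traceon/stacktrace triggers attaches a probe to every function
 * matching a glob.  Only ->func is mandatory; ->init/->free manage the
 * per-probe data handed back as @data.
 */
static void example_probe_func(unsigned long ip, unsigned long parent_ip,
			       struct trace_array *tr,
			       struct ftrace_probe_ops *ops, void *data)
{
	/* called from the entry of every matched function */
}

static struct ftrace_probe_ops example_probe_ops = {
	.func = example_probe_func,
};

static __maybe_unused int example_attach(struct trace_array *tr)
{
	/* returns the number of functions matched, or a negative error */
	return register_ftrace_function_probe("vfs_*", tr, &example_probe_ops,
					      NULL);
}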
4787
d3d532d7 4788int
7b60f3d8
SRV
4789unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
4790 struct ftrace_probe_ops *probe_ops)
59df055f 4791{
ba27d855 4792 struct ftrace_func_probe *probe = NULL, *iter;
82cc4fc2 4793 struct ftrace_ops_hash old_hash_ops;
eee8ded1 4794 struct ftrace_func_entry *entry;
3ba00929 4795 struct ftrace_glob func_g;
1ec3a81a
SRV
4796 struct ftrace_hash **orig_hash;
4797 struct ftrace_hash *old_hash;
1ec3a81a 4798 struct ftrace_hash *hash = NULL;
b67bfe0d 4799 struct hlist_node *tmp;
eee8ded1 4800 struct hlist_head hhd;
59df055f 4801 char str[KSYM_SYMBOL_LEN];
7b60f3d8
SRV
4802 int count = 0;
4803 int i, ret = -ENODEV;
eee8ded1 4804 int size;
59df055f 4805
cbab567c 4806 if (!glob || !strlen(glob) || !strcmp(glob, "*"))
3ba00929 4807 func_g.search = NULL;
cbab567c 4808 else {
59df055f
SR
4809 int not;
4810
3ba00929
DS
4811 func_g.type = filter_parse_regex(glob, strlen(glob),
4812 &func_g.search, &not);
4813 func_g.len = strlen(func_g.search);
59df055f 4814
b6887d79 4815 /* we do not support '!' for function probes */
59df055f 4816 if (WARN_ON(not))
d3d532d7 4817 return -EINVAL;
59df055f
SR
4818 }
4819
7b60f3d8
SRV
4820 mutex_lock(&ftrace_lock);
4821 /* Check if the probe_ops is already registered */
ba27d855
JK
4822 list_for_each_entry(iter, &tr->func_probes, list) {
4823 if (iter->probe_ops == probe_ops) {
4824 probe = iter;
7b60f3d8 4825 break;
ba27d855 4826 }
59df055f 4827 }
ba27d855 4828 if (!probe)
7b60f3d8
SRV
4829 goto err_unlock_ftrace;
4830
4831 ret = -EINVAL;
4832 if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
4833 goto err_unlock_ftrace;
4834
4835 acquire_probe_locked(probe);
4836
4837 mutex_unlock(&ftrace_lock);
59df055f 4838
7b60f3d8 4839 mutex_lock(&probe->ops.func_hash->regex_lock);
1ec3a81a 4840
7b60f3d8 4841 orig_hash = &probe->ops.func_hash->filter_hash;
1ec3a81a
SRV
4842 old_hash = *orig_hash;
4843
1ec3a81a
SRV
4844 if (ftrace_hash_empty(old_hash))
4845 goto out_unlock;
e1df4cb6 4846
82cc4fc2
SRV
4847 old_hash_ops.filter_hash = old_hash;
4848 /* Probes only have filters */
4849 old_hash_ops.notrace_hash = NULL;
4850
d3d532d7 4851 ret = -ENOMEM;
1ec3a81a 4852 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
e1df4cb6 4853 if (!hash)
e1df4cb6
SRRH
4854 goto out_unlock;
4855
eee8ded1 4856 INIT_HLIST_HEAD(&hhd);
59df055f 4857
eee8ded1
SRV
4858 size = 1 << hash->size_bits;
4859 for (i = 0; i < size; i++) {
4860 hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
59df055f 4861
3ba00929 4862 if (func_g.search) {
59df055f
SR
4863 kallsyms_lookup(entry->ip, NULL, NULL,
4864 NULL, str);
3ba00929 4865 if (!ftrace_match(str, &func_g))
59df055f
SR
4866 continue;
4867 }
7b60f3d8 4868 count++;
eee8ded1
SRV
4869 remove_hash_entry(hash, entry);
4870 hlist_add_head(&entry->hlist, &hhd);
59df055f
SR
4871 }
4872 }
d3d532d7
SRV
4873
4874 /* Nothing found? */
7b60f3d8 4875 if (!count) {
d3d532d7
SRV
4876 ret = -EINVAL;
4877 goto out_unlock;
4878 }
4879
3f2367ba 4880 mutex_lock(&ftrace_lock);
1ec3a81a 4881
7b60f3d8 4882 WARN_ON(probe->ref < count);
eee8ded1 4883
7b60f3d8 4884 probe->ref -= count;
1ec3a81a 4885
7b60f3d8
SRV
4886 if (ftrace_hash_empty(hash))
4887 ftrace_shutdown(&probe->ops, 0);
4888
4889 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
1ec3a81a 4890 hash, 1);
82cc4fc2
SRV
4891
4892 /* still need to update the function call sites */
1ec3a81a 4893 if (ftrace_enabled && !ftrace_hash_empty(hash))
7b60f3d8 4894 ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
82cc4fc2 4895 &old_hash_ops);
74401729 4896 synchronize_rcu();
3296fc4e 4897
eee8ded1
SRV
4898 hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
4899 hlist_del(&entry->hlist);
7b60f3d8 4900 if (probe_ops->free)
6e444319 4901 probe_ops->free(probe_ops, tr, entry->ip, probe->data);
eee8ded1 4902 kfree(entry);
7818b388 4903 }
3f2367ba 4904 mutex_unlock(&ftrace_lock);
3ba00929 4905
e1df4cb6 4906 out_unlock:
7b60f3d8 4907 mutex_unlock(&probe->ops.func_hash->regex_lock);
e1df4cb6 4908 free_ftrace_hash(hash);
59df055f 4909
7b60f3d8 4910 release_probe(probe);
59df055f 4911
7b60f3d8 4912 return ret;
59df055f 4913
7b60f3d8
SRV
4914 err_unlock_ftrace:
4915 mutex_unlock(&ftrace_lock);
d3d532d7 4916 return ret;
59df055f
SR
4917}
4918
a0e6369e
NR
4919void clear_ftrace_function_probes(struct trace_array *tr)
4920{
4921 struct ftrace_func_probe *probe, *n;
4922
4923 list_for_each_entry_safe(probe, n, &tr->func_probes, list)
4924 unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
4925}
4926
f6180773
SR
4927static LIST_HEAD(ftrace_commands);
4928static DEFINE_MUTEX(ftrace_cmd_mutex);
4929
38de93ab
TZ
4930/*
4931 * Currently we only register ftrace commands from __init, so mark this
4932 * __init too.
4933 */
4934__init int register_ftrace_command(struct ftrace_func_command *cmd)
f6180773
SR
4935{
4936 struct ftrace_func_command *p;
4937 int ret = 0;
4938
4939 mutex_lock(&ftrace_cmd_mutex);
4940 list_for_each_entry(p, &ftrace_commands, list) {
4941 if (strcmp(cmd->name, p->name) == 0) {
4942 ret = -EBUSY;
4943 goto out_unlock;
4944 }
4945 }
4946 list_add(&cmd->list, &ftrace_commands);
4947 out_unlock:
4948 mutex_unlock(&ftrace_cmd_mutex);
4949
4950 return ret;
4951}
4952
38de93ab
TZ
4953/*
4954 * Currently we only unregister ftrace commands from __init, so mark
4955 * this __init too.
4956 */
4957__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
f6180773
SR
4958{
4959 struct ftrace_func_command *p, *n;
4960 int ret = -ENODEV;
4961
4962 mutex_lock(&ftrace_cmd_mutex);
4963 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
4964 if (strcmp(cmd->name, p->name) == 0) {
4965 ret = 0;
4966 list_del_init(&p->list);
4967 goto out_unlock;
4968 }
4969 }
4970 out_unlock:
4971 mutex_unlock(&ftrace_cmd_mutex);
4972
4973 return ret;
4974}
4975
04ec7bb6 4976static int ftrace_process_regex(struct ftrace_iterator *iter,
33dc9b12 4977 char *buff, int len, int enable)
64e7c440 4978{
04ec7bb6 4979 struct ftrace_hash *hash = iter->hash;
d2afd57a 4980 struct trace_array *tr = iter->ops->private;
f6180773 4981 char *func, *command, *next = buff;
6a24a244 4982 struct ftrace_func_command *p;
0aff1c0c 4983 int ret = -EINVAL;
64e7c440
SR
4984
4985 func = strsep(&next, ":");
4986
4987 if (!next) {
1cf41dd7 4988 ret = ftrace_match_records(hash, func, len);
b448c4e3
SR
4989 if (!ret)
4990 ret = -EINVAL;
4991 if (ret < 0)
4992 return ret;
4993 return 0;
64e7c440
SR
4994 }
4995
f6180773 4996 /* command found */
64e7c440
SR
4997
4998 command = strsep(&next, ":");
4999
f6180773
SR
5000 mutex_lock(&ftrace_cmd_mutex);
5001 list_for_each_entry(p, &ftrace_commands, list) {
5002 if (strcmp(p->name, command) == 0) {
04ec7bb6 5003 ret = p->func(tr, hash, func, command, next, enable);
f6180773
SR
5004 goto out_unlock;
5005 }
64e7c440 5006 }
f6180773
SR
5007 out_unlock:
5008 mutex_unlock(&ftrace_cmd_mutex);
64e7c440 5009
f6180773 5010 return ret;
64e7c440
SR
5011}
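
/*
 * Editor's note (illustrative): the strings parsed above arrive from writes
 * to set_ftrace_filter / set_ftrace_notrace.  A bare glob selects functions
 * directly, while anything after the first ':' selects a registered
 * ftrace_func_command, e.g.:
 *
 *	echo 'schedule*'        > set_ftrace_filter	(plain glob)
 *	echo 'write*:mod:ext4'  > set_ftrace_filter	(the 'mod' command)
 *
 * For the second line, @func is "write*", @cmd is "mod" and the remaining
 * "ext4" is passed to the command callback as its parameter.
 */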
5012
e309b41d 5013static ssize_t
41c52c0d
SR
5014ftrace_regex_write(struct file *file, const char __user *ubuf,
5015 size_t cnt, loff_t *ppos, int enable)
5072c59f
SR
5016{
5017 struct ftrace_iterator *iter;
689fd8b6 5018 struct trace_parser *parser;
5019 ssize_t ret, read;
5072c59f 5020
4ba7978e 5021 if (!cnt)
5072c59f
SR
5022 return 0;
5023
5072c59f
SR
5024 if (file->f_mode & FMODE_READ) {
5025 struct seq_file *m = file->private_data;
5026 iter = m->private;
5027 } else
5028 iter = file->private_data;
5029
f04f24fb 5030 if (unlikely(ftrace_disabled))
3f2367ba
MH
5031 return -ENODEV;
5032
5033 /* iter->hash is a local copy, so we don't need regex_lock */
f04f24fb 5034
689fd8b6 5035 parser = &iter->parser;
5036 read = trace_get_user(parser, ubuf, cnt, ppos);
5072c59f 5037
4ba7978e 5038 if (read >= 0 && trace_parser_loaded(parser) &&
689fd8b6 5039 !trace_parser_cont(parser)) {
04ec7bb6 5040 ret = ftrace_process_regex(iter, parser->buffer,
689fd8b6 5041 parser->idx, enable);
313254a9 5042 trace_parser_clear(parser);
7c088b51 5043 if (ret < 0)
3f2367ba 5044 goto out;
eda1e328 5045 }
5072c59f 5046
5072c59f 5047 ret = read;
3f2367ba 5048 out:
5072c59f
SR
5049 return ret;
5050}
5051
fc13cb0c 5052ssize_t
41c52c0d
SR
5053ftrace_filter_write(struct file *file, const char __user *ubuf,
5054 size_t cnt, loff_t *ppos)
5055{
5056 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
5057}
5058
fc13cb0c 5059ssize_t
41c52c0d
SR
5060ftrace_notrace_write(struct file *file, const char __user *ubuf,
5061 size_t cnt, loff_t *ppos)
5062{
5063 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
5064}
5065
33dc9b12 5066static int
4f554e95 5067__ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
647664ea
MH
5068{
5069 struct ftrace_func_entry *entry;
5070
aebfd125
PZ
5071 ip = ftrace_location(ip);
5072 if (!ip)
647664ea
MH
5073 return -EINVAL;
5074
5075 if (remove) {
5076 entry = ftrace_lookup_ip(hash, ip);
5077 if (!entry)
5078 return -ENOENT;
5079 free_hash_entry(hash, entry);
5080 return 0;
5081 }
5082
5083 return add_hash_entry(hash, ip);
5084}
5085
4f554e95
JO
5086static int
5087ftrace_match_addr(struct ftrace_hash *hash, unsigned long *ips,
5088 unsigned int cnt, int remove)
5089{
5090 unsigned int i;
5091 int err;
5092
5093 for (i = 0; i < cnt; i++) {
5094 err = __ftrace_match_addr(hash, ips[i], remove);
5095 if (err) {
5096 /*
5097 * This expects the @hash is a temporary hash and if this
5098 * fails the caller must free the @hash.
5099 */
5100 return err;
5101 }
5102 }
5103 return 0;
5104}
5105
647664ea
MH
5106static int
5107ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
4f554e95
JO
5108 unsigned long *ips, unsigned int cnt,
5109 int remove, int reset, int enable)
41c52c0d 5110{
33dc9b12 5111 struct ftrace_hash **orig_hash;
f45948e8 5112 struct ftrace_hash *hash;
33dc9b12 5113 int ret;
f45948e8 5114
41c52c0d 5115 if (unlikely(ftrace_disabled))
33dc9b12 5116 return -ENODEV;
41c52c0d 5117
33b7f99c 5118 mutex_lock(&ops->func_hash->regex_lock);
3f2367ba 5119
f45948e8 5120 if (enable)
33b7f99c 5121 orig_hash = &ops->func_hash->filter_hash;
f45948e8 5122 else
33b7f99c 5123 orig_hash = &ops->func_hash->notrace_hash;
33dc9b12 5124
b972cc58
WN
5125 if (reset)
5126 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
5127 else
5128 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
5129
3f2367ba
MH
5130 if (!hash) {
5131 ret = -ENOMEM;
5132 goto out_regex_unlock;
5133 }
f45948e8 5134
ac483c44
JO
5135 if (buf && !ftrace_match_records(hash, buf, len)) {
5136 ret = -EINVAL;
5137 goto out_regex_unlock;
5138 }
4f554e95
JO
5139 if (ips) {
5140 ret = ftrace_match_addr(hash, ips, cnt, remove);
647664ea
MH
5141 if (ret < 0)
5142 goto out_regex_unlock;
5143 }
33dc9b12
SR
5144
5145 mutex_lock(&ftrace_lock);
e16b35dd 5146 ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
33dc9b12
SR
5147 mutex_unlock(&ftrace_lock);
5148
ac483c44 5149 out_regex_unlock:
33b7f99c 5150 mutex_unlock(&ops->func_hash->regex_lock);
33dc9b12
SR
5151
5152 free_ftrace_hash(hash);
5153 return ret;
41c52c0d
SR
5154}
5155
647664ea 5156static int
4f554e95
JO
5157ftrace_set_addr(struct ftrace_ops *ops, unsigned long *ips, unsigned int cnt,
5158 int remove, int reset, int enable)
647664ea 5159{
4f554e95 5160 return ftrace_set_hash(ops, NULL, 0, ips, cnt, remove, reset, enable);
647664ea
MH
5161}
5162
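
/*
 * Editor's sketch (hypothetical ops/callback, illustrative only): the
 * ip-based helpers above back exported interfaces such as
 * ftrace_set_filter_ip(), which a client (for example a kprobe placed on an
 * ftrace location) uses to restrict its private ftrace_ops to one call site.
 */
static void example_ip_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op,
				struct ftrace_regs *fregs)
{
	/* hypothetical handler for the single filtered function */
}

static struct ftrace_ops example_ip_ops = {
	.func  = example_ip_callback,
	.flags = FTRACE_OPS_FL_SAVE_REGS,
};

static __maybe_unused int example_trace_one_ip(unsigned long ip)
{
	int ret;

	/* remove=0, reset=1: make @ip the only entry in the filter hash */
	ret = ftrace_set_filter_ip(&example_ip_ops, ip, 0, 1);
	if (ret)
		return ret;

	return register_ftrace_function(&example_ip_ops);
}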
763e34e7 5163#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
013bf0da
SRV
5164
5165struct ftrace_direct_func {
5166 struct list_head next;
5167 unsigned long addr;
5168 int count;
5169};
5170
5171static LIST_HEAD(ftrace_direct_funcs);
5172
5173/**
5174 * ftrace_find_direct_func - test an address if it is a registered direct caller
5175 * @addr: The address of a registered direct caller
5176 *
5177 * This searches to see if a ftrace direct caller has been registered
5178 * at a specific address, and if so, it returns a descriptor for it.
5179 *
5180 * This can be used by architecture code to see if an address is
5181 * a direct caller (trampoline) attached to a fentry/mcount location.
5182 * This is useful for the function_graph tracer, as it may need to
5183 * do adjustments if it traced a location that also has a direct
5184 * trampoline attached to it.
5185 */
5186struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
5187{
5188 struct ftrace_direct_func *entry;
5189 bool found = false;
5190
5191 /* May be called by fgraph trampoline (protected by rcu tasks) */
5192 list_for_each_entry_rcu(entry, &ftrace_direct_funcs, next) {
5193 if (entry->addr == addr) {
5194 found = true;
5195 break;
5196 }
5197 }
5198 if (found)
5199 return entry;
5200
5201 return NULL;
5202}
5203
8a141dd7
AS
5204static struct ftrace_direct_func *ftrace_alloc_direct_func(unsigned long addr)
5205{
5206 struct ftrace_direct_func *direct;
5207
5208 direct = kmalloc(sizeof(*direct), GFP_KERNEL);
5209 if (!direct)
5210 return NULL;
5211 direct->addr = addr;
5212 direct->count = 0;
5213 list_add_rcu(&direct->next, &ftrace_direct_funcs);
5214 ftrace_direct_func_count++;
5215 return direct;
5216}
5217
763e34e7
SRV
5218/**
5219 * register_ftrace_direct - Call a custom trampoline directly
5220 * @ip: The address of the nop at the beginning of a function
5221 * @addr: The address of the trampoline to call at @ip
5222 *
5223 * This is used to connect a direct call from the nop location (@ip)
5224 * at the start of ftrace traced functions. The location that it calls
5225 * (@addr) must be able to handle a direct call, and save the parameters
5226 * of the function being traced, and restore them (or inject new ones
5227 * if needed), before returning.
5228 *
5229 * Returns:
5230 * 0 on success
5231 * -EBUSY - Another direct function is already attached (there can be only one)
5232 * -ENODEV - @ip does not point to a ftrace nop location (or not supported)
5233 * -ENOMEM - There was an allocation failure.
5234 */
5235int register_ftrace_direct(unsigned long ip, unsigned long addr)
5236{
013bf0da 5237 struct ftrace_direct_func *direct;
763e34e7
SRV
5238 struct ftrace_func_entry *entry;
5239 struct ftrace_hash *free_hash = NULL;
5240 struct dyn_ftrace *rec;
aebfd125 5241 int ret = -ENODEV;
763e34e7
SRV
5242
5243 mutex_lock(&direct_mutex);
5244
aebfd125
PZ
5245 ip = ftrace_location(ip);
5246 if (!ip)
5247 goto out_unlock;
5248
763e34e7 5249 /* See if there's a direct function at @ip already */
aebfd125 5250 ret = -EBUSY;
ff205766 5251 if (ftrace_find_rec_direct(ip))
763e34e7
SRV
5252 goto out_unlock;
5253
5254 ret = -ENODEV;
5255 rec = lookup_rec(ip, ip);
5256 if (!rec)
5257 goto out_unlock;
5258
5259 /*
5260 * Check if the rec says it has a direct call but we didn't
5261 * find one earlier?
5262 */
5263 if (WARN_ON(rec->flags & FTRACE_FL_DIRECT))
5264 goto out_unlock;
5265
5266 /* Make sure the ip points to the exact record */
406acdd3
SRV
5267 if (ip != rec->ip) {
5268 ip = rec->ip;
5269 /* Need to check this ip for a direct. */
ff205766 5270 if (ftrace_find_rec_direct(ip))
406acdd3
SRV
5271 goto out_unlock;
5272 }
763e34e7
SRV
5273
5274 ret = -ENOMEM;
013bf0da
SRV
5275 direct = ftrace_find_direct_func(addr);
5276 if (!direct) {
8a141dd7 5277 direct = ftrace_alloc_direct_func(addr);
1904a814 5278 if (!direct)
013bf0da 5279 goto out_unlock;
013bf0da
SRV
5280 }
5281
1904a814
JO
5282 entry = ftrace_add_rec_direct(ip, addr, &free_hash);
5283 if (!entry)
5284 goto out_unlock;
763e34e7
SRV
5285
5286 ret = ftrace_set_filter_ip(&direct_ops, ip, 0, 0);
763e34e7
SRV
5287
5288 if (!ret && !(direct_ops.flags & FTRACE_OPS_FL_ENABLED)) {
5289 ret = register_ftrace_function(&direct_ops);
5290 if (ret)
5291 ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
5292 }
5293
013bf0da 5294 if (ret) {
7d54c15c 5295 remove_hash_entry(direct_functions, entry);
763e34e7 5296 kfree(entry);
013bf0da
SRV
5297 if (!direct->count) {
5298 list_del_rcu(&direct->next);
5299 synchronize_rcu_tasks();
5300 kfree(direct);
5301 if (free_hash)
5302 free_ftrace_hash(free_hash);
5303 free_hash = NULL;
a3ad1a7e 5304 ftrace_direct_func_count--;
013bf0da
SRV
5305 }
5306 } else {
1c7f9b67 5307 direct->count++;
013bf0da 5308 }
763e34e7
SRV
5309 out_unlock:
5310 mutex_unlock(&direct_mutex);
5311
5312 if (free_hash) {
5313 synchronize_rcu_tasks();
5314 free_ftrace_hash(free_hash);
5315 }
5316
5317 return ret;
5318}
5319EXPORT_SYMBOL_GPL(register_ftrace_direct);
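
/*
 * Editor's sketch (hypothetical symbols, illustrative only): my_tramp would
 * be an assembly trampoline in the style of samples/ftrace/, and
 * my_traced_func the function whose fentry/mcount nop gets redirected.
 */
extern void my_tramp(void);
extern void my_traced_func(void);

static __maybe_unused int example_attach_direct(void)
{
	/* make the nop at my_traced_func() call my_tramp() directly */
	return register_ftrace_direct((unsigned long)my_traced_func,
				      (unsigned long)my_tramp);
}

static __maybe_unused void example_detach_direct(void)
{
	unregister_ftrace_direct((unsigned long)my_traced_func,
				 (unsigned long)my_tramp);
}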
5320
ea806eb3
SRV
5321static struct ftrace_func_entry *find_direct_entry(unsigned long *ip,
5322 struct dyn_ftrace **recp)
763e34e7
SRV
5323{
5324 struct ftrace_func_entry *entry;
5325 struct dyn_ftrace *rec;
763e34e7 5326
128161f4
SRV
5327 rec = lookup_rec(*ip, *ip);
5328 if (!rec)
5329 return NULL;
763e34e7 5330
128161f4 5331 entry = __ftrace_lookup_ip(direct_functions, rec->ip);
763e34e7 5332 if (!entry) {
128161f4
SRV
5333 WARN_ON(rec->flags & FTRACE_FL_DIRECT);
5334 return NULL;
5335 }
763e34e7 5336
128161f4 5337 WARN_ON(!(rec->flags & FTRACE_FL_DIRECT));
763e34e7 5338
128161f4
SRV
5339 /* Passed in ip just needs to be on the call site */
5340 *ip = rec->ip;
5341
ea806eb3
SRV
5342 if (recp)
5343 *recp = rec;
5344
128161f4
SRV
5345 return entry;
5346}
5347
5348int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
5349{
5350 struct ftrace_direct_func *direct;
5351 struct ftrace_func_entry *entry;
7d5b7cad 5352 struct ftrace_hash *hash;
128161f4
SRV
5353 int ret = -ENODEV;
5354
5355 mutex_lock(&direct_mutex);
5356
aebfd125
PZ
5357 ip = ftrace_location(ip);
5358 if (!ip)
5359 goto out_unlock;
5360
ea806eb3 5361 entry = find_direct_entry(&ip, NULL);
128161f4
SRV
5362 if (!entry)
5363 goto out_unlock;
763e34e7 5364
7d5b7cad
JO
5365 hash = direct_ops.func_hash->filter_hash;
5366 if (hash->count == 1)
763e34e7
SRV
5367 unregister_ftrace_function(&direct_ops);
5368
5369 ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
5370
5371 WARN_ON(ret);
5372
5373 remove_hash_entry(direct_functions, entry);
5374
013bf0da
SRV
5375 direct = ftrace_find_direct_func(addr);
5376 if (!WARN_ON(!direct)) {
5377 /* This is the good path (see the ! before WARN) */
5378 direct->count--;
5379 WARN_ON(direct->count < 0);
5380 if (!direct->count) {
5381 list_del_rcu(&direct->next);
5382 synchronize_rcu_tasks();
5383 kfree(direct);
353da879 5384 kfree(entry);
a3ad1a7e 5385 ftrace_direct_func_count--;
013bf0da
SRV
5386 }
5387 }
763e34e7
SRV
5388 out_unlock:
5389 mutex_unlock(&direct_mutex);
5390
5391 return ret;
5392}
5393EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
0567d680
SRV
5394
5395static struct ftrace_ops stub_ops = {
5396 .func = ftrace_stub,
5397};
5398
ea806eb3
SRV
5399/**
5400 * ftrace_modify_direct_caller - modify ftrace nop directly
5401 * @entry: The ftrace hash entry of the direct helper for @rec
5402 * @rec: The record representing the function site to patch
5403 * @old_addr: The location that the site at @rec->ip currently calls
5404 * @new_addr: The location that the site at @rec->ip should call
5405 *
5406 * An architecture may overwrite this function to optimize the
5407 * changing of the direct callback on an ftrace nop location.
5408 * This is called with the ftrace_lock mutex held, and no other
5409 * ftrace callbacks are on the associated record (@rec). Thus,
5410 * it is safe to modify the ftrace record, where it should be
5411 * currently calling @old_addr directly, to call @new_addr.
5412 *
5413 * Safety checks should be made to make sure that the code at
5414 * @rec->ip is currently calling @old_addr. And this must
5415 * also update entry->direct to @new_addr.
5416 */
5417int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
5418 struct dyn_ftrace *rec,
5419 unsigned long old_addr,
5420 unsigned long new_addr)
5421{
5422 unsigned long ip = rec->ip;
5423 int ret;
5424
5425 /*
5426 * The ftrace_lock was used to determine if the record
5427 * had more than one registered user to it. If it did,
5428 * we needed to prevent that from changing to do the quick
5429 * switch. But if it did not (only a direct caller was attached)
5430 * then this function is called. But this function can deal
5431 * with attached callers to the rec that we care about, and
5432 * since this function uses standard ftrace calls that take
5433 * the ftrace_lock mutex, we need to release it.
5434 */
5435 mutex_unlock(&ftrace_lock);
5436
5437 /*
5438 * By setting a stub function at the same address, we force
5439 * the code to call the iterator and the direct_ops helper.
5440 * This means that @ip does not call the direct call, and
5441 * we can simply modify it.
5442 */
5443 ret = ftrace_set_filter_ip(&stub_ops, ip, 0, 0);
5444 if (ret)
5445 goto out_lock;
5446
5447 ret = register_ftrace_function(&stub_ops);
5448 if (ret) {
5449 ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
5450 goto out_lock;
5451 }
5452
5453 entry->direct = new_addr;
5454
5455 /*
5456 * By removing the stub, we put back the direct call, calling
5457 * the @new_addr.
5458 */
5459 unregister_ftrace_function(&stub_ops);
5460 ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
5461
5462 out_lock:
5463 mutex_lock(&ftrace_lock);
5464
5465 return ret;
5466}
5467
0567d680
SRV
5468/**
5469 * modify_ftrace_direct - Modify an existing direct call to call something else
5470 * @ip: The instruction pointer to modify
5471 * @old_addr: The address that the current @ip calls directly
5472 * @new_addr: The address that the @ip should call
5473 *
5474 * This modifies a ftrace direct caller at an instruction pointer without
5475 * having to disable it first. The direct call will switch over to the
5476 * @new_addr without missing anything.
5477 *
5478 * Returns: zero on success. Non zero on error, which includes:
5479 * -ENODEV : the @ip given has no direct caller attached
5480 * -EINVAL : the @old_addr does not match the current direct caller
5481 */
5482int modify_ftrace_direct(unsigned long ip,
5483 unsigned long old_addr, unsigned long new_addr)
5484{
8a141dd7 5485 struct ftrace_direct_func *direct, *new_direct = NULL;
0567d680 5486 struct ftrace_func_entry *entry;
ea806eb3 5487 struct dyn_ftrace *rec;
0567d680
SRV
5488 int ret = -ENODEV;
5489
5490 mutex_lock(&direct_mutex);
0567d680 5491
ea806eb3 5492 mutex_lock(&ftrace_lock);
aebfd125
PZ
5493
5494 ip = ftrace_location(ip);
5495 if (!ip)
5496 goto out_unlock;
5497
ea806eb3 5498 entry = find_direct_entry(&ip, &rec);
128161f4
SRV
5499 if (!entry)
5500 goto out_unlock;
0567d680
SRV
5501
5502 ret = -EINVAL;
5503 if (entry->direct != old_addr)
5504 goto out_unlock;
5505
8a141dd7
AS
5506 direct = ftrace_find_direct_func(old_addr);
5507 if (WARN_ON(!direct))
5508 goto out_unlock;
5509 if (direct->count > 1) {
5510 ret = -ENOMEM;
5511 new_direct = ftrace_alloc_direct_func(new_addr);
5512 if (!new_direct)
5513 goto out_unlock;
5514 direct->count--;
5515 new_direct->count++;
5516 } else {
5517 direct->addr = new_addr;
5518 }
5519
0567d680 5520 /*
ea806eb3
SRV
5521 * If there's no other ftrace callback on the rec->ip location,
5522 * then it can be changed directly by the architecture.
5523 * If there is another caller, then we just need to change the
5524 * direct caller helper to point to @new_addr.
0567d680 5525 */
ea806eb3
SRV
5526 if (ftrace_rec_count(rec) == 1) {
5527 ret = ftrace_modify_direct_caller(entry, rec, old_addr, new_addr);
5528 } else {
5529 entry->direct = new_addr;
5530 ret = 0;
0567d680
SRV
5531 }
5532
8a141dd7
AS
5533 if (unlikely(ret && new_direct)) {
5534 direct->count++;
5535 list_del_rcu(&new_direct->next);
5536 synchronize_rcu_tasks();
5537 kfree(new_direct);
5538 ftrace_direct_func_count--;
5539 }
5540
0567d680 5541 out_unlock:
ea806eb3 5542 mutex_unlock(&ftrace_lock);
0567d680
SRV
5543 mutex_unlock(&direct_mutex);
5544 return ret;
5545}
5546EXPORT_SYMBOL_GPL(modify_ftrace_direct);
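/*
 * Usage sketch (illustrative only, not part of this file): once a direct
 * call has been attached with the single-address register_ftrace_direct()
 * helper, it can be switched to a different trampoline without being torn
 * down first. my_tramp1 and my_tramp2 stand for caller-provided
 * architecture trampolines and are assumptions of this example:
 *
 *	unsigned long ip = (unsigned long)wake_up_process;
 *
 *	register_ftrace_direct(ip, (unsigned long)my_tramp1);
 *	...
 *	modify_ftrace_direct(ip, (unsigned long)my_tramp1,
 *			     (unsigned long)my_tramp2);
 *
 * If my_tramp1 is not the currently attached caller, -EINVAL is returned
 * and nothing is changed.
 */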
f64dd462
JO
5547
5548#define MULTI_FLAGS (FTRACE_OPS_FL_IPMODIFY | FTRACE_OPS_FL_DIRECT | \
5549 FTRACE_OPS_FL_SAVE_REGS)
5550
5551static int check_direct_multi(struct ftrace_ops *ops)
5552{
5553 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
5554 return -EINVAL;
5555 if ((ops->flags & MULTI_FLAGS) != MULTI_FLAGS)
5556 return -EINVAL;
5557 return 0;
5558}
5559
5560static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long addr)
5561{
5562 struct ftrace_func_entry *entry, *del;
5563 int size, i;
5564
5565 size = 1 << hash->size_bits;
5566 for (i = 0; i < size; i++) {
5567 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5568 del = __ftrace_lookup_ip(direct_functions, entry->ip);
5569 if (del && del->direct == addr) {
5570 remove_hash_entry(direct_functions, del);
5571 kfree(del);
5572 }
5573 }
5574 }
5575}
5576
5577/**
5578 * register_ftrace_direct_multi - Call a custom trampoline directly
5579 * for multiple functions registered in @ops
5580 * @ops: The address of the struct ftrace_ops object
5581 * @addr: The address of the trampoline to call at @ops functions
5582 *
5583 * This is used to connect a direct call to @addr from the nop locations
5584 * of the functions registered in @ops (set via the ftrace_set_filter_ip()
5585 * function).
5586 *
5587 * The location that it calls (@addr) must be able to handle a direct call,
5588 * and save the parameters of the function being traced, and restore them
5589 * (or inject new ones if needed), before returning.
5590 *
5591 * Returns:
5592 * 0 on success
5593 * -EINVAL - The @ops object was already registered with this call or
5594 * there are no functions in the @ops object.
5595 * -EBUSY - Another direct function is already attached (there can be only one)
5596 * -ENODEV - a requested location does not point to an ftrace nop location (or is not supported)
5597 * -ENOMEM - There was an allocation failure.
5598 */
5599int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5600{
5601 struct ftrace_hash *hash, *free_hash = NULL;
5602 struct ftrace_func_entry *entry, *new;
5603 int err = -EBUSY, size, i;
5604
5605 if (ops->func || ops->trampoline)
5606 return -EINVAL;
5607 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
5608 return -EINVAL;
5609 if (ops->flags & FTRACE_OPS_FL_ENABLED)
5610 return -EINVAL;
5611
5612 hash = ops->func_hash->filter_hash;
5613 if (ftrace_hash_empty(hash))
5614 return -EINVAL;
5615
5616 mutex_lock(&direct_mutex);
5617
5618 /* Make sure requested entries are not already registered... */
5619 size = 1 << hash->size_bits;
5620 for (i = 0; i < size; i++) {
5621 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5622 if (ftrace_find_rec_direct(entry->ip))
5623 goto out_unlock;
5624 }
5625 }
5626
5627 /* ... and insert them into the direct_functions hash. */
5628 err = -ENOMEM;
5629 for (i = 0; i < size; i++) {
5630 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5631 new = ftrace_add_rec_direct(entry->ip, addr, &free_hash);
5632 if (!new)
5633 goto out_remove;
5634 entry->direct = addr;
5635 }
5636 }
5637
5638 ops->func = call_direct_funcs;
5639 ops->flags = MULTI_FLAGS;
5640 ops->trampoline = FTRACE_REGS_ADDR;
5641
5642 err = register_ftrace_function(ops);
5643
5644 out_remove:
5645 if (err)
5646 remove_direct_functions_hash(hash, addr);
5647
5648 out_unlock:
5649 mutex_unlock(&direct_mutex);
5650
5651 if (free_hash) {
5652 synchronize_rcu_tasks();
5653 free_ftrace_hash(free_hash);
5654 }
5655 return err;
5656}
5657EXPORT_SYMBOL_GPL(register_ftrace_direct_multi);
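/*
 * Usage sketch (illustrative only, not part of this file): the typical
 * sequence is to populate the ops filter hash first and then attach the
 * trampoline. my_tramp is assumed to be a caller-provided architecture
 * trampoline that saves and restores the traced function's arguments:
 *
 *	static struct ftrace_ops direct;
 *
 *	ftrace_set_filter_ip(&direct, (unsigned long)wake_up_process, 0, 0);
 *	ret = register_ftrace_direct_multi(&direct, (unsigned long)my_tramp);
 *
 * The ops must not have been registered or given a ->func/->trampoline
 * beforehand, as the checks at the top of this function enforce.
 */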
5658
5659/**
5660 * unregister_ftrace_direct_multi - Remove calls to custom trampoline
5661 * previously registered by register_ftrace_direct_multi for @ops object.
5662 * @ops: The address of the struct ftrace_ops object
5663 *
5664 * This is used to remove the direct call to @addr from the nop locations
5665 * of the functions registered in @ops (set via the ftrace_set_filter_ip()
5666 * function).
5667 *
5668 * Returns:
5669 * 0 on success
5670 * -EINVAL - The @ops object was not properly registered.
5671 */
5672int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5673{
5674 struct ftrace_hash *hash = ops->func_hash->filter_hash;
5675 int err;
5676
5677 if (check_direct_multi(ops))
5678 return -EINVAL;
5679 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
5680 return -EINVAL;
5681
5682 mutex_lock(&direct_mutex);
5683 err = unregister_ftrace_function(ops);
5684 remove_direct_functions_hash(hash, addr);
5685 mutex_unlock(&direct_mutex);
fea3ffa4
JO
5686
5687 /* cleanup for a possible later register call */
5688 ops->func = NULL;
5689 ops->trampoline = 0;
f64dd462
JO
5690 return err;
5691}
5692EXPORT_SYMBOL_GPL(unregister_ftrace_direct_multi);
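/*
 * Usage sketch (illustrative only): tearing down the registration shown
 * above is a single call with the same trampoline address, after which the
 * ops can be reused for another register call:
 *
 *	unregister_ftrace_direct_multi(&direct, (unsigned long)my_tramp);
 */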
ccf5a89e
JO
5693
5694/**
5695 * modify_ftrace_direct_multi - Modify an existing direct 'multi' call
5696 * to call something else
5697 * @ops: The address of the struct ftrace_ops object
5698 * @addr: The address of the new trampoline to call at @ops functions
5699 *
5700 * This is used to unregister the currently registered direct caller and
5701 * register a new one, @addr, on the functions registered in the @ops object.
5702 *
5703 * Note there's a window between the ftrace_shutdown and ftrace_startup calls
5704 * where no callbacks will be called.
5705 *
5706 * Returns: zero on success. Non zero on error, which includes:
5707 * -EINVAL - The @ops object was not properly registered.
5708 */
5709int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5710{
ed292718 5711 struct ftrace_hash *hash;
ccf5a89e 5712 struct ftrace_func_entry *entry, *iter;
ed292718
SRV
5713 static struct ftrace_ops tmp_ops = {
5714 .func = ftrace_stub,
5715 .flags = FTRACE_OPS_FL_STUB,
5716 };
ccf5a89e
JO
5717 int i, size;
5718 int err;
5719
5720 if (check_direct_multi(ops))
5721 return -EINVAL;
5722 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
5723 return -EINVAL;
5724
5725 mutex_lock(&direct_mutex);
ed292718
SRV
5726
5727 /* Enable the tmp_ops to have the same functions as the direct ops */
5728 ftrace_ops_init(&tmp_ops);
5729 tmp_ops.func_hash = ops->func_hash;
5730
5731 err = register_ftrace_function(&tmp_ops);
5732 if (err)
5733 goto out_direct;
ccf5a89e
JO
5734
5735 /*
ed292718
SRV
5736 * Now the ftrace_ops_list_func() is called to do the direct callers.
5737 * We can safely change the direct functions attached to each entry.
ccf5a89e 5738 */
ed292718 5739 mutex_lock(&ftrace_lock);
ccf5a89e 5740
ed292718 5741 hash = ops->func_hash->filter_hash;
ccf5a89e
JO
5742 size = 1 << hash->size_bits;
5743 for (i = 0; i < size; i++) {
5744 hlist_for_each_entry(iter, &hash->buckets[i], hlist) {
5745 entry = __ftrace_lookup_ip(direct_functions, iter->ip);
5746 if (!entry)
5747 continue;
5748 entry->direct = addr;
5749 }
5750 }
5751
2e6e9058
JO
5752 mutex_unlock(&ftrace_lock);
5753
ed292718
SRV
5754 /* Removing the tmp_ops will add the updated direct callers to the functions */
5755 unregister_ftrace_function(&tmp_ops);
ccf5a89e 5756
ed292718 5757 out_direct:
ccf5a89e
JO
5758 mutex_unlock(&direct_mutex);
5759 return err;
5760}
5761EXPORT_SYMBOL_GPL(modify_ftrace_direct_multi);
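/*
 * Usage sketch (illustrative only): retargeting all functions attached to
 * the ops from one trampoline to another without unregistering the ops
 * first (my_tramp1/my_tramp2 are assumed caller-provided trampolines):
 *
 *	register_ftrace_direct_multi(&direct, (unsigned long)my_tramp1);
 *	...
 *	modify_ftrace_direct_multi(&direct, (unsigned long)my_tramp2);
 */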
763e34e7
SRV
5762#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
5763
647664ea
MH
5764/**
5765 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
5766 * @ops - the ops to set the filter with
5767 * @ip - the address to add to or remove from the filter.
5768 * @remove - non zero to remove the ip from the filter
5769 * @reset - non zero to reset all filters before applying this filter.
5770 *
5771 * Filters denote which functions should be enabled when tracing is enabled.
f2cc020d 5772 * If @ip is NULL, it fails to update the filter.
647664ea
MH
5773 */
5774int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
5775 int remove, int reset)
5776{
f04f24fb 5777 ftrace_ops_init(ops);
4f554e95 5778 return ftrace_set_addr(ops, &ip, 1, remove, reset, 1);
647664ea
MH
5779}
5780EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
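/*
 * Usage sketch (illustrative only): restricting an ops to a single function
 * by address before registering it. my_ops and my_callback are assumptions
 * of this example:
 *
 *	ftrace_set_filter_ip(&my_ops, (unsigned long)wake_up_process, 0, 0);
 *	my_ops.func = my_callback;
 *	register_ftrace_function(&my_ops);
 *
 * Passing remove=1 later takes the address back out of the filter.
 */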
5781
4f554e95
JO
5782/**
5783 * ftrace_set_filter_ips - set functions to filter on in ftrace by addresses
5784 * @ops - the ops to set the filter with
5785 * @ips - the array of addresses to add to or remove from the filter.
5786 * @cnt - the number of addresses in @ips
5787 * @remove - non zero to remove ips from the filter
5788 * @reset - non zero to reset all filters before applying this filter.
5789 *
5790 * Filters denote which functions should be enabled when tracing is enabled
5791 * If the @ips array or any ip specified within is NULL, it fails to update the filter.
5792 */
5793int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
5794 unsigned int cnt, int remove, int reset)
5795{
5796 ftrace_ops_init(ops);
5797 return ftrace_set_addr(ops, ips, cnt, remove, reset, 1);
5798}
5799EXPORT_SYMBOL_GPL(ftrace_set_filter_ips);
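/*
 * Usage sketch (illustrative only): the batch variant takes an array of
 * addresses instead of one ftrace_set_filter_ip() call per address:
 *
 *	unsigned long ips[2] = {
 *		(unsigned long)wake_up_process,
 *		(unsigned long)schedule_timeout,
 *	};
 *
 *	ftrace_set_filter_ips(&my_ops, ips, ARRAY_SIZE(ips), 0, 0);
 */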
5800
d032ae89
JF
5801/**
5802 * ftrace_ops_set_global_filter - setup ops to use global filters
5803 * @ops - the ops which will use the global filters
5804 *
5805 * Ftrace users who need global function trace filtering should call this.
5806 * It can set the global filter only if the ops have not been initialized yet.
5807 */
5808void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
5809{
5810 if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
5811 return;
5812
5813 ftrace_ops_init(ops);
5814 ops->func_hash = &global_ops.local_hash;
5815}
5816EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
5817
647664ea
MH
5818static int
5819ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
5820 int reset, int enable)
5821{
4f554e95 5822 return ftrace_set_hash(ops, buf, len, NULL, 0, 0, reset, enable);
647664ea
MH
5823}
5824
77a2b37d
SR
5825/**
5826 * ftrace_set_filter - set a function to filter on in ftrace
936e074b
SR
5827 * @ops - the ops to set the filter with
5828 * @buf - the string that holds the function filter text.
5829 * @len - the length of the string.
5830 * @reset - non zero to reset all filters before applying this filter.
5831 *
5832 * Filters denote which functions should be enabled when tracing is enabled.
5833 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
5834 */
ac483c44 5835int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
936e074b
SR
5836 int len, int reset)
5837{
f04f24fb 5838 ftrace_ops_init(ops);
ac483c44 5839 return ftrace_set_regex(ops, buf, len, reset, 1);
936e074b
SR
5840}
5841EXPORT_SYMBOL_GPL(ftrace_set_filter);
5842
5843/**
5844 * ftrace_set_notrace - set a function to not trace in ftrace
5845 * @ops - the ops to set the notrace filter with
5846 * @buf - the string that holds the function notrace text.
5847 * @len - the length of the string.
5848 * @reset - non zero to reset all filters before applying this filter.
5849 *
5850 * Notrace Filters denote which functions should not be enabled when tracing
5851 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
5852 * for tracing.
5853 */
ac483c44 5854int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
936e074b
SR
5855 int len, int reset)
5856{
f04f24fb 5857 ftrace_ops_init(ops);
ac483c44 5858 return ftrace_set_regex(ops, buf, len, reset, 0);
936e074b
SR
5859}
5860EXPORT_SYMBOL_GPL(ftrace_set_notrace);
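/*
 * Usage sketch (illustrative only): glob filters and notrace filters can be
 * combined, e.g. tracing all kmem_cache_* functions except kmem_cache_free().
 * my_ops is an assumption of this example; reset=1 clears any previous filter:
 *
 *	char fbuf[] = "kmem_cache_*";
 *	char nbuf[] = "kmem_cache_free";
 *
 *	ftrace_set_filter(&my_ops, fbuf, strlen(fbuf), 1);
 *	ftrace_set_notrace(&my_ops, nbuf, strlen(nbuf), 1);
 */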
5861/**
8d1b065d 5862 * ftrace_set_global_filter - set a function to filter on with global tracers
77a2b37d
SR
5863 * @buf - the string that holds the function filter text.
5864 * @len - the length of the string.
5865 * @reset - non zero to reset all filters before applying this filter.
5866 *
5867 * Filters denote which functions should be enabled when tracing is enabled.
5868 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
5869 */
936e074b 5870void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
77a2b37d 5871{
f45948e8 5872 ftrace_set_regex(&global_ops, buf, len, reset, 1);
41c52c0d 5873}
936e074b 5874EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
4eebcc81 5875
41c52c0d 5876/**
8d1b065d 5877 * ftrace_set_global_notrace - set a function to not trace with global tracers
41c52c0d
SR
5878 * @buf - the string that holds the function notrace text.
5879 * @len - the length of the string.
5880 * @reset - non zero to reset all filters before applying this filter.
5881 *
5882 * Notrace Filters denote which functions should not be enabled when tracing
5883 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
5884 * for tracing.
5885 */
936e074b 5886void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
41c52c0d 5887{
f45948e8 5888 ftrace_set_regex(&global_ops, buf, len, reset, 0);
77a2b37d 5889}
936e074b 5890EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
77a2b37d 5891
2af15d6a
SR
5892/*
5893 * command line interface to allow users to set filters on boot up.
5894 */
5895#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
5896static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
5897static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
5898
f1ed7c74
SRRH
5899/* Used by function selftest to not test if filter is set */
5900bool ftrace_filter_param __initdata;
5901
2af15d6a
SR
5902static int __init set_ftrace_notrace(char *str)
5903{
f1ed7c74 5904 ftrace_filter_param = true;
75761cc1 5905 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
2af15d6a
SR
5906 return 1;
5907}
5908__setup("ftrace_notrace=", set_ftrace_notrace);
5909
5910static int __init set_ftrace_filter(char *str)
5911{
f1ed7c74 5912 ftrace_filter_param = true;
75761cc1 5913 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
2af15d6a
SR
5914 return 1;
5915}
5916__setup("ftrace_filter=", set_ftrace_filter);
5917
369bc18f 5918#ifdef CONFIG_FUNCTION_GRAPH_TRACER
f6060f46 5919static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
0d7d9a16 5920static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
b9b0c831 5921static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
801c29fd 5922
369bc18f
SA
5923static int __init set_graph_function(char *str)
5924{
06f43d66 5925 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
369bc18f
SA
5926 return 1;
5927}
5928__setup("ftrace_graph_filter=", set_graph_function);
5929
0d7d9a16
NK
5930static int __init set_graph_notrace_function(char *str)
5931{
5932 strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
5933 return 1;
5934}
5935__setup("ftrace_graph_notrace=", set_graph_notrace_function);
5936
65a50c65
TB
5937static int __init set_graph_max_depth_function(char *str)
5938{
5939 if (!str)
5940 return 0;
5941 fgraph_max_depth = simple_strtoul(str, NULL, 0);
5942 return 1;
5943}
5944__setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
0d7d9a16
NK
5945
5946static void __init set_ftrace_early_graph(char *buf, int enable)
369bc18f
SA
5947{
5948 int ret;
5949 char *func;
b9b0c831 5950 struct ftrace_hash *hash;
0d7d9a16 5951
92ad18ec 5952 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
24589e3a 5953 if (MEM_FAIL(!hash, "Failed to allocate hash\n"))
92ad18ec 5954 return;
369bc18f
SA
5955
5956 while (buf) {
5957 func = strsep(&buf, ",");
5958 /* we allow only one expression at a time */
b9b0c831 5959 ret = ftrace_graph_set_hash(hash, func);
369bc18f
SA
5960 if (ret)
5961 printk(KERN_DEBUG "ftrace: function %s not "
5962 "traceable\n", func);
5963 }
92ad18ec
SRV
5964
5965 if (enable)
5966 ftrace_graph_hash = hash;
5967 else
5968 ftrace_graph_notrace_hash = hash;
369bc18f
SA
5969}
5970#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5971
2a85a37f
SR
5972void __init
5973ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
2af15d6a
SR
5974{
5975 char *func;
5976
f04f24fb
MH
5977 ftrace_ops_init(ops);
5978
2af15d6a
SR
5979 while (buf) {
5980 func = strsep(&buf, ",");
f45948e8 5981 ftrace_set_regex(ops, func, strlen(func), 0, enable);
2af15d6a
SR
5982 }
5983}
5984
5985static void __init set_ftrace_early_filters(void)
5986{
5987 if (ftrace_filter_buf[0])
2a85a37f 5988 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
2af15d6a 5989 if (ftrace_notrace_buf[0])
2a85a37f 5990 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
369bc18f
SA
5991#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5992 if (ftrace_graph_buf[0])
0d7d9a16
NK
5993 set_ftrace_early_graph(ftrace_graph_buf, 1);
5994 if (ftrace_graph_notrace_buf[0])
5995 set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
369bc18f 5996#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2af15d6a
SR
5997}
5998
fc13cb0c 5999int ftrace_regex_release(struct inode *inode, struct file *file)
5072c59f
SR
6000{
6001 struct seq_file *m = (struct seq_file *)file->private_data;
6002 struct ftrace_iterator *iter;
33dc9b12 6003 struct ftrace_hash **orig_hash;
689fd8b6 6004 struct trace_parser *parser;
ed926f9b 6005 int filter_hash;
5072c59f 6006
5072c59f
SR
6007 if (file->f_mode & FMODE_READ) {
6008 iter = m->private;
5072c59f
SR
6009 seq_release(inode, file);
6010 } else
6011 iter = file->private_data;
6012
689fd8b6 6013 parser = &iter->parser;
6014 if (trace_parser_loaded(parser)) {
8c9af478
SRV
6015 int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
6016
6017 ftrace_process_regex(iter, parser->buffer,
6018 parser->idx, enable);
5072c59f
SR
6019 }
6020
689fd8b6 6021 trace_parser_put(parser);
689fd8b6 6022
33b7f99c 6023 mutex_lock(&iter->ops->func_hash->regex_lock);
3f2367ba 6024
058e297d 6025 if (file->f_mode & FMODE_WRITE) {
ed926f9b
SR
6026 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
6027
8c08f0d5 6028 if (filter_hash) {
33b7f99c 6029 orig_hash = &iter->ops->func_hash->filter_hash;
69d71879 6030 if (iter->tr && !list_empty(&iter->tr->mod_trace))
8c08f0d5
SRV
6031 iter->hash->flags |= FTRACE_HASH_FL_MOD;
6032 } else
33b7f99c 6033 orig_hash = &iter->ops->func_hash->notrace_hash;
33dc9b12 6034
058e297d 6035 mutex_lock(&ftrace_lock);
045e269c 6036 ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
e16b35dd 6037 iter->hash, filter_hash);
058e297d 6038 mutex_unlock(&ftrace_lock);
c20489da
SRV
6039 } else {
6040 /* For read only, the hash is the ops hash */
6041 iter->hash = NULL;
058e297d 6042 }
3f2367ba 6043
33b7f99c 6044 mutex_unlock(&iter->ops->func_hash->regex_lock);
33dc9b12 6045 free_ftrace_hash(iter->hash);
9ef16693
SRV
6046 if (iter->tr)
6047 trace_array_put(iter->tr);
33dc9b12 6048 kfree(iter);
058e297d 6049
5072c59f
SR
6050 return 0;
6051}
6052
5e2336a0 6053static const struct file_operations ftrace_avail_fops = {
5072c59f
SR
6054 .open = ftrace_avail_open,
6055 .read = seq_read,
6056 .llseek = seq_lseek,
3be04b47 6057 .release = seq_release_private,
5072c59f
SR
6058};
6059
647bcd03
SR
6060static const struct file_operations ftrace_enabled_fops = {
6061 .open = ftrace_enabled_open,
6062 .read = seq_read,
6063 .llseek = seq_lseek,
6064 .release = seq_release_private,
6065};
6066
5e2336a0 6067static const struct file_operations ftrace_filter_fops = {
5072c59f 6068 .open = ftrace_filter_open,
850a80cf 6069 .read = seq_read,
5072c59f 6070 .write = ftrace_filter_write,
098c879e 6071 .llseek = tracing_lseek,
1cf41dd7 6072 .release = ftrace_regex_release,
5072c59f
SR
6073};
6074
5e2336a0 6075static const struct file_operations ftrace_notrace_fops = {
41c52c0d 6076 .open = ftrace_notrace_open,
850a80cf 6077 .read = seq_read,
41c52c0d 6078 .write = ftrace_notrace_write,
098c879e 6079 .llseek = tracing_lseek,
1cf41dd7 6080 .release = ftrace_regex_release,
41c52c0d
SR
6081};
6082
ea4e2bc4
SR
6083#ifdef CONFIG_FUNCTION_GRAPH_TRACER
6084
6085static DEFINE_MUTEX(graph_lock);
6086
24a9729f 6087struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
fd0e6852 6088struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;
b9b0c831
NK
6089
6090enum graph_filter_type {
6091 GRAPH_FILTER_NOTRACE = 0,
6092 GRAPH_FILTER_FUNCTION,
6093};
ea4e2bc4 6094
555fc781
SRV
6095#define FTRACE_GRAPH_EMPTY ((void *)1)
6096
faf982a6 6097struct ftrace_graph_data {
e704eff3
SRV
6098 struct ftrace_hash *hash;
6099 struct ftrace_func_entry *entry;
6100 int idx; /* for hash table iteration */
6101 enum graph_filter_type type;
6102 struct ftrace_hash *new_hash;
6103 const struct seq_operations *seq_ops;
6104 struct trace_parser parser;
faf982a6
NK
6105};
6106
ea4e2bc4 6107static void *
85951842 6108__g_next(struct seq_file *m, loff_t *pos)
ea4e2bc4 6109{
faf982a6 6110 struct ftrace_graph_data *fgd = m->private;
b9b0c831
NK
6111 struct ftrace_func_entry *entry = fgd->entry;
6112 struct hlist_head *head;
6113 int i, idx = fgd->idx;
faf982a6 6114
b9b0c831 6115 if (*pos >= fgd->hash->count)
ea4e2bc4 6116 return NULL;
b9b0c831
NK
6117
6118 if (entry) {
6119 hlist_for_each_entry_continue(entry, hlist) {
6120 fgd->entry = entry;
6121 return entry;
6122 }
6123
6124 idx++;
6125 }
6126
6127 for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
6128 head = &fgd->hash->buckets[i];
6129 hlist_for_each_entry(entry, head, hlist) {
6130 fgd->entry = entry;
6131 fgd->idx = i;
6132 return entry;
6133 }
6134 }
6135 return NULL;
85951842 6136}
ea4e2bc4 6137
85951842
LZ
6138static void *
6139g_next(struct seq_file *m, void *v, loff_t *pos)
6140{
6141 (*pos)++;
6142 return __g_next(m, pos);
ea4e2bc4
SR
6143}
6144
6145static void *g_start(struct seq_file *m, loff_t *pos)
6146{
faf982a6
NK
6147 struct ftrace_graph_data *fgd = m->private;
6148
ea4e2bc4
SR
6149 mutex_lock(&graph_lock);
6150
649b988b
SRV
6151 if (fgd->type == GRAPH_FILTER_FUNCTION)
6152 fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
6153 lockdep_is_held(&graph_lock));
6154 else
6155 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6156 lockdep_is_held(&graph_lock));
6157
f9349a8f 6158 /* Nothing yet; tell g_show to print that all functions are enabled */
b9b0c831 6159 if (ftrace_hash_empty(fgd->hash) && !*pos)
555fc781 6160 return FTRACE_GRAPH_EMPTY;
f9349a8f 6161
b9b0c831
NK
6162 fgd->idx = 0;
6163 fgd->entry = NULL;
85951842 6164 return __g_next(m, pos);
ea4e2bc4
SR
6165}
6166
6167static void g_stop(struct seq_file *m, void *p)
6168{
6169 mutex_unlock(&graph_lock);
6170}
6171
6172static int g_show(struct seq_file *m, void *v)
6173{
b9b0c831 6174 struct ftrace_func_entry *entry = v;
ea4e2bc4 6175
b9b0c831 6176 if (!entry)
ea4e2bc4
SR
6177 return 0;
6178
555fc781 6179 if (entry == FTRACE_GRAPH_EMPTY) {
280d1429
NK
6180 struct ftrace_graph_data *fgd = m->private;
6181
b9b0c831 6182 if (fgd->type == GRAPH_FILTER_FUNCTION)
fa6f0cc7 6183 seq_puts(m, "#### all functions enabled ####\n");
280d1429 6184 else
fa6f0cc7 6185 seq_puts(m, "#### no functions disabled ####\n");
f9349a8f
FW
6186 return 0;
6187 }
6188
b9b0c831 6189 seq_printf(m, "%ps\n", (void *)entry->ip);
ea4e2bc4
SR
6190
6191 return 0;
6192}
6193
88e9d34c 6194static const struct seq_operations ftrace_graph_seq_ops = {
ea4e2bc4
SR
6195 .start = g_start,
6196 .next = g_next,
6197 .stop = g_stop,
6198 .show = g_show,
6199};
6200
6201static int
faf982a6
NK
6202__ftrace_graph_open(struct inode *inode, struct file *file,
6203 struct ftrace_graph_data *fgd)
ea4e2bc4 6204{
17911ff3 6205 int ret;
b9b0c831 6206 struct ftrace_hash *new_hash = NULL;
ea4e2bc4 6207
17911ff3
SRV
6208 ret = security_locked_down(LOCKDOWN_TRACEFS);
6209 if (ret)
6210 return ret;
6211
b9b0c831
NK
6212 if (file->f_mode & FMODE_WRITE) {
6213 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
6214
e704eff3
SRV
6215 if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
6216 return -ENOMEM;
6217
b9b0c831
NK
6218 if (file->f_flags & O_TRUNC)
6219 new_hash = alloc_ftrace_hash(size_bits);
6220 else
6221 new_hash = alloc_and_copy_ftrace_hash(size_bits,
6222 fgd->hash);
6223 if (!new_hash) {
6224 ret = -ENOMEM;
6225 goto out;
6226 }
ea4e2bc4
SR
6227 }
6228
faf982a6 6229 if (file->f_mode & FMODE_READ) {
b9b0c831 6230 ret = seq_open(file, &ftrace_graph_seq_ops);
faf982a6
NK
6231 if (!ret) {
6232 struct seq_file *m = file->private_data;
6233 m->private = fgd;
b9b0c831
NK
6234 } else {
6235 /* Failed */
6236 free_ftrace_hash(new_hash);
6237 new_hash = NULL;
faf982a6
NK
6238 }
6239 } else
6240 file->private_data = fgd;
ea4e2bc4 6241
b9b0c831 6242out:
e704eff3
SRV
6243 if (ret < 0 && file->f_mode & FMODE_WRITE)
6244 trace_parser_put(&fgd->parser);
6245
b9b0c831 6246 fgd->new_hash = new_hash;
649b988b
SRV
6247
6248 /*
6249 * All uses of fgd->hash must be taken with the graph_lock
6250 * held. The graph_lock is going to be released, so force
6251 * fgd->hash to be reinitialized when it is taken again.
6252 */
6253 fgd->hash = NULL;
6254
ea4e2bc4
SR
6255 return ret;
6256}
6257
faf982a6
NK
6258static int
6259ftrace_graph_open(struct inode *inode, struct file *file)
6260{
6261 struct ftrace_graph_data *fgd;
b9b0c831 6262 int ret;
faf982a6
NK
6263
6264 if (unlikely(ftrace_disabled))
6265 return -ENODEV;
6266
6267 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
6268 if (fgd == NULL)
6269 return -ENOMEM;
6270
b9b0c831
NK
6271 mutex_lock(&graph_lock);
6272
649b988b
SRV
6273 fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
6274 lockdep_is_held(&graph_lock));
b9b0c831 6275 fgd->type = GRAPH_FILTER_FUNCTION;
faf982a6
NK
6276 fgd->seq_ops = &ftrace_graph_seq_ops;
6277
b9b0c831
NK
6278 ret = __ftrace_graph_open(inode, file, fgd);
6279 if (ret < 0)
6280 kfree(fgd);
6281
6282 mutex_unlock(&graph_lock);
6283 return ret;
faf982a6
NK
6284}
6285
29ad23b0
NK
6286static int
6287ftrace_graph_notrace_open(struct inode *inode, struct file *file)
6288{
6289 struct ftrace_graph_data *fgd;
b9b0c831 6290 int ret;
29ad23b0
NK
6291
6292 if (unlikely(ftrace_disabled))
6293 return -ENODEV;
6294
6295 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
6296 if (fgd == NULL)
6297 return -ENOMEM;
6298
b9b0c831
NK
6299 mutex_lock(&graph_lock);
6300
649b988b
SRV
6301 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6302 lockdep_is_held(&graph_lock));
b9b0c831 6303 fgd->type = GRAPH_FILTER_NOTRACE;
29ad23b0
NK
6304 fgd->seq_ops = &ftrace_graph_seq_ops;
6305
b9b0c831
NK
6306 ret = __ftrace_graph_open(inode, file, fgd);
6307 if (ret < 0)
6308 kfree(fgd);
6309
6310 mutex_unlock(&graph_lock);
6311 return ret;
29ad23b0
NK
6312}
6313
87827111
LZ
6314static int
6315ftrace_graph_release(struct inode *inode, struct file *file)
6316{
b9b0c831 6317 struct ftrace_graph_data *fgd;
e704eff3
SRV
6318 struct ftrace_hash *old_hash, *new_hash;
6319 struct trace_parser *parser;
6320 int ret = 0;
b9b0c831 6321
faf982a6
NK
6322 if (file->f_mode & FMODE_READ) {
6323 struct seq_file *m = file->private_data;
6324
b9b0c831 6325 fgd = m->private;
87827111 6326 seq_release(inode, file);
faf982a6 6327 } else {
b9b0c831 6328 fgd = file->private_data;
faf982a6
NK
6329 }
6330
e704eff3
SRV
6331
6332 if (file->f_mode & FMODE_WRITE) {
6333
6334 parser = &fgd->parser;
6335
6336 if (trace_parser_loaded((parser))) {
e704eff3
SRV
6337 ret = ftrace_graph_set_hash(fgd->new_hash,
6338 parser->buffer);
6339 }
6340
6341 trace_parser_put(parser);
6342
6343 new_hash = __ftrace_hash_move(fgd->new_hash);
6344 if (!new_hash) {
6345 ret = -ENOMEM;
6346 goto out;
6347 }
6348
6349 mutex_lock(&graph_lock);
6350
6351 if (fgd->type == GRAPH_FILTER_FUNCTION) {
6352 old_hash = rcu_dereference_protected(ftrace_graph_hash,
6353 lockdep_is_held(&graph_lock));
6354 rcu_assign_pointer(ftrace_graph_hash, new_hash);
6355 } else {
6356 old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6357 lockdep_is_held(&graph_lock));
6358 rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
6359 }
6360
6361 mutex_unlock(&graph_lock);
6362
54a16ff6
SRV
6363 /*
6364 * We need to do a hard force of sched synchronization.
6365 * This is because we use preempt_disable() to do RCU, but
6366 * the function tracers can be called where RCU is not watching
6367 * (like before user_exit()). We cannot rely on the RCU
6368 * infrastructure to do the synchronization, thus we must do it
6369 * ourselves.
6370 */
68e83498
NSJ
6371 if (old_hash != EMPTY_HASH)
6372 synchronize_rcu_tasks_rude();
e704eff3
SRV
6373
6374 free_ftrace_hash(old_hash);
6375 }
6376
6377 out:
f9797c2f 6378 free_ftrace_hash(fgd->new_hash);
b9b0c831
NK
6379 kfree(fgd);
6380
e704eff3 6381 return ret;
87827111
LZ
6382}
6383
ea4e2bc4 6384static int
b9b0c831 6385ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
ea4e2bc4 6386{
3ba00929 6387 struct ftrace_glob func_g;
ea4e2bc4
SR
6388 struct dyn_ftrace *rec;
6389 struct ftrace_page *pg;
b9b0c831 6390 struct ftrace_func_entry *entry;
c7c6b1fe 6391 int fail = 1;
3ba00929 6392 int not;
ea4e2bc4 6393
f9349a8f 6394 /* decode regex */
3ba00929
DS
6395 func_g.type = filter_parse_regex(buffer, strlen(buffer),
6396 &func_g.search, &not);
f9349a8f 6397
3ba00929 6398 func_g.len = strlen(func_g.search);
f9349a8f 6399
52baf119 6400 mutex_lock(&ftrace_lock);
45a4a237
SR
6401
6402 if (unlikely(ftrace_disabled)) {
6403 mutex_unlock(&ftrace_lock);
6404 return -ENODEV;
6405 }
6406
265c831c
SR
6407 do_for_each_ftrace_rec(pg, rec) {
6408
546fece4
SRRH
6409 if (rec->flags & FTRACE_FL_DISABLED)
6410 continue;
6411
0b507e1e 6412 if (ftrace_match_record(rec, &func_g, NULL, 0)) {
b9b0c831 6413 entry = ftrace_lookup_ip(hash, rec->ip);
c7c6b1fe
LZ
6414
6415 if (!not) {
6416 fail = 0;
b9b0c831
NK
6417
6418 if (entry)
6419 continue;
6420 if (add_hash_entry(hash, rec->ip) < 0)
6421 goto out;
c7c6b1fe 6422 } else {
b9b0c831
NK
6423 if (entry) {
6424 free_hash_entry(hash, entry);
c7c6b1fe
LZ
6425 fail = 0;
6426 }
6427 }
ea4e2bc4 6428 }
265c831c 6429 } while_for_each_ftrace_rec();
c7c6b1fe 6430out:
52baf119 6431 mutex_unlock(&ftrace_lock);
ea4e2bc4 6432
c7c6b1fe
LZ
6433 if (fail)
6434 return -EINVAL;
6435
c7c6b1fe 6436 return 0;
ea4e2bc4
SR
6437}
6438
6439static ssize_t
6440ftrace_graph_write(struct file *file, const char __user *ubuf,
6441 size_t cnt, loff_t *ppos)
6442{
6a10108b 6443 ssize_t read, ret = 0;
faf982a6 6444 struct ftrace_graph_data *fgd = file->private_data;
e704eff3 6445 struct trace_parser *parser;
ea4e2bc4 6446
c7c6b1fe 6447 if (!cnt)
ea4e2bc4
SR
6448 return 0;
6449
ae98d27a
SRV
6450 /* Read mode uses seq functions */
6451 if (file->f_mode & FMODE_READ) {
6452 struct seq_file *m = file->private_data;
6453 fgd = m->private;
6454 }
6455
e704eff3 6456 parser = &fgd->parser;
ea4e2bc4 6457
e704eff3 6458 read = trace_get_user(parser, ubuf, cnt, ppos);
689fd8b6 6459
e704eff3
SRV
6460 if (read >= 0 && trace_parser_loaded(parser) &&
6461 !trace_parser_cont(parser)) {
6a10108b 6462
b9b0c831 6463 ret = ftrace_graph_set_hash(fgd->new_hash,
e704eff3
SRV
6464 parser->buffer);
6465 trace_parser_clear(parser);
ea4e2bc4 6466 }
ea4e2bc4 6467
6a10108b
NK
6468 if (!ret)
6469 ret = read;
1eb90f13 6470
ea4e2bc4
SR
6471 return ret;
6472}
6473
6474static const struct file_operations ftrace_graph_fops = {
87827111
LZ
6475 .open = ftrace_graph_open,
6476 .read = seq_read,
6477 .write = ftrace_graph_write,
098c879e 6478 .llseek = tracing_lseek,
87827111 6479 .release = ftrace_graph_release,
ea4e2bc4 6480};
29ad23b0
NK
6481
6482static const struct file_operations ftrace_graph_notrace_fops = {
6483 .open = ftrace_graph_notrace_open,
6484 .read = seq_read,
6485 .write = ftrace_graph_write,
098c879e 6486 .llseek = tracing_lseek,
29ad23b0
NK
6487 .release = ftrace_graph_release,
6488};
ea4e2bc4
SR
6489#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6490
591dffda
SRRH
6491void ftrace_create_filter_files(struct ftrace_ops *ops,
6492 struct dentry *parent)
6493{
6494
21ccc9cd 6495 trace_create_file("set_ftrace_filter", TRACE_MODE_WRITE, parent,
591dffda
SRRH
6496 ops, &ftrace_filter_fops);
6497
21ccc9cd 6498 trace_create_file("set_ftrace_notrace", TRACE_MODE_WRITE, parent,
591dffda
SRRH
6499 ops, &ftrace_notrace_fops);
6500}
6501
6502/*
6503 * The name "destroy_filter_files" is really a misnomer. Although
9efb85c5 6504 * in the future, it may actually delete the files, but this is
591dffda
SRRH
6505 * really intended to make sure the ops passed in are disabled
6506 * and that when this function returns, the caller is free to
6507 * free the ops.
6508 *
6509 * The "destroy" name is only to match the "create" name that this
6510 * should be paired with.
6511 */
6512void ftrace_destroy_filter_files(struct ftrace_ops *ops)
6513{
6514 mutex_lock(&ftrace_lock);
6515 if (ops->flags & FTRACE_OPS_FL_ENABLED)
6516 ftrace_shutdown(ops, 0);
6517 ops->flags |= FTRACE_OPS_FL_DELETED;
2840f84f 6518 ftrace_free_filter(ops);
591dffda
SRRH
6519 mutex_unlock(&ftrace_lock);
6520}
6521
8434dc93 6522static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
5072c59f 6523{
5072c59f 6524
21ccc9cd 6525 trace_create_file("available_filter_functions", TRACE_MODE_READ,
5452af66 6526 d_tracer, NULL, &ftrace_avail_fops);
5072c59f 6527
21ccc9cd 6528 trace_create_file("enabled_functions", TRACE_MODE_READ,
647bcd03
SR
6529 d_tracer, NULL, &ftrace_enabled_fops);
6530
591dffda 6531 ftrace_create_filter_files(&global_ops, d_tracer);
ad90c0e3 6532
ea4e2bc4 6533#ifdef CONFIG_FUNCTION_GRAPH_TRACER
21ccc9cd 6534 trace_create_file("set_graph_function", TRACE_MODE_WRITE, d_tracer,
ea4e2bc4
SR
6535 NULL,
6536 &ftrace_graph_fops);
21ccc9cd 6537 trace_create_file("set_graph_notrace", TRACE_MODE_WRITE, d_tracer,
29ad23b0
NK
6538 NULL,
6539 &ftrace_graph_notrace_fops);
ea4e2bc4
SR
6540#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6541
5072c59f
SR
6542 return 0;
6543}
6544
9fd49328 6545static int ftrace_cmp_ips(const void *a, const void *b)
68950619 6546{
9fd49328
SR
6547 const unsigned long *ipa = a;
6548 const unsigned long *ipb = b;
68950619 6549
9fd49328
SR
6550 if (*ipa > *ipb)
6551 return 1;
6552 if (*ipa < *ipb)
6553 return -1;
6554 return 0;
6555}
6556
8147dc78
SRV
6557#ifdef CONFIG_FTRACE_SORT_STARTUP_TEST
6558static void test_is_sorted(unsigned long *start, unsigned long count)
6559{
6560 int i;
6561
6562 for (i = 1; i < count; i++) {
6563 if (WARN(start[i - 1] > start[i],
6564 "[%d] %pS at %lx is not sorted with %pS at %lx\n", i,
6565 (void *)start[i - 1], start[i - 1],
6566 (void *)start[i], start[i]))
6567 break;
6568 }
6569 if (i == count)
6570 pr_info("ftrace section at %px sorted properly\n", start);
6571}
6572#else
6573static void test_is_sorted(unsigned long *start, unsigned long count)
6574{
6575}
6576#endif
6577
5cb084bb 6578static int ftrace_process_locs(struct module *mod,
31e88909 6579 unsigned long *start,
68bf21aa
SR
6580 unsigned long *end)
6581{
706c81f8 6582 struct ftrace_page *start_pg;
a7900875 6583 struct ftrace_page *pg;
706c81f8 6584 struct dyn_ftrace *rec;
a7900875 6585 unsigned long count;
68bf21aa
SR
6586 unsigned long *p;
6587 unsigned long addr;
4376cac6 6588 unsigned long flags = 0; /* Shut up gcc */
a7900875
SR
6589 int ret = -ENOMEM;
6590
6591 count = end - start;
6592
6593 if (!count)
6594 return 0;
6595
72b3942a
YL
6596 /*
6597 * Sorting mcount in vmlinux at build time depends on
6b9b6413 6598 * CONFIG_BUILDTIME_MCOUNT_SORT, while mcount loc in
72b3942a
YL
6599 * modules cannot be sorted at build time.
6600 */
6b9b6413 6601 if (!IS_ENABLED(CONFIG_BUILDTIME_MCOUNT_SORT) || mod) {
72b3942a
YL
6602 sort(start, count, sizeof(*start),
6603 ftrace_cmp_ips, NULL);
8147dc78
SRV
6604 } else {
6605 test_is_sorted(start, count);
72b3942a 6606 }
9fd49328 6607
706c81f8
SR
6608 start_pg = ftrace_allocate_pages(count);
6609 if (!start_pg)
a7900875 6610 return -ENOMEM;
68bf21aa 6611
e6ea44e9 6612 mutex_lock(&ftrace_lock);
a7900875 6613
32082309
SR
6614 /*
6615 * Core and each module needs their own pages, as
6616 * modules will free them when they are removed.
6617 * Force a new page to be allocated for modules.
6618 */
a7900875
SR
6619 if (!mod) {
6620 WARN_ON(ftrace_pages || ftrace_pages_start);
6621 /* First initialization */
706c81f8 6622 ftrace_pages = ftrace_pages_start = start_pg;
a7900875 6623 } else {
32082309 6624 if (!ftrace_pages)
a7900875 6625 goto out;
32082309 6626
a7900875
SR
6627 if (WARN_ON(ftrace_pages->next)) {
6628 /* Hmm, we have free pages? */
6629 while (ftrace_pages->next)
6630 ftrace_pages = ftrace_pages->next;
32082309 6631 }
a7900875 6632
706c81f8 6633 ftrace_pages->next = start_pg;
32082309
SR
6634 }
6635
68bf21aa 6636 p = start;
706c81f8 6637 pg = start_pg;
68bf21aa 6638 while (p < end) {
db42523b 6639 unsigned long end_offset;
68bf21aa 6640 addr = ftrace_call_adjust(*p++);
20e5227e
SR
6641 /*
6642 * Some architecture linkers will pad between
6643 * the different mcount_loc sections of different
6644 * object files to satisfy alignments.
6645 * Skip any NULL pointers.
6646 */
6647 if (!addr)
6648 continue;
706c81f8 6649
db42523b
LT
6650 end_offset = (pg->index+1) * sizeof(pg->records[0]);
6651 if (end_offset > PAGE_SIZE << pg->order) {
706c81f8
SR
6652 /* We should have allocated enough */
6653 if (WARN_ON(!pg->next))
6654 break;
6655 pg = pg->next;
6656 }
6657
6658 rec = &pg->records[pg->index++];
6659 rec->ip = addr;
68bf21aa
SR
6660 }
6661
706c81f8
SR
6662 /* We should have used all pages */
6663 WARN_ON(pg->next);
6664
6665 /* Assign the last page to ftrace_pages */
6666 ftrace_pages = pg;
6667
a4f18ed1 6668 /*
4376cac6
SR
6669 * We only need to disable interrupts on start up
6670 * because we are modifying code that an interrupt
6671 * may execute, and the modification is not atomic.
6672 * But for modules, nothing runs the code we modify
6673 * until we are finished with it, and there's no
6674 * reason to cause large interrupt latencies while we do it.
a4f18ed1 6675 */
4376cac6
SR
6676 if (!mod)
6677 local_irq_save(flags);
1dc43cf0 6678 ftrace_update_code(mod, start_pg);
4376cac6
SR
6679 if (!mod)
6680 local_irq_restore(flags);
a7900875
SR
6681 ret = 0;
6682 out:
e6ea44e9 6683 mutex_unlock(&ftrace_lock);
68bf21aa 6684
a7900875 6685 return ret;
68bf21aa
SR
6686}
6687
aba4b5c2
SRV
6688struct ftrace_mod_func {
6689 struct list_head list;
6690 char *name;
6691 unsigned long ip;
6692 unsigned int size;
6693};
6694
6695struct ftrace_mod_map {
6aa69784 6696 struct rcu_head rcu;
aba4b5c2
SRV
6697 struct list_head list;
6698 struct module *mod;
6699 unsigned long start_addr;
6700 unsigned long end_addr;
6701 struct list_head funcs;
6171a031 6702 unsigned int num_funcs;
aba4b5c2
SRV
6703};
6704
fc0ea795
AH
6705static int ftrace_get_trampoline_kallsym(unsigned int symnum,
6706 unsigned long *value, char *type,
6707 char *name, char *module_name,
6708 int *exported)
6709{
6710 struct ftrace_ops *op;
6711
6712 list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) {
6713 if (!op->trampoline || symnum--)
6714 continue;
6715 *value = op->trampoline;
6716 *type = 't';
6717 strlcpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN);
6718 strlcpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN);
6719 *exported = 0;
6720 return 0;
6721 }
6722
6723 return -ERANGE;
6724}
6725
93eb677d 6726#ifdef CONFIG_MODULES
32082309
SR
6727
6728#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
6729
6aa69784
SRV
6730static LIST_HEAD(ftrace_mod_maps);
6731
b7ffffbb
SRRH
6732static int referenced_filters(struct dyn_ftrace *rec)
6733{
6734 struct ftrace_ops *ops;
6735 int cnt = 0;
6736
6737 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
8a224ffb 6738 if (ops_references_rec(ops, rec)) {
c5f51572
CZ
6739 if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT))
6740 continue;
6741 if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY))
6742 continue;
8a224ffb
CZ
6743 cnt++;
6744 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
6745 rec->flags |= FTRACE_FL_REGS;
c5f51572
CZ
6746 if (cnt == 1 && ops->trampoline)
6747 rec->flags |= FTRACE_FL_TRAMP;
6748 else
6749 rec->flags &= ~FTRACE_FL_TRAMP;
8a224ffb 6750 }
b7ffffbb
SRRH
6751 }
6752
6753 return cnt;
6754}
6755
2a5bfe47
SRV
6756static void
6757clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
6758{
6759 struct ftrace_func_entry *entry;
6760 struct dyn_ftrace *rec;
6761 int i;
6762
6763 if (ftrace_hash_empty(hash))
6764 return;
6765
6766 for (i = 0; i < pg->index; i++) {
6767 rec = &pg->records[i];
6768 entry = __ftrace_lookup_ip(hash, rec->ip);
6769 /*
6770 * Do not allow this rec to match again.
6771 * Yeah, it may waste some memory, but will be removed
6772 * if/when the hash is modified again.
6773 */
6774 if (entry)
6775 entry->ip = 0;
6776 }
6777}
6778
f2cc020d 6779/* Clear any records from hashes */
2a5bfe47
SRV
6780static void clear_mod_from_hashes(struct ftrace_page *pg)
6781{
6782 struct trace_array *tr;
6783
6784 mutex_lock(&trace_types_lock);
6785 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6786 if (!tr->ops || !tr->ops->func_hash)
6787 continue;
6788 mutex_lock(&tr->ops->func_hash->regex_lock);
6789 clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
6790 clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
6791 mutex_unlock(&tr->ops->func_hash->regex_lock);
6792 }
6793 mutex_unlock(&trace_types_lock);
6794}
6795
6aa69784
SRV
6796static void ftrace_free_mod_map(struct rcu_head *rcu)
6797{
6798 struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
6799 struct ftrace_mod_func *mod_func;
6800 struct ftrace_mod_func *n;
6801
6802 /* All the contents of mod_map are now not visible to readers */
6803 list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
6804 kfree(mod_func->name);
6805 list_del(&mod_func->list);
6806 kfree(mod_func);
6807 }
6808
6809 kfree(mod_map);
6810}
6811
e7247a15 6812void ftrace_release_mod(struct module *mod)
93eb677d 6813{
6aa69784
SRV
6814 struct ftrace_mod_map *mod_map;
6815 struct ftrace_mod_map *n;
93eb677d 6816 struct dyn_ftrace *rec;
32082309 6817 struct ftrace_page **last_pg;
2a5bfe47 6818 struct ftrace_page *tmp_page = NULL;
93eb677d 6819 struct ftrace_page *pg;
93eb677d 6820
45a4a237
SR
6821 mutex_lock(&ftrace_lock);
6822
e7247a15 6823 if (ftrace_disabled)
45a4a237 6824 goto out_unlock;
93eb677d 6825
6aa69784
SRV
6826 list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
6827 if (mod_map->mod == mod) {
6828 list_del_rcu(&mod_map->list);
74401729 6829 call_rcu(&mod_map->rcu, ftrace_free_mod_map);
6aa69784
SRV
6830 break;
6831 }
6832 }
6833
32082309
SR
6834 /*
6835 * Each module has its own ftrace_pages, remove
6836 * them from the list.
6837 */
6838 last_pg = &ftrace_pages_start;
6839 for (pg = ftrace_pages_start; pg; pg = *last_pg) {
6840 rec = &pg->records[0];
3e234289
SRV
6841 if (within_module_core(rec->ip, mod) ||
6842 within_module_init(rec->ip, mod)) {
93eb677d 6843 /*
32082309
SR
6844 * As core pages are first, the first
6845 * page should never be a module page.
93eb677d 6846 */
32082309
SR
6847 if (WARN_ON(pg == ftrace_pages_start))
6848 goto out_unlock;
6849
6850 /* Check if we are deleting the last page */
6851 if (pg == ftrace_pages)
6852 ftrace_pages = next_to_ftrace_page(last_pg);
6853
83dd1493 6854 ftrace_update_tot_cnt -= pg->index;
32082309 6855 *last_pg = pg->next;
2a5bfe47
SRV
6856
6857 pg->next = tmp_page;
6858 tmp_page = pg;
32082309
SR
6859 } else
6860 last_pg = &pg->next;
6861 }
45a4a237 6862 out_unlock:
93eb677d 6863 mutex_unlock(&ftrace_lock);
2a5bfe47
SRV
6864
6865 for (pg = tmp_page; pg; pg = tmp_page) {
6866
6867 /* Needs to be called outside of ftrace_lock */
6868 clear_mod_from_hashes(pg);
6869
db42523b
LT
6870 if (pg->records) {
6871 free_pages((unsigned long)pg->records, pg->order);
6872 ftrace_number_of_pages -= 1 << pg->order;
6873 }
2a5bfe47
SRV
6874 tmp_page = pg->next;
6875 kfree(pg);
da537f0a 6876 ftrace_number_of_groups--;
2a5bfe47 6877 }
93eb677d
SR
6878}
6879
7dcd182b 6880void ftrace_module_enable(struct module *mod)
b7ffffbb
SRRH
6881{
6882 struct dyn_ftrace *rec;
6883 struct ftrace_page *pg;
6884
6885 mutex_lock(&ftrace_lock);
6886
6887 if (ftrace_disabled)
6888 goto out_unlock;
6889
6890 /*
6891 * If the tracing is enabled, go ahead and enable the record.
6892 *
9efb85c5 6893 * The reason not to enable the record immediately is the
b7ffffbb
SRRH
6894 * inherent check of ftrace_make_nop/ftrace_make_call for
6895 * correct previous instructions. Making first the NOP
6896 * conversion puts the module to the correct state, thus
6897 * passing the ftrace_make_call check.
6898 *
6899 * We also delay this until after the module code has already set the
6900 * text to read-only, as we now need to set it back to read-write
6901 * so that we can modify the text.
6902 */
6903 if (ftrace_start_up)
6904 ftrace_arch_code_modify_prepare();
6905
6906 do_for_each_ftrace_rec(pg, rec) {
6907 int cnt;
6908 /*
6909 * do_for_each_ftrace_rec() is a double loop.
6910 * Module text shares the pg. If a record is
6911 * not part of this module, then skip this pg,
6912 * which the "break" will do.
6913 */
3e234289
SRV
6914 if (!within_module_core(rec->ip, mod) &&
6915 !within_module_init(rec->ip, mod))
b7ffffbb
SRRH
6916 break;
6917
b39181f7
SRG
6918 /* Weak functions should still be ignored */
6919 if (!test_for_valid_rec(rec)) {
6920 /* Clear all other flags. Should not be enabled anyway */
6921 rec->flags = FTRACE_FL_DISABLED;
6922 continue;
6923 }
6924
b7ffffbb
SRRH
6925 cnt = 0;
6926
6927 /*
6928 * When adding a module, we need to check if tracers are
6929 * currently enabled and if they are, and can trace this record,
6930 * we need to enable the module functions as well as update the
6931 * reference counts for those function records.
6932 */
6933 if (ftrace_start_up)
6934 cnt += referenced_filters(rec);
6935
8a224ffb
CZ
6936 rec->flags &= ~FTRACE_FL_DISABLED;
6937 rec->flags += cnt;
b7ffffbb
SRRH
6938
6939 if (ftrace_start_up && cnt) {
6940 int failed = __ftrace_replace_code(rec, 1);
6941 if (failed) {
6942 ftrace_bug(failed, rec);
6943 goto out_loop;
6944 }
6945 }
6946
6947 } while_for_each_ftrace_rec();
6948
6949 out_loop:
6950 if (ftrace_start_up)
6951 ftrace_arch_code_modify_post_process();
6952
6953 out_unlock:
6954 mutex_unlock(&ftrace_lock);
d7fbf8df
SRV
6955
6956 process_cached_mods(mod->name);
b7ffffbb
SRRH
6957}
6958
b6b71f66 6959void ftrace_module_init(struct module *mod)
90d595fe 6960{
2889c658
YW
6961 int ret;
6962
97e9b4fc 6963 if (ftrace_disabled || !mod->num_ftrace_callsites)
fed1939c 6964 return;
90d595fe 6965
2889c658
YW
6966 ret = ftrace_process_locs(mod, mod->ftrace_callsites,
6967 mod->ftrace_callsites + mod->num_ftrace_callsites);
6968 if (ret)
6969 pr_warn("ftrace: failed to allocate entries for module '%s' functions\n",
6970 mod->name);
8c189ea6 6971}
aba4b5c2
SRV
6972
6973static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
6974 struct dyn_ftrace *rec)
6975{
6976 struct ftrace_mod_func *mod_func;
6977 unsigned long symsize;
6978 unsigned long offset;
6979 char str[KSYM_SYMBOL_LEN];
6980 char *modname;
6981 const char *ret;
6982
6983 ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
6984 if (!ret)
6985 return;
6986
6987 mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
6988 if (!mod_func)
6989 return;
6990
6991 mod_func->name = kstrdup(str, GFP_KERNEL);
6992 if (!mod_func->name) {
6993 kfree(mod_func);
6994 return;
6995 }
6996
6997 mod_func->ip = rec->ip - offset;
6998 mod_func->size = symsize;
6999
6171a031
SRV
7000 mod_map->num_funcs++;
7001
aba4b5c2
SRV
7002 list_add_rcu(&mod_func->list, &mod_map->funcs);
7003}
7004
aba4b5c2
SRV
7005static struct ftrace_mod_map *
7006allocate_ftrace_mod_map(struct module *mod,
7007 unsigned long start, unsigned long end)
7008{
7009 struct ftrace_mod_map *mod_map;
7010
7011 mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
7012 if (!mod_map)
7013 return NULL;
7014
7015 mod_map->mod = mod;
7016 mod_map->start_addr = start;
7017 mod_map->end_addr = end;
6171a031 7018 mod_map->num_funcs = 0;
aba4b5c2
SRV
7019
7020 INIT_LIST_HEAD_RCU(&mod_map->funcs);
7021
7022 list_add_rcu(&mod_map->list, &ftrace_mod_maps);
7023
7024 return mod_map;
7025}
7026
7027static const char *
7028ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
7029 unsigned long addr, unsigned long *size,
7030 unsigned long *off, char *sym)
7031{
7032 struct ftrace_mod_func *found_func = NULL;
7033 struct ftrace_mod_func *mod_func;
7034
7035 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
7036 if (addr >= mod_func->ip &&
7037 addr < mod_func->ip + mod_func->size) {
7038 found_func = mod_func;
7039 break;
7040 }
7041 }
7042
7043 if (found_func) {
7044 if (size)
7045 *size = found_func->size;
7046 if (off)
7047 *off = addr - found_func->ip;
7048 if (sym)
7049 strlcpy(sym, found_func->name, KSYM_NAME_LEN);
7050
7051 return found_func->name;
7052 }
7053
7054 return NULL;
7055}
7056
7057const char *
7058ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
7059 unsigned long *off, char **modname, char *sym)
7060{
7061 struct ftrace_mod_map *mod_map;
7062 const char *ret = NULL;
7063
74401729 7064 /* mod_map is freed via call_rcu() */
aba4b5c2
SRV
7065 preempt_disable();
7066 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
7067 ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
7068 if (ret) {
7069 if (modname)
7070 *modname = mod_map->mod->name;
7071 break;
7072 }
7073 }
7074 preempt_enable();
7075
7076 return ret;
7077}
7078
6171a031
SRV
7079int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
7080 char *type, char *name,
7081 char *module_name, int *exported)
7082{
7083 struct ftrace_mod_map *mod_map;
7084 struct ftrace_mod_func *mod_func;
fc0ea795 7085 int ret;
6171a031
SRV
7086
7087 preempt_disable();
7088 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
7089
7090 if (symnum >= mod_map->num_funcs) {
7091 symnum -= mod_map->num_funcs;
7092 continue;
7093 }
7094
7095 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
7096 if (symnum > 1) {
7097 symnum--;
7098 continue;
7099 }
7100
7101 *value = mod_func->ip;
7102 *type = 'T';
7103 strlcpy(name, mod_func->name, KSYM_NAME_LEN);
7104 strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
7105 *exported = 1;
7106 preempt_enable();
7107 return 0;
7108 }
7109 WARN_ON(1);
7110 break;
7111 }
fc0ea795
AH
7112 ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
7113 module_name, exported);
6171a031 7114 preempt_enable();
fc0ea795 7115 return ret;
6171a031
SRV
7116}
7117
aba4b5c2
SRV
7118#else
7119static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
7120 struct dyn_ftrace *rec) { }
7121static inline struct ftrace_mod_map *
7122allocate_ftrace_mod_map(struct module *mod,
7123 unsigned long start, unsigned long end)
7124{
7125 return NULL;
7126}
fc0ea795
AH
7127int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
7128 char *type, char *name, char *module_name,
7129 int *exported)
7130{
7131 int ret;
7132
7133 preempt_disable();
7134 ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
7135 module_name, exported);
7136 preempt_enable();
7137 return ret;
7138}
93eb677d
SR
7139#endif /* CONFIG_MODULES */
7140
8715b108
JF
7141struct ftrace_init_func {
7142 struct list_head list;
7143 unsigned long ip;
7144};
7145
7146/* Clear any init ips from hashes */
7147static void
7148clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
42c269c8 7149{
8715b108
JF
7150 struct ftrace_func_entry *entry;
7151
08468754 7152 entry = ftrace_lookup_ip(hash, func->ip);
8715b108
JF
7153 /*
7154 * Do not allow this rec to match again.
7155 * Yeah, it may waste some memory, but will be removed
7156 * if/when the hash is modified again.
7157 */
7158 if (entry)
7159 entry->ip = 0;
7160}
7161
7162static void
7163clear_func_from_hashes(struct ftrace_init_func *func)
7164{
7165 struct trace_array *tr;
7166
7167 mutex_lock(&trace_types_lock);
7168 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7169 if (!tr->ops || !tr->ops->func_hash)
7170 continue;
7171 mutex_lock(&tr->ops->func_hash->regex_lock);
7172 clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
7173 clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
7174 mutex_unlock(&tr->ops->func_hash->regex_lock);
7175 }
7176 mutex_unlock(&trace_types_lock);
7177}
7178
7179static void add_to_clear_hash_list(struct list_head *clear_list,
7180 struct dyn_ftrace *rec)
7181{
7182 struct ftrace_init_func *func;
7183
7184 func = kmalloc(sizeof(*func), GFP_KERNEL);
7185 if (!func) {
24589e3a 7186 MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n");
8715b108
JF
7187 return;
7188 }
7189
7190 func->ip = rec->ip;
7191 list_add(&func->list, clear_list);
7192}
7193
aba4b5c2 7194void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
42c269c8 7195{
6cafbe15
SRV
7196 unsigned long start = (unsigned long)(start_ptr);
7197 unsigned long end = (unsigned long)(end_ptr);
42c269c8
SRV
7198 struct ftrace_page **last_pg = &ftrace_pages_start;
7199 struct ftrace_page *pg;
7200 struct dyn_ftrace *rec;
7201 struct dyn_ftrace key;
aba4b5c2 7202 struct ftrace_mod_map *mod_map = NULL;
8715b108
JF
7203 struct ftrace_init_func *func, *func_next;
7204 struct list_head clear_hash;
42c269c8 7205
8715b108
JF
7206 INIT_LIST_HEAD(&clear_hash);
7207
42c269c8
SRV
7208 key.ip = start;
7209 key.flags = end; /* overload flags, as it is unsigned long */
7210
7211 mutex_lock(&ftrace_lock);
7212
aba4b5c2
SRV
7213 /*
7214 * If we are freeing module init memory, then check if
7215 * any tracer is active. If so, we need to save a mapping of
7216 * the module functions being freed with the address.
7217 */
7218 if (mod && ftrace_ops_list != &ftrace_list_end)
7219 mod_map = allocate_ftrace_mod_map(mod, start, end);
7220
42c269c8
SRV
7221 for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
7222 if (end < pg->records[0].ip ||
7223 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
7224 continue;
7225 again:
7226 rec = bsearch(&key, pg->records, pg->index,
7227 sizeof(struct dyn_ftrace),
7228 ftrace_cmp_recs);
7229 if (!rec)
7230 continue;
aba4b5c2 7231
8715b108
JF
7232 /* rec will be cleared from hashes after ftrace_lock unlock */
7233 add_to_clear_hash_list(&clear_hash, rec);
7234
aba4b5c2
SRV
7235 if (mod_map)
7236 save_ftrace_mod_rec(mod_map, rec);
7237
42c269c8 7238 pg->index--;
4ec78467 7239 ftrace_update_tot_cnt--;
42c269c8
SRV
7240 if (!pg->index) {
7241 *last_pg = pg->next;
db42523b
LT
7242 if (pg->records) {
7243 free_pages((unsigned long)pg->records, pg->order);
7244 ftrace_number_of_pages -= 1 << pg->order;
7245 }
da537f0a 7246 ftrace_number_of_groups--;
42c269c8
SRV
7247 kfree(pg);
7248 pg = container_of(last_pg, struct ftrace_page, next);
7249 if (!(*last_pg))
7250 ftrace_pages = pg;
7251 continue;
7252 }
7253 memmove(rec, rec + 1,
7254 (pg->index - (rec - pg->records)) * sizeof(*rec));
7255 /* More than one function may be in this block */
7256 goto again;
7257 }
7258 mutex_unlock(&ftrace_lock);
8715b108
JF
7259
7260 list_for_each_entry_safe(func, func_next, &clear_hash, list) {
7261 clear_func_from_hashes(func);
7262 kfree(func);
7263 }
42c269c8
SRV
7264}
7265
6cafbe15
SRV
7266void __init ftrace_free_init_mem(void)
7267{
7268 void *start = (void *)(&__init_begin);
7269 void *end = (void *)(&__init_end);
7270
380af29b
SRG
7271 ftrace_boot_snapshot();
7272
aba4b5c2 7273 ftrace_free_mem(NULL, start, end);
42c269c8
SRV
7274}
7275
6644c654
WO
7276int __init __weak ftrace_dyn_arch_init(void)
7277{
7278 return 0;
7279}
7280
68bf21aa
SR
7281void __init ftrace_init(void)
7282{
1dc43cf0
JS
7283 extern unsigned long __start_mcount_loc[];
7284 extern unsigned long __stop_mcount_loc[];
3a36cb11 7285 unsigned long count, flags;
68bf21aa
SR
7286 int ret;
7287
68bf21aa 7288 local_irq_save(flags);
3a36cb11 7289 ret = ftrace_dyn_arch_init();
68bf21aa 7290 local_irq_restore(flags);
af64a7cb 7291 if (ret)
68bf21aa
SR
7292 goto failed;
7293
7294 count = __stop_mcount_loc - __start_mcount_loc;
c867ccd8
JS
7295 if (!count) {
7296 pr_info("ftrace: No functions to be traced?\n");
68bf21aa 7297 goto failed;
c867ccd8
JS
7298 }
7299
7300 pr_info("ftrace: allocating %ld entries in %ld pages\n",
7301 count, count / ENTRIES_PER_PAGE + 1);
68bf21aa 7302
5cb084bb 7303 ret = ftrace_process_locs(NULL,
31e88909 7304 __start_mcount_loc,
68bf21aa 7305 __stop_mcount_loc);
2889c658
YW
7306 if (ret) {
7307 pr_warn("ftrace: failed to allocate entries for functions\n");
7308 goto failed;
7309 }
68bf21aa 7310
da537f0a
SRV
7311 pr_info("ftrace: allocated %ld pages with %ld groups\n",
7312 ftrace_number_of_pages, ftrace_number_of_groups);
7313
2889c658
YW
7314 last_ftrace_enabled = ftrace_enabled = 1;
7315
2af15d6a
SR
7316 set_ftrace_early_filters();
7317
68bf21aa
SR
7318 return;
7319 failed:
7320 ftrace_disabled = 1;
7321}
68bf21aa 7322
f3bea491
SRRH
7323/* Do nothing if arch does not support this */
7324void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
7325{
7326}
7327
7328static void ftrace_update_trampoline(struct ftrace_ops *ops)
7329{
fc0ea795
AH
7330 unsigned long trampoline = ops->trampoline;
7331
f3bea491 7332 arch_ftrace_update_trampoline(ops);
fc0ea795 7333 if (ops->trampoline && ops->trampoline != trampoline &&
dd9ddf46
AH
7334 (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) {
7335 /* Add to kallsyms before the perf events */
fc0ea795 7336 ftrace_add_trampoline_to_kallsyms(ops);
dd9ddf46
AH
7337 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
7338 ops->trampoline, ops->trampoline_size, false,
7339 FTRACE_TRAMPOLINE_SYM);
548e1f6c
AH
7340 /*
7341 * Record the perf text poke event after the ksymbol register
7342 * event.
7343 */
7344 perf_event_text_poke((void *)ops->trampoline, NULL, 0,
7345 (void *)ops->trampoline,
7346 ops->trampoline_size);
dd9ddf46 7347 }
f3bea491
SRRH
7348}
7349
04ec7bb6
SRV
7350void ftrace_init_trace_array(struct trace_array *tr)
7351{
7352 INIT_LIST_HEAD(&tr->func_probes);
673feb9d
SRV
7353 INIT_LIST_HEAD(&tr->mod_trace);
7354 INIT_LIST_HEAD(&tr->mod_notrace);
04ec7bb6 7355}
3d083395 7356#else
0b6e4d56 7357
3306fc4a 7358struct ftrace_ops global_ops = {
bd69c30b 7359 .func = ftrace_stub,
a25d036d 7360 .flags = FTRACE_OPS_FL_INITIALIZED |
e3eea140 7361 FTRACE_OPS_FL_PID,
bd69c30b
SR
7362};
7363
0b6e4d56
FW
7364static int __init ftrace_nodyn_init(void)
7365{
7366 ftrace_enabled = 1;
7367 return 0;
7368}
6f415672 7369core_initcall(ftrace_nodyn_init);
0b6e4d56 7370
8434dc93 7371static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
e1effa01 7372static inline void ftrace_startup_all(int command) { }
8a56d776 7373
f3bea491
SRRH
7374static void ftrace_update_trampoline(struct ftrace_ops *ops)
7375{
7376}
7377
3d083395
SR
7378#endif /* CONFIG_DYNAMIC_FTRACE */
7379
4104d326
SRRH
7380__init void ftrace_init_global_array_ops(struct trace_array *tr)
7381{
7382 tr->ops = &global_ops;
7383 tr->ops->private = tr;
04ec7bb6 7384 ftrace_init_trace_array(tr);
4104d326
SRRH
7385}
7386
7387void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
7388{
7389 /* If we filter on pids, update to use the pid function */
7390 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
7391 if (WARN_ON(tr->ops->func != ftrace_stub))
7392 printk("ftrace ops had %pS for function\n",
7393 tr->ops->func);
4104d326
SRRH
7394 }
7395 tr->ops->func = func;
7396 tr->ops->private = tr;
7397}
7398
7399void ftrace_reset_array_ops(struct trace_array *tr)
7400{
7401 tr->ops->func = ftrace_stub;
7402}
7403
fabe38ab 7404static nokprobe_inline void
2f5f6ad9 7405__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
d19ad077 7406 struct ftrace_ops *ignored, struct ftrace_regs *fregs)
b848914c 7407{
d19ad077 7408 struct pt_regs *regs = ftrace_get_regs(fregs);
cdbe61bf 7409 struct ftrace_ops *op;
edc15caf 7410 int bit;
b848914c 7411
ce5e4803 7412	/*
 7413	 * trace_test_and_set_recursion() will disable preemption,
 7414	 * which is required since some of the ops may be dynamically
 7415	 * allocated and must not be freed until after a synchronize_rcu().
 7416	 */
ed65df63 7417 bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
edc15caf
SR
7418 if (bit < 0)
7419 return;
b1cff0ad 7420
0a016409 7421 do_for_each_ftrace_op(op, ftrace_ops_list) {
2fa717a0
SRV
7422 /* Stub functions don't need to be called nor tested */
7423 if (op->flags & FTRACE_OPS_FL_STUB)
7424 continue;
ba27f2bc
SRRH
 7425		/*
 7426		 * Check the following for each ops before calling its func:
 7427		 *  if the RCU flag is set, then rcu_is_watching() must be true,
 7428		 *  and the ip must match the ops filter, which is tested by
 7429		 *  ftrace_ops_test().
 7430		 *
 7431		 * If either of the above checks fails, then op->func() is not
 7432		 * executed.
 7433		 */
7434 if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
ba27f2bc 7435 ftrace_ops_test(op, ip, regs)) {
1d48d596
SRRH
7436 if (FTRACE_WARN_ON(!op->func)) {
7437 pr_warn("op=%p %pS\n", op, op);
4104d326
SRRH
7438 goto out;
7439 }
d19ad077 7440 op->func(ip, parent_ip, op, fregs);
4104d326 7441 }
0a016409 7442 } while_for_each_ftrace_op(op);
4104d326 7443out:
edc15caf 7444 trace_clear_recursion(bit);
b848914c
SR
7445}
7446
2f5f6ad9
SR
7447/*
7448 * Some archs only support passing ip and parent_ip. Even though
7449 * the list function ignores the op parameter, we do not want any
7450 * C side effects, where a function is called without the caller
7451 * sending a third parameter.
a1e2e31d
SR
7452 * Archs are to support both the regs and ftrace_ops at the same time.
7453 * If they support ftrace_ops, it is assumed they support regs.
 7454 * If callbacks want to use regs, they must either check for regs
06aeaaea
MH
 7455 * being NULL or for CONFIG_DYNAMIC_FTRACE_WITH_REGS being enabled.
7456 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
a1e2e31d 7457 * An architecture can pass partial regs with ftrace_ops and still
b8ec330a 7458 * set the ARCH_SUPPORTS_FTRACE_OPS.
34cdd18b
SRV
7459 *
7460 * In vmlinux.lds.h, ftrace_ops_list_func() is defined to be
7461 * arch_ftrace_ops_list_func.
2f5f6ad9
SR
7462 */
7463#if ARCH_SUPPORTS_FTRACE_OPS
34cdd18b
SRV
7464void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
7465 struct ftrace_ops *op, struct ftrace_regs *fregs)
2f5f6ad9 7466{
d19ad077 7467 __ftrace_ops_list_func(ip, parent_ip, NULL, fregs);
2f5f6ad9
SR
7468}
7469#else
34cdd18b 7470void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
2f5f6ad9 7471{
a1e2e31d 7472 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
2f5f6ad9
SR
7473}
7474#endif
34cdd18b 7475NOKPROBE_SYMBOL(arch_ftrace_ops_list_func);
2f5f6ad9 7476
f1ff6348
SRRH
7477/*
7478 * If there's only one function registered but it does not support
c68c0fa2
SRRH
7479 * recursion, needs RCU protection and/or requires per cpu handling, then
7480 * this function will be called by the mcount trampoline.
f1ff6348 7481 */
c68c0fa2 7482static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
d19ad077 7483 struct ftrace_ops *op, struct ftrace_regs *fregs)
f1ff6348
SRRH
7484{
7485 int bit;
7486
ed65df63 7487 bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
f1ff6348
SRRH
7488 if (bit < 0)
7489 return;
7490
b40341fa 7491 if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
d19ad077 7492 op->func(ip, parent_ip, op, fregs);
c68c0fa2 7493
f1ff6348
SRRH
7494 trace_clear_recursion(bit);
7495}
fabe38ab 7496NOKPROBE_SYMBOL(ftrace_ops_assist_func);
f1ff6348 7497
87354059
SRRH
7498/**
7499 * ftrace_ops_get_func - get the function a trampoline should call
7500 * @ops: the ops to get the function for
7501 *
 7502 * Normally the mcount trampoline will call ops->func directly, but
 7503 * there are times that it should not. For example, if the ops does not
 7504 * have its own recursion protection, then the trampoline should call
3a150df9 7505 * ftrace_ops_assist_func() instead.
87354059
SRRH
7506 *
7507 * Returns the function that the trampoline should call for @ops.
7508 */
7509ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
7510{
87354059 7511 /*
a25d036d
SRV
7512 * If the function does not handle recursion or needs to be RCU safe,
7513 * then we need to call the assist handler.
87354059 7514 */
a25d036d
SRV
7515 if (ops->flags & (FTRACE_OPS_FL_RECURSION |
7516 FTRACE_OPS_FL_RCU))
c68c0fa2 7517 return ftrace_ops_assist_func;
87354059
SRRH
7518
7519 return ops->func;
7520}
7521
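
/*
 * Illustrative sketch (not part of ftrace.c): what ftrace_ops_get_func()
 * above resolves to for two hypothetical ops. The ops and callback names
 * are made up for the example; the flags are the ones tested above.
 */
static void notrace example_safe_cb(unsigned long ip, unsigned long parent_ip,
				    struct ftrace_ops *op,
				    struct ftrace_regs *fregs)
{
	/* Handles its own recursion and does not need RCU protection. */
}

static struct ftrace_ops example_safe_ops = {
	.func	= example_safe_cb,
	/* no RECURSION/RCU flags: ftrace_ops_get_func() returns example_safe_cb */
};

static struct ftrace_ops example_assisted_ops = {
	.func	= example_safe_cb,
	.flags	= FTRACE_OPS_FL_RECURSION,
	/* RECURSION flag set: ftrace_ops_get_func() returns ftrace_ops_assist_func */
};
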
345ddcc8
SRRH
7522static void
7523ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
fa2c3254 7524 struct task_struct *prev,
9c2136be
DK
7525 struct task_struct *next,
7526 unsigned int prev_state)
978f3a45 7527{
345ddcc8
SRRH
7528 struct trace_array *tr = data;
7529 struct trace_pid_list *pid_list;
b3b1e6ed 7530 struct trace_pid_list *no_pid_list;
978f3a45 7531
345ddcc8 7532 pid_list = rcu_dereference_sched(tr->function_pids);
b3b1e6ed 7533 no_pid_list = rcu_dereference_sched(tr->function_no_pids);
e32d8956 7534
b3b1e6ed 7535 if (trace_ignore_this_task(pid_list, no_pid_list, next))
717e3f5e
SRV
7536 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7537 FTRACE_PID_IGNORE);
7538 else
7539 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7540 next->pid);
978f3a45
SR
7541}
7542
1e10486f
NK
7543static void
7544ftrace_pid_follow_sched_process_fork(void *data,
7545 struct task_struct *self,
7546 struct task_struct *task)
7547{
7548 struct trace_pid_list *pid_list;
7549 struct trace_array *tr = data;
7550
7551 pid_list = rcu_dereference_sched(tr->function_pids);
7552 trace_filter_add_remove_task(pid_list, self, task);
b3b1e6ed
SRV
7553
7554 pid_list = rcu_dereference_sched(tr->function_no_pids);
7555 trace_filter_add_remove_task(pid_list, self, task);
1e10486f
NK
7556}
7557
7558static void
7559ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
7560{
7561 struct trace_pid_list *pid_list;
7562 struct trace_array *tr = data;
7563
7564 pid_list = rcu_dereference_sched(tr->function_pids);
7565 trace_filter_add_remove_task(pid_list, NULL, task);
b3b1e6ed
SRV
7566
7567 pid_list = rcu_dereference_sched(tr->function_no_pids);
7568 trace_filter_add_remove_task(pid_list, NULL, task);
1e10486f
NK
7569}
7570
7571void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
7572{
7573 if (enable) {
7574 register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
7575 tr);
afcab636 7576 register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
1e10486f
NK
7577 tr);
7578 } else {
7579 unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
7580 tr);
afcab636 7581 unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
1e10486f
NK
7582 tr);
7583 }
7584}
7585
b3b1e6ed 7586static void clear_ftrace_pids(struct trace_array *tr, int type)
e32d8956 7587{
345ddcc8 7588 struct trace_pid_list *pid_list;
b3b1e6ed 7589 struct trace_pid_list *no_pid_list;
345ddcc8 7590 int cpu;
e32d8956 7591
345ddcc8
SRRH
7592 pid_list = rcu_dereference_protected(tr->function_pids,
7593 lockdep_is_held(&ftrace_lock));
b3b1e6ed
SRV
7594 no_pid_list = rcu_dereference_protected(tr->function_no_pids,
7595 lockdep_is_held(&ftrace_lock));
7596
7597 /* Make sure there's something to do */
27683626 7598 if (!pid_type_enabled(type, pid_list, no_pid_list))
345ddcc8 7599 return;
229c4ef8 7600
b3b1e6ed 7601 /* See if the pids still need to be checked after this */
27683626 7602 if (!still_need_pid_events(type, pid_list, no_pid_list)) {
b3b1e6ed
SRV
7603 unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
7604 for_each_possible_cpu(cpu)
7605 per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE;
7606 }
e32d8956 7607
b3b1e6ed
SRV
7608 if (type & TRACE_PIDS)
7609 rcu_assign_pointer(tr->function_pids, NULL);
978f3a45 7610
b3b1e6ed
SRV
7611 if (type & TRACE_NO_PIDS)
7612 rcu_assign_pointer(tr->function_no_pids, NULL);
978f3a45 7613
345ddcc8 7614 /* Wait till all users are no longer using pid filtering */
74401729 7615 synchronize_rcu();
e32d8956 7616
b3b1e6ed 7617 if ((type & TRACE_PIDS) && pid_list)
6954e415 7618 trace_pid_list_free(pid_list);
b3b1e6ed
SRV
7619
7620 if ((type & TRACE_NO_PIDS) && no_pid_list)
6954e415 7621 trace_pid_list_free(no_pid_list);
e32d8956
SR
7622}
7623
d879d0b8
NK
7624void ftrace_clear_pids(struct trace_array *tr)
7625{
7626 mutex_lock(&ftrace_lock);
7627
b3b1e6ed 7628 clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
d879d0b8
NK
7629
7630 mutex_unlock(&ftrace_lock);
7631}
7632
b3b1e6ed 7633static void ftrace_pid_reset(struct trace_array *tr, int type)
df4fc315 7634{
756d17ee 7635 mutex_lock(&ftrace_lock);
b3b1e6ed 7636 clear_ftrace_pids(tr, type);
978f3a45 7637
756d17ee 7638 ftrace_update_pid_func();
e1effa01 7639 ftrace_startup_all(0);
756d17ee 7640
7641 mutex_unlock(&ftrace_lock);
756d17ee 7642}
7643
345ddcc8
SRRH
7644/* Greater than any max PID */
7645#define FTRACE_NO_PIDS (void *)(PID_MAX_LIMIT + 1)
df4fc315 7646
756d17ee 7647static void *fpid_start(struct seq_file *m, loff_t *pos)
345ddcc8 7648 __acquires(RCU)
756d17ee 7649{
345ddcc8
SRRH
7650 struct trace_pid_list *pid_list;
7651 struct trace_array *tr = m->private;
7652
756d17ee 7653 mutex_lock(&ftrace_lock);
345ddcc8
SRRH
7654 rcu_read_lock_sched();
7655
7656 pid_list = rcu_dereference_sched(tr->function_pids);
756d17ee 7657
345ddcc8
SRRH
7658 if (!pid_list)
7659 return !(*pos) ? FTRACE_NO_PIDS : NULL;
756d17ee 7660
345ddcc8 7661 return trace_pid_start(pid_list, pos);
756d17ee 7662}
7663
7664static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
7665{
345ddcc8
SRRH
7666 struct trace_array *tr = m->private;
7667 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
7668
e4075e8b
VA
7669 if (v == FTRACE_NO_PIDS) {
7670 (*pos)++;
756d17ee 7671 return NULL;
e4075e8b 7672 }
345ddcc8 7673 return trace_pid_next(pid_list, v, pos);
756d17ee 7674}
7675
7676static void fpid_stop(struct seq_file *m, void *p)
345ddcc8 7677 __releases(RCU)
756d17ee 7678{
345ddcc8 7679 rcu_read_unlock_sched();
756d17ee 7680 mutex_unlock(&ftrace_lock);
7681}
7682
7683static int fpid_show(struct seq_file *m, void *v)
7684{
345ddcc8 7685 if (v == FTRACE_NO_PIDS) {
fa6f0cc7 7686 seq_puts(m, "no pid\n");
756d17ee 7687 return 0;
7688 }
7689
345ddcc8 7690 return trace_pid_show(m, v);
756d17ee 7691}
7692
7693static const struct seq_operations ftrace_pid_sops = {
7694 .start = fpid_start,
7695 .next = fpid_next,
7696 .stop = fpid_stop,
7697 .show = fpid_show,
7698};
7699
b3b1e6ed
SRV
7700static void *fnpid_start(struct seq_file *m, loff_t *pos)
7701 __acquires(RCU)
7702{
7703 struct trace_pid_list *pid_list;
7704 struct trace_array *tr = m->private;
7705
7706 mutex_lock(&ftrace_lock);
7707 rcu_read_lock_sched();
7708
7709 pid_list = rcu_dereference_sched(tr->function_no_pids);
7710
7711 if (!pid_list)
7712 return !(*pos) ? FTRACE_NO_PIDS : NULL;
7713
7714 return trace_pid_start(pid_list, pos);
7715}
7716
7717static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos)
756d17ee 7718{
b3b1e6ed
SRV
7719 struct trace_array *tr = m->private;
7720 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids);
7721
7722 if (v == FTRACE_NO_PIDS) {
7723 (*pos)++;
7724 return NULL;
7725 }
7726 return trace_pid_next(pid_list, v, pos);
7727}
7728
7729static const struct seq_operations ftrace_no_pid_sops = {
7730 .start = fnpid_start,
7731 .next = fnpid_next,
7732 .stop = fpid_stop,
7733 .show = fpid_show,
7734};
7735
7736static int pid_open(struct inode *inode, struct file *file, int type)
7737{
7738 const struct seq_operations *seq_ops;
345ddcc8
SRRH
7739 struct trace_array *tr = inode->i_private;
7740 struct seq_file *m;
756d17ee 7741 int ret = 0;
7742
8530dec6
SRV
7743 ret = tracing_check_open_get_tr(tr);
7744 if (ret)
7745 return ret;
345ddcc8 7746
756d17ee 7747 if ((file->f_mode & FMODE_WRITE) &&
7748 (file->f_flags & O_TRUNC))
b3b1e6ed
SRV
7749 ftrace_pid_reset(tr, type);
7750
7751 switch (type) {
7752 case TRACE_PIDS:
7753 seq_ops = &ftrace_pid_sops;
7754 break;
7755 case TRACE_NO_PIDS:
7756 seq_ops = &ftrace_no_pid_sops;
7757 break;
026bb845
KC
7758 default:
7759 trace_array_put(tr);
7760 WARN_ON_ONCE(1);
7761 return -EINVAL;
b3b1e6ed 7762 }
756d17ee 7763
b3b1e6ed 7764 ret = seq_open(file, seq_ops);
345ddcc8
SRRH
7765 if (ret < 0) {
7766 trace_array_put(tr);
7767 } else {
7768 m = file->private_data;
7769 /* copy tr over to seq ops */
7770 m->private = tr;
7771 }
756d17ee 7772
7773 return ret;
7774}
7775
b3b1e6ed
SRV
7776static int
7777ftrace_pid_open(struct inode *inode, struct file *file)
7778{
7779 return pid_open(inode, file, TRACE_PIDS);
7780}
7781
7782static int
7783ftrace_no_pid_open(struct inode *inode, struct file *file)
7784{
7785 return pid_open(inode, file, TRACE_NO_PIDS);
7786}
7787
345ddcc8
SRRH
7788static void ignore_task_cpu(void *data)
7789{
7790 struct trace_array *tr = data;
7791 struct trace_pid_list *pid_list;
b3b1e6ed 7792 struct trace_pid_list *no_pid_list;
345ddcc8
SRRH
7793
7794 /*
 7795	 * This function is called by on_each_cpu() while
 7796	 * ftrace_lock is held.
7797 */
7798 pid_list = rcu_dereference_protected(tr->function_pids,
7799 mutex_is_locked(&ftrace_lock));
b3b1e6ed
SRV
7800 no_pid_list = rcu_dereference_protected(tr->function_no_pids,
7801 mutex_is_locked(&ftrace_lock));
345ddcc8 7802
b3b1e6ed 7803 if (trace_ignore_this_task(pid_list, no_pid_list, current))
717e3f5e
SRV
7804 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7805 FTRACE_PID_IGNORE);
7806 else
7807 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7808 current->pid);
345ddcc8
SRRH
7809}
7810
df4fc315 7811static ssize_t
b3b1e6ed
SRV
7812pid_write(struct file *filp, const char __user *ubuf,
7813 size_t cnt, loff_t *ppos, int type)
df4fc315 7814{
345ddcc8
SRRH
7815 struct seq_file *m = filp->private_data;
7816 struct trace_array *tr = m->private;
b3b1e6ed
SRV
7817 struct trace_pid_list *filtered_pids;
7818 struct trace_pid_list *other_pids;
345ddcc8
SRRH
7819 struct trace_pid_list *pid_list;
7820 ssize_t ret;
df4fc315 7821
345ddcc8
SRRH
7822 if (!cnt)
7823 return 0;
7824
7825 mutex_lock(&ftrace_lock);
7826
b3b1e6ed
SRV
7827 switch (type) {
7828 case TRACE_PIDS:
7829 filtered_pids = rcu_dereference_protected(tr->function_pids,
345ddcc8 7830 lockdep_is_held(&ftrace_lock));
b3b1e6ed
SRV
7831 other_pids = rcu_dereference_protected(tr->function_no_pids,
7832 lockdep_is_held(&ftrace_lock));
7833 break;
7834 case TRACE_NO_PIDS:
7835 filtered_pids = rcu_dereference_protected(tr->function_no_pids,
7836 lockdep_is_held(&ftrace_lock));
7837 other_pids = rcu_dereference_protected(tr->function_pids,
345ddcc8 7838 lockdep_is_held(&ftrace_lock));
b3b1e6ed 7839 break;
026bb845
KC
7840 default:
7841 ret = -EINVAL;
7842 WARN_ON_ONCE(1);
7843 goto out;
b3b1e6ed 7844 }
345ddcc8
SRRH
7845
7846 ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
7847 if (ret < 0)
7848 goto out;
df4fc315 7849
b3b1e6ed
SRV
7850 switch (type) {
7851 case TRACE_PIDS:
7852 rcu_assign_pointer(tr->function_pids, pid_list);
7853 break;
7854 case TRACE_NO_PIDS:
7855 rcu_assign_pointer(tr->function_no_pids, pid_list);
7856 break;
7857 }
7858
df4fc315 7859
345ddcc8 7860 if (filtered_pids) {
74401729 7861 synchronize_rcu();
6954e415 7862 trace_pid_list_free(filtered_pids);
b3b1e6ed 7863 } else if (pid_list && !other_pids) {
345ddcc8
SRRH
7864 /* Register a probe to set whether to ignore the tracing of a task */
7865 register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
7866 }
df4fc315 7867
756d17ee 7868 /*
345ddcc8
SRRH
7869 * Ignoring of pids is done at task switch. But we have to
7870 * check for those tasks that are currently running.
7871 * Always do this in case a pid was appended or removed.
756d17ee 7872 */
345ddcc8 7873 on_each_cpu(ignore_task_cpu, tr, 1);
756d17ee 7874
345ddcc8
SRRH
7875 ftrace_update_pid_func();
7876 ftrace_startup_all(0);
7877 out:
7878 mutex_unlock(&ftrace_lock);
df4fc315 7879
345ddcc8
SRRH
7880 if (ret > 0)
7881 *ppos += ret;
df4fc315 7882
345ddcc8 7883 return ret;
756d17ee 7884}
df4fc315 7885
b3b1e6ed
SRV
7886static ssize_t
7887ftrace_pid_write(struct file *filp, const char __user *ubuf,
7888 size_t cnt, loff_t *ppos)
7889{
7890 return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
7891}
7892
7893static ssize_t
7894ftrace_no_pid_write(struct file *filp, const char __user *ubuf,
7895 size_t cnt, loff_t *ppos)
7896{
7897 return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
7898}
7899
756d17ee 7900static int
7901ftrace_pid_release(struct inode *inode, struct file *file)
7902{
345ddcc8 7903 struct trace_array *tr = inode->i_private;
df4fc315 7904
345ddcc8
SRRH
7905 trace_array_put(tr);
7906
7907 return seq_release(inode, file);
df4fc315
SR
7908}
7909
5e2336a0 7910static const struct file_operations ftrace_pid_fops = {
756d17ee 7911 .open = ftrace_pid_open,
7912 .write = ftrace_pid_write,
7913 .read = seq_read,
098c879e 7914 .llseek = tracing_lseek,
756d17ee 7915 .release = ftrace_pid_release,
df4fc315
SR
7916};
7917
b3b1e6ed
SRV
7918static const struct file_operations ftrace_no_pid_fops = {
7919 .open = ftrace_no_pid_open,
7920 .write = ftrace_no_pid_write,
7921 .read = seq_read,
7922 .llseek = tracing_lseek,
7923 .release = ftrace_pid_release,
7924};
7925
345ddcc8 7926void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
df4fc315 7927{
21ccc9cd 7928 trace_create_file("set_ftrace_pid", TRACE_MODE_WRITE, d_tracer,
345ddcc8 7929 tr, &ftrace_pid_fops);
21ccc9cd
SRV
7930 trace_create_file("set_ftrace_notrace_pid", TRACE_MODE_WRITE,
7931 d_tracer, tr, &ftrace_no_pid_fops);
df4fc315 7932}
df4fc315 7933
501c2375
SRRH
7934void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
7935 struct dentry *d_tracer)
7936{
7937 /* Only the top level directory has the dyn_tracefs and profile */
7938 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
7939
7940 ftrace_init_dyn_tracefs(d_tracer);
7941 ftrace_profile_tracefs(d_tracer);
7942}
7943
a2bb6a3d 7944/**
81adbdc0 7945 * ftrace_kill - kill ftrace
a2bb6a3d
SR
7946 *
 7947 * This function should be used by panic code. It stops ftrace
 7948 * but in a not so nice way. If you simply need to stop tracing
 7949 * from a non-atomic section, use unregister_ftrace_function() instead.
7950 */
81adbdc0 7951void ftrace_kill(void)
a2bb6a3d
SR
7952{
7953 ftrace_disabled = 1;
7954 ftrace_enabled = 0;
5ccba64a 7955 ftrace_trace_function = ftrace_stub;
a2bb6a3d
SR
7956}
7957
e0a413f6 7958/**
6130722f
SRV
7959 * ftrace_is_dead - Test if ftrace is dead or not.
7960 *
7961 * Returns 1 if ftrace is "dead", zero otherwise.
e0a413f6
SR
7962 */
7963int ftrace_is_dead(void)
7964{
7965 return ftrace_disabled;
7966}
7967
16444a8a 7968/**
3d083395 7969 * register_ftrace_function - register a function for profiling
78cbc651 7970 * @ops: ops structure that holds the function for profiling.
16444a8a 7971 *
3d083395
SR
7972 * Register a function to be called by all functions in the
7973 * kernel.
7974 *
7975 * Note: @ops->func and all the functions it calls must be labeled
7976 * with "notrace", otherwise it will go into a
7977 * recursive loop.
16444a8a 7978 */
3d083395 7979int register_ftrace_function(struct ftrace_ops *ops)
16444a8a 7980{
3b1a8f45 7981 int ret;
4eebcc81 7982
f04f24fb
MH
7983 ftrace_ops_init(ops);
7984
e6ea44e9 7985 mutex_lock(&ftrace_lock);
e7d3737e 7986
8a56d776 7987 ret = ftrace_startup(ops, 0);
b848914c 7988
e6ea44e9 7989 mutex_unlock(&ftrace_lock);
8d240dd8 7990
b0fc494f 7991 return ret;
3d083395 7992}
cdbe61bf 7993EXPORT_SYMBOL_GPL(register_ftrace_function);
3d083395
SR
7994
7995/**
32632920 7996 * unregister_ftrace_function - unregister a function for profiling.
78cbc651 7997 * @ops: ops structure that holds the function to unregister
3d083395
SR
7998 *
7999 * Unregister a function that was added to be called by ftrace profiling.
8000 */
8001int unregister_ftrace_function(struct ftrace_ops *ops)
8002{
8003 int ret;
8004
e6ea44e9 8005 mutex_lock(&ftrace_lock);
8a56d776 8006 ret = ftrace_shutdown(ops, 0);
e6ea44e9 8007 mutex_unlock(&ftrace_lock);
b0fc494f
SR
8008
8009 return ret;
8010}
cdbe61bf 8011EXPORT_SYMBOL_GPL(unregister_ftrace_function);
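
/*
 * Illustrative sketch (not part of ftrace.c): a minimal user of the
 * register_ftrace_function()/unregister_ftrace_function() API documented
 * above, assuming <linux/ftrace.h> is included. All names below (the
 * callback, the ops and the start/stop helpers) are made up for the example.
 */
static void notrace example_trace_cb(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs)
{
	/* Called for every traced function; keep it short and notrace. */
}

static struct ftrace_ops example_trace_ops = {
	.func	= example_trace_cb,
	/* Let ftrace wrap the callback with recursion protection. */
	.flags	= FTRACE_OPS_FL_RECURSION,
};

static int example_trace_start(void)
{
	/* Attaches the callback to all traceable functions; 0 on success. */
	return register_ftrace_function(&example_trace_ops);
}

static void example_trace_stop(void)
{
	unregister_ftrace_function(&example_trace_ops);
}
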
b0fc494f 8012
bed0d9a5
JO
8013static int symbols_cmp(const void *a, const void *b)
8014{
8015 const char **str_a = (const char **) a;
8016 const char **str_b = (const char **) b;
8017
8018 return strcmp(*str_a, *str_b);
8019}
8020
8021struct kallsyms_data {
8022 unsigned long *addrs;
8023 const char **syms;
8024 size_t cnt;
8025 size_t found;
8026};
8027
8028static int kallsyms_callback(void *data, const char *name,
8029 struct module *mod, unsigned long addr)
8030{
8031 struct kallsyms_data *args = data;
eb1b2985
JO
8032 const char **sym;
8033 int idx;
bed0d9a5 8034
eb1b2985
JO
8035 sym = bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp);
8036 if (!sym)
8037 return 0;
8038
8039 idx = sym - args->syms;
8040 if (args->addrs[idx])
bed0d9a5
JO
8041 return 0;
8042
8043 addr = ftrace_location(addr);
8044 if (!addr)
8045 return 0;
8046
eb1b2985
JO
8047 args->addrs[idx] = addr;
8048 args->found++;
bed0d9a5
JO
8049 return args->found == args->cnt ? 1 : 0;
8050}
8051
8052/**
8053 * ftrace_lookup_symbols - Lookup addresses for array of symbols
8054 *
 8055 * @sorted_syms: array of pointers to the symbols to resolve,
 8056 * must be alphabetically sorted
 8057 * @cnt: number of symbols/addresses in the @sorted_syms/@addrs arrays
 8058 * @addrs: array for storing resulting addresses
 8059 *
 8060 * This function looks up addresses for the array of symbols provided
 8061 * in @sorted_syms (which must be alphabetically sorted) and stores
 8062 * them in @addrs, which needs to be big enough to store at least @cnt
 8063 * addresses.
8064 *
8065 * This function returns 0 if all provided symbols are found,
8066 * -ESRCH otherwise.
8067 */
8068int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
8069{
8070 struct kallsyms_data args;
8071 int err;
8072
eb1b2985 8073 memset(addrs, 0, sizeof(*addrs) * cnt);
bed0d9a5
JO
8074 args.addrs = addrs;
8075 args.syms = sorted_syms;
8076 args.cnt = cnt;
8077 args.found = 0;
8078 err = kallsyms_on_each_symbol(kallsyms_callback, &args);
8079 if (err < 0)
8080 return err;
8081 return args.found == args.cnt ? 0 : -ESRCH;
8082}
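
/*
 * Illustrative sketch (not part of ftrace.c): resolving a small, already
 * alphabetically sorted set of symbols with ftrace_lookup_symbols(). The
 * function and symbol names here are examples only; any traceable kernel
 * functions can be used.
 */
static int example_resolve_symbols(void)
{
	static const char *syms[] = {
		/* must stay alphabetically sorted for the bsearch() above */
		"kernel_clone",
		"schedule",
	};
	unsigned long addrs[ARRAY_SIZE(syms)];
	int err;

	err = ftrace_lookup_symbols(syms, ARRAY_SIZE(syms), addrs);
	if (err)	/* -ESRCH if any symbol could not be resolved */
		return err;

	/* addrs[i] now holds the ftrace location for syms[i] */
	return 0;
}
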
44d35720 8083
5d79fa0d 8084#ifdef CONFIG_SYSCTL
8fd7c214
LC
8085
8086#ifdef CONFIG_DYNAMIC_FTRACE
f8b7d2b4
LC
8087static void ftrace_startup_sysctl(void)
8088{
8089 int command;
8090
8091 if (unlikely(ftrace_disabled))
8092 return;
8093
8094 /* Force update next time */
8095 saved_ftrace_func = NULL;
8096 /* ftrace_start_up is true if we want ftrace running */
8097 if (ftrace_start_up) {
8098 command = FTRACE_UPDATE_CALLS;
8099 if (ftrace_graph_active)
8100 command |= FTRACE_START_FUNC_RET;
8101 ftrace_startup_enable(command);
8102 }
8103}
8104
8105static void ftrace_shutdown_sysctl(void)
8106{
8107 int command;
8108
8109 if (unlikely(ftrace_disabled))
8110 return;
8111
8112 /* ftrace_start_up is true if ftrace is running */
8113 if (ftrace_start_up) {
8114 command = FTRACE_DISABLE_CALLS;
8115 if (ftrace_graph_active)
8116 command |= FTRACE_STOP_FUNC_RET;
8117 ftrace_run_update_code(command);
8118 }
8119}
8fd7c214
LC
8120#else
8121# define ftrace_startup_sysctl() do { } while (0)
8122# define ftrace_shutdown_sysctl() do { } while (0)
8123#endif /* CONFIG_DYNAMIC_FTRACE */
f8b7d2b4 8124
7162431d
MB
8125static bool is_permanent_ops_registered(void)
8126{
8127 struct ftrace_ops *op;
8128
8129 do_for_each_ftrace_op(op, ftrace_ops_list) {
8130 if (op->flags & FTRACE_OPS_FL_PERMANENT)
8131 return true;
8132 } while_for_each_ftrace_op(op);
8133
8134 return false;
8135}
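
/*
 * Illustrative sketch (not part of ftrace.c): an ops carrying
 * FTRACE_OPS_FL_PERMANENT. While such an ops is registered,
 * is_permanent_ops_registered() above returns true and the sysctl
 * handler below refuses to clear ftrace_enabled (it returns -EBUSY).
 * The ops name is made up; ftrace_stub is the real no-op callback.
 */
static struct ftrace_ops example_permanent_ops = {
	.func	= ftrace_stub,
	.flags	= FTRACE_OPS_FL_PERMANENT,
};
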
8136
8e4e83b2 8137static int
b0fc494f 8138ftrace_enable_sysctl(struct ctl_table *table, int write,
54fa9ba5 8139 void *buffer, size_t *lenp, loff_t *ppos)
b0fc494f 8140{
45a4a237 8141 int ret = -ENODEV;
4eebcc81 8142
e6ea44e9 8143 mutex_lock(&ftrace_lock);
b0fc494f 8144
45a4a237
SR
8145 if (unlikely(ftrace_disabled))
8146 goto out;
8147
8148 ret = proc_dointvec(table, write, buffer, lenp, ppos);
b0fc494f 8149
a32c7765 8150 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
b0fc494f
SR
8151 goto out;
8152
b0fc494f
SR
8153 if (ftrace_enabled) {
8154
b0fc494f 8155 /* we are starting ftrace again */
f86f4180
CZ
8156 if (rcu_dereference_protected(ftrace_ops_list,
8157 lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
5000c418 8158 update_ftrace_function();
b0fc494f 8159
524a3868
SRRH
8160 ftrace_startup_sysctl();
8161
b0fc494f 8162 } else {
7162431d
MB
8163 if (is_permanent_ops_registered()) {
8164 ftrace_enabled = true;
8165 ret = -EBUSY;
8166 goto out;
8167 }
8168
b0fc494f
SR
8169 /* stopping ftrace calls (just send to ftrace_stub) */
8170 ftrace_trace_function = ftrace_stub;
8171
8172 ftrace_shutdown_sysctl();
8173 }
8174
7162431d 8175 last_ftrace_enabled = !!ftrace_enabled;
b0fc494f 8176 out:
e6ea44e9 8177 mutex_unlock(&ftrace_lock);
3d083395 8178 return ret;
16444a8a 8179}
8e4e83b2
WX
8180
8181static struct ctl_table ftrace_sysctls[] = {
8182 {
8183 .procname = "ftrace_enabled",
8184 .data = &ftrace_enabled,
8185 .maxlen = sizeof(int),
8186 .mode = 0644,
8187 .proc_handler = ftrace_enable_sysctl,
8188 },
8189 {}
8190};
8191
8192static int __init ftrace_sysctl_init(void)
8193{
8194 register_sysctl_init("kernel", ftrace_sysctls);
8195 return 0;
8196}
8197late_initcall(ftrace_sysctl_init);
8198#endif