ftrace: Fix hash record accounting bug
[linux-2.6-block.git] / kernel / trace / ftrace.c
16444a8a
ACM
1/*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code in the latency_tracer, that is:
11 *
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
14 */
15
3d083395
SR
16#include <linux/stop_machine.h>
17#include <linux/clocksource.h>
18#include <linux/kallsyms.h>
5072c59f 19#include <linux/seq_file.h>
4a2b8dda 20#include <linux/suspend.h>
5072c59f 21#include <linux/debugfs.h>
3d083395 22#include <linux/hardirq.h>
2d8b820b 23#include <linux/kthread.h>
5072c59f 24#include <linux/uaccess.h>
2d8b820b 25#include <linux/ftrace.h>
b0fc494f 26#include <linux/sysctl.h>
5a0e3ad6 27#include <linux/slab.h>
5072c59f 28#include <linux/ctype.h>
3d083395 29#include <linux/list.h>
59df055f 30#include <linux/hash.h>
3f379b03 31#include <linux/rcupdate.h>
3d083395 32
ad8d75ff 33#include <trace/events/sched.h>
8aef2d28 34
2af15d6a 35#include <asm/setup.h>
395a59d0 36
0706f1c4 37#include "trace_output.h"
bac429f0 38#include "trace_stat.h"
16444a8a 39
6912896e 40#define FTRACE_WARN_ON(cond) \
0778d9ad
SR
41 ({ \
42 int ___r = cond; \
43 if (WARN_ON(___r)) \
6912896e 44 ftrace_kill(); \
0778d9ad
SR
45 ___r; \
46 })
6912896e
SR
47
48#define FTRACE_WARN_ON_ONCE(cond) \
0778d9ad
SR
49 ({ \
50 int ___r = cond; \
51 if (WARN_ON_ONCE(___r)) \
6912896e 52 ftrace_kill(); \
0778d9ad
SR
53 ___r; \
54 })
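/*
 * Illustrative usage (not part of the original source): both macros
 * evaluate to the condition itself, so they can gate an early return
 * while also shutting ftrace down on the first sign of corruption:
 *
 *	if (FTRACE_WARN_ON_ONCE(!rec))
 *		return NULL;
 */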
6912896e 55
8fc0c701
SR
56/* hash bits for specific function selection */
57#define FTRACE_HASH_BITS 7
58#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
33dc9b12
SR
59#define FTRACE_HASH_DEFAULT_BITS 10
60#define FTRACE_HASH_MAX_BITS 12
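/* i.e. a default hash of 1 << 10 = 1024 buckets, capped at 1 << 12 = 4096 */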
8fc0c701 61
4eebcc81
SR
62/* ftrace_enabled is used to turn ftrace on or off */
63int ftrace_enabled __read_mostly;
d61f82d0 64static int last_ftrace_enabled;
b0fc494f 65
60a7ecf4
SR
66/* Quick disabling of function tracer. */
67int function_trace_stop;
68
756d17ee 69/* List for set_ftrace_pid's pids. */
70LIST_HEAD(ftrace_pids);
71struct ftrace_pid {
72 struct list_head list;
73 struct pid *pid;
74};
75
4eebcc81
SR
76/*
77 * ftrace_disabled is set when an anomaly is discovered.
78 * ftrace_disabled is much stronger than ftrace_enabled.
79 */
80static int ftrace_disabled __read_mostly;
81
52baf119 82static DEFINE_MUTEX(ftrace_lock);
b0fc494f 83
bd38c0e6 84static struct ftrace_ops ftrace_list_end __read_mostly = {
fb9fb015 85 .func = ftrace_stub,
16444a8a
ACM
86};
87
b848914c
SR
88static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
89static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
16444a8a 90ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
6331c28c 91static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
60a7ecf4 92ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
df4fc315 93ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
2b499381 94static struct ftrace_ops global_ops;
16444a8a 95
b848914c
SR
96static void
97ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
98
3f379b03 99/*
b848914c 100 * Traverse the ftrace_global_list, invoking all entries. The reason that we
3f379b03
PM
101 * can use rcu_dereference_raw() is that elements removed from this list
102 * are simply leaked, so there is no need to interact with a grace-period
103 * mechanism. The rcu_dereference_raw() calls are needed to handle
b848914c 104 * concurrent insertions into the ftrace_global_list.
3f379b03
PM
105 *
106 * Silly Alpha and silly pointer-speculation compiler optimizations!
107 */
b848914c
SR
108static void ftrace_global_list_func(unsigned long ip,
109 unsigned long parent_ip)
16444a8a 110{
b1cff0ad
SR
111 struct ftrace_ops *op;
112
113 if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
114 return;
16444a8a 115
b1cff0ad
SR
116 trace_recursion_set(TRACE_GLOBAL_BIT);
117 op = rcu_dereference_raw(ftrace_global_list); /*see above*/
16444a8a 118 while (op != &ftrace_list_end) {
16444a8a 119 op->func(ip, parent_ip);
3f379b03 120 op = rcu_dereference_raw(op->next); /*see above*/
16444a8a 121 };
b1cff0ad 122 trace_recursion_clear(TRACE_GLOBAL_BIT);
16444a8a
ACM
123}
124
df4fc315
SR
125static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
126{
0ef8cde5 127 if (!test_tsk_trace_trace(current))
df4fc315
SR
128 return;
129
130 ftrace_pid_function(ip, parent_ip);
131}
132
133static void set_ftrace_pid_function(ftrace_func_t func)
134{
135 /* do not set ftrace_pid_function to itself! */
136 if (func != ftrace_pid_func)
137 ftrace_pid_function = func;
138}
139
16444a8a 140/**
3d083395 141 * clear_ftrace_function - reset the ftrace function
16444a8a 142 *
3d083395
SR
143 * This NULLs the ftrace function and in essence stops
144 * tracing. There may be some lag before the change takes effect on all CPUs.
16444a8a 145 */
3d083395 146void clear_ftrace_function(void)
16444a8a 147{
3d083395 148 ftrace_trace_function = ftrace_stub;
60a7ecf4 149 __ftrace_trace_function = ftrace_stub;
6331c28c 150 __ftrace_trace_function_delay = ftrace_stub;
df4fc315 151 ftrace_pid_function = ftrace_stub;
3d083395
SR
152}
153
60a7ecf4
SR
154#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
155/*
156 * For those archs that do not test function_trace_stop in their
157 * mcount call site, we need to do it from C.
158 */
159static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
160{
161 if (function_trace_stop)
162 return;
163
164 __ftrace_trace_function(ip, parent_ip);
165}
166#endif
167
2b499381 168static void update_global_ops(void)
491d0dcf
SR
169{
170 ftrace_func_t func;
171
172 /*
173 * If there's only one function registered, then call that
174 * function directly. Otherwise, we need to iterate over the
175 * registered callers.
176 */
b848914c
SR
177 if (ftrace_global_list == &ftrace_list_end ||
178 ftrace_global_list->next == &ftrace_list_end)
179 func = ftrace_global_list->func;
491d0dcf 180 else
b848914c 181 func = ftrace_global_list_func;
491d0dcf
SR
182
183 /* If we filter on pids, update to use the pid function */
184 if (!list_empty(&ftrace_pids)) {
185 set_ftrace_pid_function(func);
186 func = ftrace_pid_func;
187 }
2b499381
SR
188
189 global_ops.func = func;
190}
191
192static void update_ftrace_function(void)
193{
194 ftrace_func_t func;
195
196 update_global_ops();
197
cdbe61bf
SR
198 /*
199 * If we are at the end of the list and this ops is
200 * not dynamic, then have the mcount trampoline call
201 * the function directly
202 */
b848914c 203 if (ftrace_ops_list == &ftrace_list_end ||
cdbe61bf
SR
204 (ftrace_ops_list->next == &ftrace_list_end &&
205 !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
b848914c
SR
206 func = ftrace_ops_list->func;
207 else
208 func = ftrace_ops_list_func;
2b499381 209
491d0dcf
SR
210#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
211 ftrace_trace_function = func;
6331c28c
SR
212#else
213#ifdef CONFIG_DYNAMIC_FTRACE
214 /* do not update till all functions have been modified */
215 __ftrace_trace_function_delay = func;
491d0dcf
SR
216#else
217 __ftrace_trace_function = func;
6331c28c 218#endif
491d0dcf
SR
219 ftrace_trace_function = ftrace_test_stop_func;
220#endif
221}
222
2b499381 223static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
3d083395 224{
2b499381 225 ops->next = *list;
16444a8a 226 /*
b848914c 227 * We are entering ops into the list but another
16444a8a
ACM
228 * CPU might be walking that list. We need to make sure
229 * the ops->next pointer is valid before another CPU sees
b848914c 230 * the ops pointer inserted into the list.
16444a8a 231 */
2b499381 232 rcu_assign_pointer(*list, ops);
16444a8a
ACM
233}
234
2b499381 235static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
16444a8a 236{
16444a8a 237 struct ftrace_ops **p;
16444a8a
ACM
238
239 /*
3d083395
SR
240 * If we are removing the last function, then simply point
241 * to the ftrace_stub.
16444a8a 242 */
2b499381
SR
243 if (*list == ops && ops->next == &ftrace_list_end) {
244 *list = &ftrace_list_end;
e6ea44e9 245 return 0;
16444a8a
ACM
246 }
247
2b499381 248 for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
16444a8a
ACM
249 if (*p == ops)
250 break;
251
e6ea44e9
SR
252 if (*p != ops)
253 return -1;
16444a8a
ACM
254
255 *p = (*p)->next;
2b499381
SR
256 return 0;
257}
16444a8a 258
2b499381
SR
259static int __register_ftrace_function(struct ftrace_ops *ops)
260{
261 if (ftrace_disabled)
262 return -ENODEV;
263
264 if (FTRACE_WARN_ON(ops == &global_ops))
265 return -EINVAL;
266
b848914c
SR
267 if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
268 return -EBUSY;
269
cdbe61bf
SR
270 if (!core_kernel_data((unsigned long)ops))
271 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
272
b848914c
SR
273 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
274 int first = ftrace_global_list == &ftrace_list_end;
275 add_ftrace_ops(&ftrace_global_list, ops);
276 ops->flags |= FTRACE_OPS_FL_ENABLED;
277 if (first)
278 add_ftrace_ops(&ftrace_ops_list, &global_ops);
279 } else
280 add_ftrace_ops(&ftrace_ops_list, ops);
281
2b499381
SR
282 if (ftrace_enabled)
283 update_ftrace_function();
284
285 return 0;
286}
287
288static int __unregister_ftrace_function(struct ftrace_ops *ops)
289{
290 int ret;
291
292 if (ftrace_disabled)
293 return -ENODEV;
294
b848914c
SR
295 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
296 return -EBUSY;
297
2b499381
SR
298 if (FTRACE_WARN_ON(ops == &global_ops))
299 return -EINVAL;
300
b848914c
SR
301 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
302 ret = remove_ftrace_ops(&ftrace_global_list, ops);
303 if (!ret && ftrace_global_list == &ftrace_list_end)
304 ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
305 if (!ret)
306 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
307 } else
308 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
309
2b499381
SR
310 if (ret < 0)
311 return ret;
b848914c 312
491d0dcf
SR
313 if (ftrace_enabled)
314 update_ftrace_function();
16444a8a 315
cdbe61bf
SR
316 /*
317 * Dynamic ops may be freed, we must make sure that all
318 * callers are done before leaving this function.
319 */
320 if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
321 synchronize_sched();
322
e6ea44e9 323 return 0;
3d083395
SR
324}
325
df4fc315
SR
326static void ftrace_update_pid_func(void)
327{
491d0dcf 328 /* Only do something if we are tracing something */
df4fc315 329 if (ftrace_trace_function == ftrace_stub)
10dd3ebe 330 return;
df4fc315 331
491d0dcf 332 update_ftrace_function();
df4fc315
SR
333}
334
493762fc
SR
335#ifdef CONFIG_FUNCTION_PROFILER
336struct ftrace_profile {
337 struct hlist_node node;
338 unsigned long ip;
339 unsigned long counter;
0706f1c4
SR
340#ifdef CONFIG_FUNCTION_GRAPH_TRACER
341 unsigned long long time;
e330b3bc 342 unsigned long long time_squared;
0706f1c4 343#endif
8fc0c701
SR
344};
345
493762fc
SR
346struct ftrace_profile_page {
347 struct ftrace_profile_page *next;
348 unsigned long index;
349 struct ftrace_profile records[];
d61f82d0
SR
350};
351
cafb168a
SR
352struct ftrace_profile_stat {
353 atomic_t disabled;
354 struct hlist_head *hash;
355 struct ftrace_profile_page *pages;
356 struct ftrace_profile_page *start;
357 struct tracer_stat stat;
358};
359
493762fc
SR
360#define PROFILE_RECORDS_SIZE \
361 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
5072c59f 362
493762fc
SR
363#define PROFILES_PER_PAGE \
364 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
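/*
 * Rough sizing (illustrative, assuming 4K pages, a 64-bit build and
 * CONFIG_FUNCTION_GRAPH_TRACER): struct ftrace_profile is about 48 bytes,
 * so each page holds on the order of 85 profile records once the
 * ftrace_profile_page header is subtracted.
 */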
3d083395 365
fb9fb015
SR
366static int ftrace_profile_bits __read_mostly;
367static int ftrace_profile_enabled __read_mostly;
368
369/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
bac429f0
SR
370static DEFINE_MUTEX(ftrace_profile_lock);
371
cafb168a 372static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
493762fc
SR
373
374#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
375
bac429f0
SR
376static void *
377function_stat_next(void *v, int idx)
378{
493762fc
SR
379 struct ftrace_profile *rec = v;
380 struct ftrace_profile_page *pg;
bac429f0 381
493762fc 382 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
bac429f0
SR
383
384 again:
0296e425
LZ
385 if (idx != 0)
386 rec++;
387
bac429f0
SR
388 if ((void *)rec >= (void *)&pg->records[pg->index]) {
389 pg = pg->next;
390 if (!pg)
391 return NULL;
392 rec = &pg->records[0];
493762fc
SR
393 if (!rec->counter)
394 goto again;
bac429f0
SR
395 }
396
bac429f0
SR
397 return rec;
398}
399
400static void *function_stat_start(struct tracer_stat *trace)
401{
cafb168a
SR
402 struct ftrace_profile_stat *stat =
403 container_of(trace, struct ftrace_profile_stat, stat);
404
405 if (!stat || !stat->start)
406 return NULL;
407
408 return function_stat_next(&stat->start->records[0], 0);
bac429f0
SR
409}
410
0706f1c4
SR
411#ifdef CONFIG_FUNCTION_GRAPH_TRACER
412/* function graph compares on total time */
413static int function_stat_cmp(void *p1, void *p2)
414{
415 struct ftrace_profile *a = p1;
416 struct ftrace_profile *b = p2;
417
418 if (a->time < b->time)
419 return -1;
420 if (a->time > b->time)
421 return 1;
422 else
423 return 0;
424}
425#else
426/* without function graph, compare against hit counts */
bac429f0
SR
427static int function_stat_cmp(void *p1, void *p2)
428{
493762fc
SR
429 struct ftrace_profile *a = p1;
430 struct ftrace_profile *b = p2;
bac429f0
SR
431
432 if (a->counter < b->counter)
433 return -1;
434 if (a->counter > b->counter)
435 return 1;
436 else
437 return 0;
438}
0706f1c4 439#endif
bac429f0
SR
440
441static int function_stat_headers(struct seq_file *m)
442{
0706f1c4 443#ifdef CONFIG_FUNCTION_GRAPH_TRACER
34886c8b 444 seq_printf(m, " Function "
e330b3bc 445 "Hit Time Avg s^2\n"
34886c8b 446 " -------- "
e330b3bc 447 "--- ---- --- ---\n");
0706f1c4 448#else
bac429f0
SR
449 seq_printf(m, " Function Hit\n"
450 " -------- ---\n");
0706f1c4 451#endif
bac429f0
SR
452 return 0;
453}
454
455static int function_stat_show(struct seq_file *m, void *v)
456{
493762fc 457 struct ftrace_profile *rec = v;
bac429f0 458 char str[KSYM_SYMBOL_LEN];
3aaba20f 459 int ret = 0;
0706f1c4 460#ifdef CONFIG_FUNCTION_GRAPH_TRACER
34886c8b
SR
461 static struct trace_seq s;
462 unsigned long long avg;
e330b3bc 463 unsigned long long stddev;
0706f1c4 464#endif
3aaba20f
LZ
465 mutex_lock(&ftrace_profile_lock);
466
467 /* we raced with function_profile_reset() */
468 if (unlikely(rec->counter == 0)) {
469 ret = -EBUSY;
470 goto out;
471 }
bac429f0
SR
472
473 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
0706f1c4
SR
474 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
475
476#ifdef CONFIG_FUNCTION_GRAPH_TRACER
477 seq_printf(m, " ");
34886c8b
SR
478 avg = rec->time;
479 do_div(avg, rec->counter);
480
e330b3bc
CD
481 /* Sample standard deviation (s^2) */
482 if (rec->counter <= 1)
483 stddev = 0;
484 else {
485 stddev = rec->time_squared - rec->counter * avg * avg;
486 /*
487 * Divide only 1000 for ns^2 -> us^2 conversion.
488 * trace_print_graph_duration will divide 1000 again.
489 */
490 do_div(stddev, (rec->counter - 1) * 1000);
491 }
492
34886c8b
SR
493 trace_seq_init(&s);
494 trace_print_graph_duration(rec->time, &s);
495 trace_seq_puts(&s, " ");
496 trace_print_graph_duration(avg, &s);
e330b3bc
CD
497 trace_seq_puts(&s, " ");
498 trace_print_graph_duration(stddev, &s);
0706f1c4 499 trace_print_seq(m, &s);
0706f1c4
SR
500#endif
501 seq_putc(m, '\n');
3aaba20f
LZ
502out:
503 mutex_unlock(&ftrace_profile_lock);
bac429f0 504
3aaba20f 505 return ret;
bac429f0
SR
506}
507
cafb168a 508static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
bac429f0 509{
493762fc 510 struct ftrace_profile_page *pg;
bac429f0 511
cafb168a 512 pg = stat->pages = stat->start;
bac429f0 513
493762fc
SR
514 while (pg) {
515 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
516 pg->index = 0;
517 pg = pg->next;
bac429f0
SR
518 }
519
cafb168a 520 memset(stat->hash, 0,
493762fc
SR
521 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
522}
bac429f0 523
cafb168a 524int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
493762fc
SR
525{
526 struct ftrace_profile_page *pg;
318e0a73
SR
527 int functions;
528 int pages;
493762fc 529 int i;
bac429f0 530
493762fc 531 /* If we already allocated, do nothing */
cafb168a 532 if (stat->pages)
493762fc 533 return 0;
bac429f0 534
cafb168a
SR
535 stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
536 if (!stat->pages)
493762fc 537 return -ENOMEM;
bac429f0 538
318e0a73
SR
539#ifdef CONFIG_DYNAMIC_FTRACE
540 functions = ftrace_update_tot_cnt;
541#else
542 /*
543 * We do not know the number of functions that exist because
544 * dynamic tracing is what counts them. From past experience
545 * we know there are around 20K functions. That should be more than enough.
546 * It is highly unlikely we will execute every function in
547 * the kernel.
548 */
549 functions = 20000;
550#endif
551
cafb168a 552 pg = stat->start = stat->pages;
bac429f0 553
318e0a73
SR
554 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
555
556 for (i = 0; i < pages; i++) {
493762fc 557 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
493762fc 558 if (!pg->next)
318e0a73 559 goto out_free;
493762fc
SR
560 pg = pg->next;
561 }
562
563 return 0;
318e0a73
SR
564
565 out_free:
566 pg = stat->start;
567 while (pg) {
568 unsigned long tmp = (unsigned long)pg;
569
570 pg = pg->next;
571 free_page(tmp);
572 }
573
574 free_page((unsigned long)stat->pages);
575 stat->pages = NULL;
576 stat->start = NULL;
577
578 return -ENOMEM;
bac429f0
SR
579}
580
cafb168a 581static int ftrace_profile_init_cpu(int cpu)
bac429f0 582{
cafb168a 583 struct ftrace_profile_stat *stat;
493762fc 584 int size;
bac429f0 585
cafb168a
SR
586 stat = &per_cpu(ftrace_profile_stats, cpu);
587
588 if (stat->hash) {
493762fc 589 /* If the profile is already created, simply reset it */
cafb168a 590 ftrace_profile_reset(stat);
493762fc
SR
591 return 0;
592 }
bac429f0 593
493762fc
SR
594 /*
595 * We are profiling all functions, but usually only a few thousand
596 * functions are hit. We'll make a hash of 1024 items.
597 */
598 size = FTRACE_PROFILE_HASH_SIZE;
bac429f0 599
cafb168a 600 stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
493762fc 601
cafb168a 602 if (!stat->hash)
493762fc
SR
603 return -ENOMEM;
604
cafb168a
SR
605 if (!ftrace_profile_bits) {
606 size--;
493762fc 607
cafb168a
SR
608 for (; size; size >>= 1)
609 ftrace_profile_bits++;
610 }
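/*
 * Example of the computation above: FTRACE_PROFILE_HASH_SIZE is 1024,
 * so size-- leaves 1023 and the loop runs ten times, setting
 * ftrace_profile_bits to 10 -- the width used by hash_long() in
 * ftrace_find_profiled_func() and ftrace_add_profile().
 */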
493762fc 611
318e0a73 612 /* Preallocate the function profiling pages */
cafb168a
SR
613 if (ftrace_profile_pages_init(stat) < 0) {
614 kfree(stat->hash);
615 stat->hash = NULL;
493762fc
SR
616 return -ENOMEM;
617 }
618
619 return 0;
bac429f0
SR
620}
621
cafb168a
SR
622static int ftrace_profile_init(void)
623{
624 int cpu;
625 int ret = 0;
626
627 for_each_online_cpu(cpu) {
628 ret = ftrace_profile_init_cpu(cpu);
629 if (ret)
630 break;
631 }
632
633 return ret;
634}
635
493762fc 636/* interrupts must be disabled */
cafb168a
SR
637static struct ftrace_profile *
638ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
bac429f0 639{
493762fc 640 struct ftrace_profile *rec;
bac429f0
SR
641 struct hlist_head *hhd;
642 struct hlist_node *n;
bac429f0
SR
643 unsigned long key;
644
bac429f0 645 key = hash_long(ip, ftrace_profile_bits);
cafb168a 646 hhd = &stat->hash[key];
bac429f0
SR
647
648 if (hlist_empty(hhd))
649 return NULL;
650
bac429f0
SR
651 hlist_for_each_entry_rcu(rec, n, hhd, node) {
652 if (rec->ip == ip)
493762fc
SR
653 return rec;
654 }
655
656 return NULL;
657}
658
cafb168a
SR
659static void ftrace_add_profile(struct ftrace_profile_stat *stat,
660 struct ftrace_profile *rec)
493762fc
SR
661{
662 unsigned long key;
663
664 key = hash_long(rec->ip, ftrace_profile_bits);
cafb168a 665 hlist_add_head_rcu(&rec->node, &stat->hash[key]);
493762fc
SR
666}
667
318e0a73
SR
668/*
669 * The memory is already allocated; this simply finds a new record to use.
670 */
493762fc 671static struct ftrace_profile *
318e0a73 672ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
493762fc
SR
673{
674 struct ftrace_profile *rec = NULL;
675
318e0a73 676 /* prevent recursion (from NMIs) */
cafb168a 677 if (atomic_inc_return(&stat->disabled) != 1)
493762fc
SR
678 goto out;
679
493762fc 680 /*
318e0a73
SR
681 * Try to find the function again since an NMI
682 * could have added it
493762fc 683 */
cafb168a 684 rec = ftrace_find_profiled_func(stat, ip);
493762fc 685 if (rec)
cafb168a 686 goto out;
493762fc 687
cafb168a
SR
688 if (stat->pages->index == PROFILES_PER_PAGE) {
689 if (!stat->pages->next)
690 goto out;
691 stat->pages = stat->pages->next;
bac429f0 692 }
493762fc 693
cafb168a 694 rec = &stat->pages->records[stat->pages->index++];
493762fc 695 rec->ip = ip;
cafb168a 696 ftrace_add_profile(stat, rec);
493762fc 697
bac429f0 698 out:
cafb168a 699 atomic_dec(&stat->disabled);
bac429f0
SR
700
701 return rec;
702}
703
704static void
705function_profile_call(unsigned long ip, unsigned long parent_ip)
706{
cafb168a 707 struct ftrace_profile_stat *stat;
493762fc 708 struct ftrace_profile *rec;
bac429f0
SR
709 unsigned long flags;
710
711 if (!ftrace_profile_enabled)
712 return;
713
714 local_irq_save(flags);
cafb168a
SR
715
716 stat = &__get_cpu_var(ftrace_profile_stats);
0f6ce3de 717 if (!stat->hash || !ftrace_profile_enabled)
cafb168a
SR
718 goto out;
719
720 rec = ftrace_find_profiled_func(stat, ip);
493762fc 721 if (!rec) {
318e0a73 722 rec = ftrace_profile_alloc(stat, ip);
493762fc
SR
723 if (!rec)
724 goto out;
725 }
bac429f0
SR
726
727 rec->counter++;
728 out:
729 local_irq_restore(flags);
730}
731
0706f1c4
SR
732#ifdef CONFIG_FUNCTION_GRAPH_TRACER
733static int profile_graph_entry(struct ftrace_graph_ent *trace)
734{
735 function_profile_call(trace->func, 0);
736 return 1;
737}
738
739static void profile_graph_return(struct ftrace_graph_ret *trace)
740{
cafb168a 741 struct ftrace_profile_stat *stat;
a2a16d6a 742 unsigned long long calltime;
0706f1c4 743 struct ftrace_profile *rec;
cafb168a 744 unsigned long flags;
0706f1c4
SR
745
746 local_irq_save(flags);
cafb168a 747 stat = &__get_cpu_var(ftrace_profile_stats);
0f6ce3de 748 if (!stat->hash || !ftrace_profile_enabled)
cafb168a
SR
749 goto out;
750
37e44bc5
SR
751 /* If the calltime was zeroed, ignore it */
752 if (!trace->calltime)
753 goto out;
754
a2a16d6a
SR
755 calltime = trace->rettime - trace->calltime;
756
757 if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
758 int index;
759
760 index = trace->depth;
761
762 /* Append this call time to the parent time to subtract */
763 if (index)
764 current->ret_stack[index - 1].subtime += calltime;
765
766 if (current->ret_stack[index].subtime < calltime)
767 calltime -= current->ret_stack[index].subtime;
768 else
769 calltime = 0;
770 }
771
cafb168a 772 rec = ftrace_find_profiled_func(stat, trace->func);
e330b3bc 773 if (rec) {
a2a16d6a 774 rec->time += calltime;
e330b3bc
CD
775 rec->time_squared += calltime * calltime;
776 }
a2a16d6a 777
cafb168a 778 out:
0706f1c4
SR
779 local_irq_restore(flags);
780}
781
782static int register_ftrace_profiler(void)
783{
784 return register_ftrace_graph(&profile_graph_return,
785 &profile_graph_entry);
786}
787
788static void unregister_ftrace_profiler(void)
789{
790 unregister_ftrace_graph();
791}
792#else
bd38c0e6 793static struct ftrace_ops ftrace_profile_ops __read_mostly = {
fb9fb015 794 .func = function_profile_call,
bac429f0
SR
795};
796
0706f1c4
SR
797static int register_ftrace_profiler(void)
798{
799 return register_ftrace_function(&ftrace_profile_ops);
800}
801
802static void unregister_ftrace_profiler(void)
803{
804 unregister_ftrace_function(&ftrace_profile_ops);
805}
806#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
807
bac429f0
SR
808static ssize_t
809ftrace_profile_write(struct file *filp, const char __user *ubuf,
810 size_t cnt, loff_t *ppos)
811{
812 unsigned long val;
bac429f0
SR
813 int ret;
814
22fe9b54
PH
815 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
816 if (ret)
bac429f0
SR
817 return ret;
818
819 val = !!val;
820
821 mutex_lock(&ftrace_profile_lock);
822 if (ftrace_profile_enabled ^ val) {
823 if (val) {
493762fc
SR
824 ret = ftrace_profile_init();
825 if (ret < 0) {
826 cnt = ret;
827 goto out;
828 }
829
0706f1c4
SR
830 ret = register_ftrace_profiler();
831 if (ret < 0) {
832 cnt = ret;
833 goto out;
834 }
bac429f0
SR
835 ftrace_profile_enabled = 1;
836 } else {
837 ftrace_profile_enabled = 0;
0f6ce3de
SR
838 /*
839 * unregister_ftrace_profiler calls stop_machine
840 * so this acts like a synchronize_sched().
841 */
0706f1c4 842 unregister_ftrace_profiler();
bac429f0
SR
843 }
844 }
493762fc 845 out:
bac429f0
SR
846 mutex_unlock(&ftrace_profile_lock);
847
cf8517cf 848 *ppos += cnt;
bac429f0
SR
849
850 return cnt;
851}
852
493762fc
SR
853static ssize_t
854ftrace_profile_read(struct file *filp, char __user *ubuf,
855 size_t cnt, loff_t *ppos)
856{
fb9fb015 857 char buf[64]; /* big enough to hold a number */
493762fc
SR
858 int r;
859
860 r = sprintf(buf, "%u\n", ftrace_profile_enabled);
861 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
862}
863
bac429f0
SR
864static const struct file_operations ftrace_profile_fops = {
865 .open = tracing_open_generic,
866 .read = ftrace_profile_read,
867 .write = ftrace_profile_write,
6038f373 868 .llseek = default_llseek,
bac429f0
SR
869};
870
cafb168a
SR
871/* used to initialize the real stat files */
872static struct tracer_stat function_stats __initdata = {
fb9fb015
SR
873 .name = "functions",
874 .stat_start = function_stat_start,
875 .stat_next = function_stat_next,
876 .stat_cmp = function_stat_cmp,
877 .stat_headers = function_stat_headers,
878 .stat_show = function_stat_show
cafb168a
SR
879};
880
6ab5d668 881static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
bac429f0 882{
cafb168a 883 struct ftrace_profile_stat *stat;
bac429f0 884 struct dentry *entry;
cafb168a 885 char *name;
bac429f0 886 int ret;
cafb168a
SR
887 int cpu;
888
889 for_each_possible_cpu(cpu) {
890 stat = &per_cpu(ftrace_profile_stats, cpu);
891
892 /* allocate enough for function name + cpu number */
893 name = kmalloc(32, GFP_KERNEL);
894 if (!name) {
895 /*
896 * The files created are permanent; if something goes wrong
897 * here we still do not free the memory already in use.
898 */
cafb168a
SR
899 WARN(1,
900 "Could not allocate stat file for cpu %d\n",
901 cpu);
902 return;
903 }
904 stat->stat = function_stats;
905 snprintf(name, 32, "function%d", cpu);
906 stat->stat.name = name;
907 ret = register_stat_tracer(&stat->stat);
908 if (ret) {
909 WARN(1,
910 "Could not register function stat for cpu %d\n",
911 cpu);
912 kfree(name);
913 return;
914 }
bac429f0
SR
915 }
916
917 entry = debugfs_create_file("function_profile_enabled", 0644,
918 d_tracer, NULL, &ftrace_profile_fops);
919 if (!entry)
920 pr_warning("Could not create debugfs "
921 "'function_profile_enabled' entry\n");
922}
923
bac429f0 924#else /* CONFIG_FUNCTION_PROFILER */
6ab5d668 925static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
bac429f0
SR
926{
927}
bac429f0
SR
928#endif /* CONFIG_FUNCTION_PROFILER */
929
493762fc
SR
930static struct pid * const ftrace_swapper_pid = &init_struct_pid;
931
932#ifdef CONFIG_DYNAMIC_FTRACE
933
934#ifndef CONFIG_FTRACE_MCOUNT_RECORD
935# error Dynamic ftrace depends on MCOUNT_RECORD
936#endif
937
938static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
939
940struct ftrace_func_probe {
941 struct hlist_node node;
942 struct ftrace_probe_ops *ops;
943 unsigned long flags;
944 unsigned long ip;
945 void *data;
946 struct rcu_head rcu;
947};
948
949enum {
950 FTRACE_ENABLE_CALLS = (1 << 0),
951 FTRACE_DISABLE_CALLS = (1 << 1),
952 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
79e406d7
SR
953 FTRACE_START_FUNC_RET = (1 << 3),
954 FTRACE_STOP_FUNC_RET = (1 << 4),
493762fc 955};
b448c4e3
SR
956struct ftrace_func_entry {
957 struct hlist_node hlist;
958 unsigned long ip;
959};
960
961struct ftrace_hash {
962 unsigned long size_bits;
963 struct hlist_head *buckets;
964 unsigned long count;
07fd5515 965 struct rcu_head rcu;
b448c4e3
SR
966};
967
33dc9b12
SR
968/*
969 * We make these constant because no one should touch them,
970 * but they are used as the default "empty hash", to avoid allocating
971 * it all the time. These are in a read-only section such that if
972 * anyone does try to modify it, it will cause an exception.
973 */
974static const struct hlist_head empty_buckets[1];
975static const struct ftrace_hash empty_hash = {
976 .buckets = (struct hlist_head *)empty_buckets,
1cf41dd7 977};
33dc9b12 978#define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
493762fc 979
2b499381 980static struct ftrace_ops global_ops = {
f45948e8 981 .func = ftrace_stub,
33dc9b12
SR
982 .notrace_hash = EMPTY_HASH,
983 .filter_hash = EMPTY_HASH,
f45948e8
SR
984};
985
493762fc
SR
986static struct dyn_ftrace *ftrace_new_addrs;
987
988static DEFINE_MUTEX(ftrace_regex_lock);
989
990struct ftrace_page {
991 struct ftrace_page *next;
992 int index;
993 struct dyn_ftrace records[];
994};
995
996#define ENTRIES_PER_PAGE \
997 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
998
999/* estimate from running different kernels */
1000#define NR_TO_INIT 10000
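/*
 * Back-of-the-envelope sizing (illustrative, assuming 4K pages and a
 * 24-byte dyn_ftrace on a 64-bit build with no arch-specific fields):
 * ENTRIES_PER_PAGE works out to roughly 170, so the NR_TO_INIT estimate
 * of 10000 records costs on the order of 60 pages in
 * ftrace_dyn_table_alloc().
 */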
1001
1002static struct ftrace_page *ftrace_pages_start;
1003static struct ftrace_page *ftrace_pages;
1004
1005static struct dyn_ftrace *ftrace_free_records;
1006
b448c4e3
SR
1007static struct ftrace_func_entry *
1008ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1009{
1010 unsigned long key;
1011 struct ftrace_func_entry *entry;
1012 struct hlist_head *hhd;
1013 struct hlist_node *n;
1014
1015 if (!hash->count)
1016 return NULL;
1017
1018 if (hash->size_bits > 0)
1019 key = hash_long(ip, hash->size_bits);
1020 else
1021 key = 0;
1022
1023 hhd = &hash->buckets[key];
1024
1025 hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
1026 if (entry->ip == ip)
1027 return entry;
1028 }
1029 return NULL;
1030}
1031
33dc9b12
SR
1032static void __add_hash_entry(struct ftrace_hash *hash,
1033 struct ftrace_func_entry *entry)
b448c4e3 1034{
b448c4e3
SR
1035 struct hlist_head *hhd;
1036 unsigned long key;
1037
b448c4e3 1038 if (hash->size_bits)
33dc9b12 1039 key = hash_long(entry->ip, hash->size_bits);
b448c4e3
SR
1040 else
1041 key = 0;
1042
b448c4e3
SR
1043 hhd = &hash->buckets[key];
1044 hlist_add_head(&entry->hlist, hhd);
1045 hash->count++;
33dc9b12
SR
1046}
1047
1048static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1049{
1050 struct ftrace_func_entry *entry;
1051
1052 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1053 if (!entry)
1054 return -ENOMEM;
1055
1056 entry->ip = ip;
1057 __add_hash_entry(hash, entry);
b448c4e3
SR
1058
1059 return 0;
1060}
1061
1062static void
33dc9b12 1063free_hash_entry(struct ftrace_hash *hash,
b448c4e3
SR
1064 struct ftrace_func_entry *entry)
1065{
1066 hlist_del(&entry->hlist);
1067 kfree(entry);
1068 hash->count--;
1069}
1070
33dc9b12
SR
1071static void
1072remove_hash_entry(struct ftrace_hash *hash,
1073 struct ftrace_func_entry *entry)
1074{
1075 hlist_del(&entry->hlist);
1076 hash->count--;
1077}
1078
b448c4e3
SR
1079static void ftrace_hash_clear(struct ftrace_hash *hash)
1080{
1081 struct hlist_head *hhd;
1082 struct hlist_node *tp, *tn;
1083 struct ftrace_func_entry *entry;
1084 int size = 1 << hash->size_bits;
1085 int i;
1086
33dc9b12
SR
1087 if (!hash->count)
1088 return;
1089
b448c4e3
SR
1090 for (i = 0; i < size; i++) {
1091 hhd = &hash->buckets[i];
1092 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
33dc9b12 1093 free_hash_entry(hash, entry);
b448c4e3
SR
1094 }
1095 FTRACE_WARN_ON(hash->count);
1096}
1097
33dc9b12
SR
1098static void free_ftrace_hash(struct ftrace_hash *hash)
1099{
1100 if (!hash || hash == EMPTY_HASH)
1101 return;
1102 ftrace_hash_clear(hash);
1103 kfree(hash->buckets);
1104 kfree(hash);
1105}
1106
07fd5515
SR
1107static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1108{
1109 struct ftrace_hash *hash;
1110
1111 hash = container_of(rcu, struct ftrace_hash, rcu);
1112 free_ftrace_hash(hash);
1113}
1114
1115static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1116{
1117 if (!hash || hash == EMPTY_HASH)
1118 return;
1119 call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1120}
1121
33dc9b12
SR
1122static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1123{
1124 struct ftrace_hash *hash;
1125 int size;
1126
1127 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1128 if (!hash)
1129 return NULL;
1130
1131 size = 1 << size_bits;
1132 hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL);
1133
1134 if (!hash->buckets) {
1135 kfree(hash);
1136 return NULL;
1137 }
1138
1139 hash->size_bits = size_bits;
1140
1141 return hash;
1142}
1143
1144static struct ftrace_hash *
1145alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1146{
1147 struct ftrace_func_entry *entry;
1148 struct ftrace_hash *new_hash;
1149 struct hlist_node *tp;
1150 int size;
1151 int ret;
1152 int i;
1153
1154 new_hash = alloc_ftrace_hash(size_bits);
1155 if (!new_hash)
1156 return NULL;
1157
1158 /* Empty hash? */
1159 if (!hash || !hash->count)
1160 return new_hash;
1161
1162 size = 1 << hash->size_bits;
1163 for (i = 0; i < size; i++) {
1164 hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
1165 ret = add_hash_entry(new_hash, entry->ip);
1166 if (ret < 0)
1167 goto free_hash;
1168 }
1169 }
1170
1171 FTRACE_WARN_ON(new_hash->count != hash->count);
1172
1173 return new_hash;
1174
1175 free_hash:
1176 free_ftrace_hash(new_hash);
1177 return NULL;
1178}
1179
41fb61c2
SR
1180static void
1181ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
1182static void
1183ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
1184
33dc9b12 1185static int
41fb61c2
SR
1186ftrace_hash_move(struct ftrace_ops *ops, int enable,
1187 struct ftrace_hash **dst, struct ftrace_hash *src)
33dc9b12
SR
1188{
1189 struct ftrace_func_entry *entry;
1190 struct hlist_node *tp, *tn;
1191 struct hlist_head *hhd;
07fd5515
SR
1192 struct ftrace_hash *old_hash;
1193 struct ftrace_hash *new_hash;
33dc9b12
SR
1194 unsigned long key;
1195 int size = src->count;
1196 int bits = 0;
41fb61c2 1197 int ret;
33dc9b12
SR
1198 int i;
1199
41fb61c2
SR
1200 /*
1201 * Remove the current set, update the hash and add
1202 * them back.
1203 */
1204 ftrace_hash_rec_disable(ops, enable);
1205
33dc9b12
SR
1206 /*
1207 * If the new source is empty, just free dst and assign it
1208 * the empty_hash.
1209 */
1210 if (!src->count) {
07fd5515
SR
1211 free_ftrace_hash_rcu(*dst);
1212 rcu_assign_pointer(*dst, EMPTY_HASH);
d4d34b98
SR
1213 /* still need to update the function records */
1214 ret = 0;
1215 goto out;
33dc9b12
SR
1216 }
1217
33dc9b12
SR
1218 /*
1219 * Make the hash size about 1/2 the # found
1220 */
1221 for (size /= 2; size; size >>= 1)
1222 bits++;
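/*
 * Worked example (illustrative): src->count == 50 gives size == 25 after
 * the divide, the loop then runs five times, so bits == 5 and the new
 * hash below gets 1 << 5 = 32 buckets -- about half the entry count.
 */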
1223
1224 /* Don't allocate too much */
1225 if (bits > FTRACE_HASH_MAX_BITS)
1226 bits = FTRACE_HASH_MAX_BITS;
1227
41fb61c2 1228 ret = -ENOMEM;
07fd5515
SR
1229 new_hash = alloc_ftrace_hash(bits);
1230 if (!new_hash)
41fb61c2 1231 goto out;
33dc9b12
SR
1232
1233 size = 1 << src->size_bits;
1234 for (i = 0; i < size; i++) {
1235 hhd = &src->buckets[i];
1236 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
1237 if (bits > 0)
1238 key = hash_long(entry->ip, bits);
1239 else
1240 key = 0;
1241 remove_hash_entry(src, entry);
07fd5515 1242 __add_hash_entry(new_hash, entry);
33dc9b12
SR
1243 }
1244 }
1245
07fd5515
SR
1246 old_hash = *dst;
1247 rcu_assign_pointer(*dst, new_hash);
1248 free_ftrace_hash_rcu(old_hash);
1249
41fb61c2
SR
1250 ret = 0;
1251 out:
1252 /*
1253 * Enable regardless of ret:
1254 * On success, we enable the new hash.
1255 * On failure, we re-enable the original hash.
1256 */
1257 ftrace_hash_rec_enable(ops, enable);
1258
1259 return ret;
33dc9b12
SR
1260}
1261
b848914c
SR
1262/*
1263 * Test the hashes for this ops to see if we want to call
1264 * the ops->func or not.
1265 *
1266 * It's a match if the ip is in the ops->filter_hash or
1267 * the filter_hash does not exist or is empty,
1268 * AND
1269 * the ip is not in the ops->notrace_hash.
cdbe61bf
SR
1270 *
1271 * This needs to be called with preemption disabled as
1272 * the hashes are freed with call_rcu_sched().
b848914c
SR
1273 */
1274static int
1275ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
1276{
1277 struct ftrace_hash *filter_hash;
1278 struct ftrace_hash *notrace_hash;
1279 int ret;
1280
b848914c
SR
1281 filter_hash = rcu_dereference_raw(ops->filter_hash);
1282 notrace_hash = rcu_dereference_raw(ops->notrace_hash);
1283
1284 if ((!filter_hash || !filter_hash->count ||
1285 ftrace_lookup_ip(filter_hash, ip)) &&
1286 (!notrace_hash || !notrace_hash->count ||
1287 !ftrace_lookup_ip(notrace_hash, ip)))
1288 ret = 1;
1289 else
1290 ret = 0;
b848914c
SR
1291
1292 return ret;
1293}
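/*
 * Illustrative examples of the rules above (hypothetical hash contents):
 *
 *	filter_hash empty, notrace_hash empty        -> every ip matches
 *	ip in filter_hash, ip not in notrace_hash    -> match
 *	ip in filter_hash, ip also in notrace_hash   -> no match
 *	ip missing from a non-empty filter_hash      -> no match
 */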
1294
493762fc
SR
1295/*
1296 * This is a double for loop. Do not use 'break' to break out of the loop,
1297 * you must use a goto.
1298 */
1299#define do_for_each_ftrace_rec(pg, rec) \
1300 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
1301 int _____i; \
1302 for (_____i = 0; _____i < pg->index; _____i++) { \
1303 rec = &pg->records[_____i];
1304
1305#define while_for_each_ftrace_rec() \
1306 } \
1307 }
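/*
 * Illustrative usage (not part of the original source); skip freed
 * records and use a goto, never 'break', to leave the loop early:
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->flags & FTRACE_FL_FREE)
 *			continue;
 *		if (rec->ip == ip)
 *			goto found;
 *	} while_for_each_ftrace_rec();
 */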
1308
ed926f9b
SR
1309static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1310 int filter_hash,
1311 bool inc)
1312{
1313 struct ftrace_hash *hash;
1314 struct ftrace_hash *other_hash;
1315 struct ftrace_page *pg;
1316 struct dyn_ftrace *rec;
1317 int count = 0;
1318 int all = 0;
1319
1320 /* Only update if the ops has been registered */
1321 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1322 return;
1323
1324 /*
1325 * In the filter_hash case:
1326 * If the count is zero, we update all records.
1327 * Otherwise we just update the items in the hash.
1328 *
1329 * In the notrace_hash case:
1330 * We enable the update in the hash.
1331 * As disabling notrace means enabling the tracing,
1332 * and enabling notrace means disabling, the inc variable
1333 * gets inverted.
1334 */
1335 if (filter_hash) {
1336 hash = ops->filter_hash;
1337 other_hash = ops->notrace_hash;
b848914c 1338 if (!hash || !hash->count)
ed926f9b
SR
1339 all = 1;
1340 } else {
1341 inc = !inc;
1342 hash = ops->notrace_hash;
1343 other_hash = ops->filter_hash;
1344 /*
1345 * If the notrace hash has no items,
1346 * then there's nothing to do.
1347 */
b848914c 1348 if (hash && !hash->count)
ed926f9b
SR
1349 return;
1350 }
1351
1352 do_for_each_ftrace_rec(pg, rec) {
1353 int in_other_hash = 0;
1354 int in_hash = 0;
1355 int match = 0;
1356
1357 if (all) {
1358 /*
1359 * Only the filter_hash affects all records.
1360 * Update if the record is not in the notrace hash.
1361 */
b848914c 1362 if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
ed926f9b
SR
1363 match = 1;
1364 } else {
b848914c
SR
1365 in_hash = hash && !!ftrace_lookup_ip(hash, rec->ip);
1366 in_other_hash = other_hash && !!ftrace_lookup_ip(other_hash, rec->ip);
ed926f9b
SR
1367
1368 /*
1369 * filter_hash case: match records that are in this hash but
1370 * not in the notrace hash.
 * notrace case (inc was inverted above): match records in this
 * hash that the filter hash also covers, i.e. listed there or
 * the filter hash is empty.
 */
1371 if (filter_hash && in_hash && !in_other_hash)
1372 match = 1;
1373 else if (!filter_hash && in_hash &&
1374 (in_other_hash || !other_hash->count))
1375 match = 1;
1376 }
1377 if (!match)
1378 continue;
1379
1380 if (inc) {
1381 rec->flags++;
1382 if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1383 return;
1384 } else {
1385 if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1386 return;
1387 rec->flags--;
1388 }
1389 count++;
1390 /* Shortcut: if we handled all records, we are done. */
1391 if (!all && count == hash->count)
1392 return;
1393 } while_for_each_ftrace_rec();
1394}
1395
1396static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1397 int filter_hash)
1398{
1399 __ftrace_hash_rec_update(ops, filter_hash, 0);
1400}
1401
1402static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1403 int filter_hash)
1404{
1405 __ftrace_hash_rec_update(ops, filter_hash, 1);
1406}
1407
e309b41d 1408static void ftrace_free_rec(struct dyn_ftrace *rec)
37ad5084 1409{
ee000b7f 1410 rec->freelist = ftrace_free_records;
37ad5084
SR
1411 ftrace_free_records = rec;
1412 rec->flags |= FTRACE_FL_FREE;
1413}
1414
e309b41d 1415static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
3c1720f0 1416{
37ad5084
SR
1417 struct dyn_ftrace *rec;
1418
1419 /* First check for freed records */
1420 if (ftrace_free_records) {
1421 rec = ftrace_free_records;
1422
37ad5084 1423 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
6912896e 1424 FTRACE_WARN_ON_ONCE(1);
37ad5084
SR
1425 ftrace_free_records = NULL;
1426 return NULL;
1427 }
1428
ee000b7f 1429 ftrace_free_records = rec->freelist;
37ad5084
SR
1430 memset(rec, 0, sizeof(*rec));
1431 return rec;
1432 }
1433
3c1720f0 1434 if (ftrace_pages->index == ENTRIES_PER_PAGE) {
08f5ac90
SR
1435 if (!ftrace_pages->next) {
1436 /* allocate another page */
1437 ftrace_pages->next =
1438 (void *)get_zeroed_page(GFP_KERNEL);
1439 if (!ftrace_pages->next)
1440 return NULL;
1441 }
3c1720f0
SR
1442 ftrace_pages = ftrace_pages->next;
1443 }
1444
1445 return &ftrace_pages->records[ftrace_pages->index++];
1446}
1447
08f5ac90 1448static struct dyn_ftrace *
d61f82d0 1449ftrace_record_ip(unsigned long ip)
3d083395 1450{
08f5ac90 1451 struct dyn_ftrace *rec;
3d083395 1452
f3c7ac40 1453 if (ftrace_disabled)
08f5ac90 1454 return NULL;
3d083395 1455
08f5ac90
SR
1456 rec = ftrace_alloc_dyn_node(ip);
1457 if (!rec)
1458 return NULL;
3d083395 1459
08f5ac90 1460 rec->ip = ip;
ee000b7f 1461 rec->newlist = ftrace_new_addrs;
e94142a6 1462 ftrace_new_addrs = rec;
3d083395 1463
08f5ac90 1464 return rec;
3d083395
SR
1465}
1466
b17e8a37
SR
1467static void print_ip_ins(const char *fmt, unsigned char *p)
1468{
1469 int i;
1470
1471 printk(KERN_CONT "%s", fmt);
1472
1473 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1474 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1475}
1476
31e88909 1477static void ftrace_bug(int failed, unsigned long ip)
b17e8a37
SR
1478{
1479 switch (failed) {
1480 case -EFAULT:
1481 FTRACE_WARN_ON_ONCE(1);
1482 pr_info("ftrace faulted on modifying ");
1483 print_ip_sym(ip);
1484 break;
1485 case -EINVAL:
1486 FTRACE_WARN_ON_ONCE(1);
1487 pr_info("ftrace failed to modify ");
1488 print_ip_sym(ip);
b17e8a37 1489 print_ip_ins(" actual: ", (unsigned char *)ip);
b17e8a37
SR
1490 printk(KERN_CONT "\n");
1491 break;
1492 case -EPERM:
1493 FTRACE_WARN_ON_ONCE(1);
1494 pr_info("ftrace faulted on writing ");
1495 print_ip_sym(ip);
1496 break;
1497 default:
1498 FTRACE_WARN_ON_ONCE(1);
1499 pr_info("ftrace faulted on unknown error ");
1500 print_ip_sym(ip);
1501 }
1502}
1503
3c1720f0 1504
2cfa1978
MH
1505/* Return 1 if the address range is reserved for ftrace */
1506int ftrace_text_reserved(void *start, void *end)
1507{
1508 struct dyn_ftrace *rec;
1509 struct ftrace_page *pg;
1510
1511 do_for_each_ftrace_rec(pg, rec) {
1512 if (rec->ip <= (unsigned long)end &&
1513 rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
1514 return 1;
1515 } while_for_each_ftrace_rec();
1516 return 0;
1517}
1518
1519
0eb96701 1520static int
31e88909 1521__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
5072c59f 1522{
e7d3737e 1523 unsigned long ftrace_addr;
64fbcd16 1524 unsigned long flag = 0UL;
e7d3737e 1525
f0001207 1526 ftrace_addr = (unsigned long)FTRACE_ADDR;
5072c59f 1527
982c350b 1528 /*
ed926f9b 1529 * If we are enabling tracing:
982c350b 1530 *
ed926f9b
SR
1531 * If the record has a ref count, then we need to enable it
1532 * because someone is using it.
982c350b 1533 *
ed926f9b
SR
1534 * Otherwise we make sure it's disabled.
1535 *
1536 * If we are disabling tracing, then disable all records that
1537 * are enabled.
982c350b 1538 */
ed926f9b
SR
1539 if (enable && (rec->flags & ~FTRACE_FL_MASK))
1540 flag = FTRACE_FL_ENABLED;
982c350b 1541
64fbcd16
XG
1542 /* If the state of this record hasn't changed, then do nothing */
1543 if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1544 return 0;
982c350b 1545
64fbcd16
XG
1546 if (flag) {
1547 rec->flags |= FTRACE_FL_ENABLED;
1548 return ftrace_make_call(rec, ftrace_addr);
5072c59f
SR
1549 }
1550
64fbcd16
XG
1551 rec->flags &= ~FTRACE_FL_ENABLED;
1552 return ftrace_make_nop(NULL, rec, ftrace_addr);
5072c59f
SR
1553}
1554
e309b41d 1555static void ftrace_replace_code(int enable)
3c1720f0 1556{
3c1720f0
SR
1557 struct dyn_ftrace *rec;
1558 struct ftrace_page *pg;
6a24a244 1559 int failed;
3c1720f0 1560
45a4a237
SR
1561 if (unlikely(ftrace_disabled))
1562 return;
1563
265c831c 1564 do_for_each_ftrace_rec(pg, rec) {
d2c8c3ea
SR
1565 /* Skip over free records */
1566 if (rec->flags & FTRACE_FL_FREE)
265c831c
SR
1567 continue;
1568
265c831c 1569 failed = __ftrace_replace_code(rec, enable);
fa9d13cf 1570 if (failed) {
3279ba37
SR
1571 ftrace_bug(failed, rec->ip);
1572 /* Stop processing */
1573 return;
3c1720f0 1574 }
265c831c 1575 } while_for_each_ftrace_rec();
3c1720f0
SR
1576}
1577
492a7ea5 1578static int
31e88909 1579ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
3c1720f0
SR
1580{
1581 unsigned long ip;
593eb8a2 1582 int ret;
3c1720f0
SR
1583
1584 ip = rec->ip;
1585
45a4a237
SR
1586 if (unlikely(ftrace_disabled))
1587 return 0;
1588
25aac9dc 1589 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
593eb8a2 1590 if (ret) {
31e88909 1591 ftrace_bug(ret, ip);
492a7ea5 1592 return 0;
37ad5084 1593 }
492a7ea5 1594 return 1;
3c1720f0
SR
1595}
1596
000ab691
SR
1597/*
1598 * archs can override this function if they must do something
1599 * before the modifying code is performed.
1600 */
1601int __weak ftrace_arch_code_modify_prepare(void)
1602{
1603 return 0;
1604}
1605
1606/*
1607 * archs can override this function if they must do something
1608 * after the modifying code is performed.
1609 */
1610int __weak ftrace_arch_code_modify_post_process(void)
1611{
1612 return 0;
1613}
1614
e309b41d 1615static int __ftrace_modify_code(void *data)
3d083395 1616{
d61f82d0
SR
1617 int *command = data;
1618
6331c28c
SR
1619 /*
1620 * Do not call function tracer while we update the code.
1621 * We are in stop machine, so there is no need to worry about races.
1622 */
1623 function_trace_stop++;
1624
a3583244 1625 if (*command & FTRACE_ENABLE_CALLS)
d61f82d0 1626 ftrace_replace_code(1);
a3583244 1627 else if (*command & FTRACE_DISABLE_CALLS)
d61f82d0
SR
1628 ftrace_replace_code(0);
1629
1630 if (*command & FTRACE_UPDATE_TRACE_FUNC)
1631 ftrace_update_ftrace_func(ftrace_trace_function);
1632
5a45cfe1
SR
1633 if (*command & FTRACE_START_FUNC_RET)
1634 ftrace_enable_ftrace_graph_caller();
1635 else if (*command & FTRACE_STOP_FUNC_RET)
1636 ftrace_disable_ftrace_graph_caller();
1637
6331c28c
SR
1638#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
1639 /*
1640 * For archs that call ftrace_test_stop_func(), we must
1641 * wait until after we update all the function callers
1642 * before we update the callback. This keeps different
1643 * ops that record different functions from corrupting
1644 * each other.
1645 */
1646 __ftrace_trace_function = __ftrace_trace_function_delay;
1647#endif
1648 function_trace_stop--;
1649
d61f82d0 1650 return 0;
3d083395
SR
1651}
1652
e309b41d 1653static void ftrace_run_update_code(int command)
3d083395 1654{
000ab691
SR
1655 int ret;
1656
1657 ret = ftrace_arch_code_modify_prepare();
1658 FTRACE_WARN_ON(ret);
1659 if (ret)
1660 return;
1661
784e2d76 1662 stop_machine(__ftrace_modify_code, &command, NULL);
000ab691
SR
1663
1664 ret = ftrace_arch_code_modify_post_process();
1665 FTRACE_WARN_ON(ret);
3d083395
SR
1666}
1667
d61f82d0 1668static ftrace_func_t saved_ftrace_func;
60a7ecf4 1669static int ftrace_start_up;
b848914c 1670static int global_start_up;
df4fc315
SR
1671
1672static void ftrace_startup_enable(int command)
1673{
1674 if (saved_ftrace_func != ftrace_trace_function) {
1675 saved_ftrace_func = ftrace_trace_function;
1676 command |= FTRACE_UPDATE_TRACE_FUNC;
1677 }
1678
1679 if (!command || !ftrace_enabled)
1680 return;
1681
1682 ftrace_run_update_code(command);
1683}
d61f82d0 1684
a1cd6173 1685static int ftrace_startup(struct ftrace_ops *ops, int command)
3d083395 1686{
b848914c
SR
1687 bool hash_enable = true;
1688
4eebcc81 1689 if (unlikely(ftrace_disabled))
a1cd6173 1690 return -ENODEV;
4eebcc81 1691
60a7ecf4 1692 ftrace_start_up++;
982c350b 1693 command |= FTRACE_ENABLE_CALLS;
d61f82d0 1694
b848914c
SR
1695 /* ops marked global share the filter hashes */
1696 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1697 ops = &global_ops;
1698 /* Don't update hash if global is already set */
1699 if (global_start_up)
1700 hash_enable = false;
1701 global_start_up++;
1702 }
1703
ed926f9b 1704 ops->flags |= FTRACE_OPS_FL_ENABLED;
b848914c 1705 if (hash_enable)
ed926f9b
SR
1706 ftrace_hash_rec_enable(ops, 1);
1707
df4fc315 1708 ftrace_startup_enable(command);
a1cd6173
SR
1709
1710 return 0;
3d083395
SR
1711}
1712
bd69c30b 1713static void ftrace_shutdown(struct ftrace_ops *ops, int command)
3d083395 1714{
b848914c
SR
1715 bool hash_disable = true;
1716
4eebcc81
SR
1717 if (unlikely(ftrace_disabled))
1718 return;
1719
60a7ecf4 1720 ftrace_start_up--;
9ea1a153
FW
1721 /*
1722 * Just warn in case of an imbalance; no need to kill ftrace. It's not
1723 * critical, but the ftrace_call callers may never be nopped again after
1724 * further ftrace uses.
1725 */
1726 WARN_ON_ONCE(ftrace_start_up < 0);
1727
b848914c
SR
1728 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1729 ops = &global_ops;
1730 global_start_up--;
1731 WARN_ON_ONCE(global_start_up < 0);
1732 /* Don't update hash if global still has users */
1733 if (global_start_up) {
1734 WARN_ON_ONCE(!ftrace_start_up);
1735 hash_disable = false;
1736 }
1737 }
1738
1739 if (hash_disable)
ed926f9b
SR
1740 ftrace_hash_rec_disable(ops, 1);
1741
b848914c 1742 if (ops != &global_ops || !global_start_up)
ed926f9b 1743 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
b848914c
SR
1744
1745 if (!ftrace_start_up)
1746 command |= FTRACE_DISABLE_CALLS;
3d083395 1747
d61f82d0
SR
1748 if (saved_ftrace_func != ftrace_trace_function) {
1749 saved_ftrace_func = ftrace_trace_function;
1750 command |= FTRACE_UPDATE_TRACE_FUNC;
1751 }
3d083395 1752
d61f82d0 1753 if (!command || !ftrace_enabled)
e6ea44e9 1754 return;
d61f82d0
SR
1755
1756 ftrace_run_update_code(command);
3d083395
SR
1757}
1758
e309b41d 1759static void ftrace_startup_sysctl(void)
b0fc494f 1760{
4eebcc81
SR
1761 if (unlikely(ftrace_disabled))
1762 return;
1763
d61f82d0
SR
1764 /* Force update next time */
1765 saved_ftrace_func = NULL;
60a7ecf4
SR
1766 /* ftrace_start_up is true if we want ftrace running */
1767 if (ftrace_start_up)
79e406d7 1768 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
b0fc494f
SR
1769}
1770
e309b41d 1771static void ftrace_shutdown_sysctl(void)
b0fc494f 1772{
4eebcc81
SR
1773 if (unlikely(ftrace_disabled))
1774 return;
1775
60a7ecf4
SR
1776 /* ftrace_start_up is true if ftrace is running */
1777 if (ftrace_start_up)
79e406d7 1778 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
b0fc494f
SR
1779}
1780
3d083395
SR
1781static cycle_t ftrace_update_time;
1782static unsigned long ftrace_update_cnt;
1783unsigned long ftrace_update_tot_cnt;
1784
f7bc8b61
SR
1785static int ops_traces_mod(struct ftrace_ops *ops)
1786{
1787 struct ftrace_hash *hash;
1788
1789 hash = ops->filter_hash;
1790 return !!(!hash || !hash->count);
1791}
1792
31e88909 1793static int ftrace_update_code(struct module *mod)
3d083395 1794{
e94142a6 1795 struct dyn_ftrace *p;
f22f9a89 1796 cycle_t start, stop;
f7bc8b61
SR
1797 unsigned long ref = 0;
1798
1799 /*
1800 * When adding a module, we need to check if tracers are
1801 * currently enabled and if they are set to trace all functions.
1802 * If they are, we need to enable the module functions as well
1803 * as update the reference counts for those function records.
1804 */
1805 if (mod) {
1806 struct ftrace_ops *ops;
1807
1808 for (ops = ftrace_ops_list;
1809 ops != &ftrace_list_end; ops = ops->next) {
1810 if (ops->flags & FTRACE_OPS_FL_ENABLED &&
1811 ops_traces_mod(ops))
1812 ref++;
1813 }
1814 }
3d083395 1815
750ed1a4 1816 start = ftrace_now(raw_smp_processor_id());
3d083395
SR
1817 ftrace_update_cnt = 0;
1818
e94142a6 1819 while (ftrace_new_addrs) {
3d083395 1820
08f5ac90
SR
1821 /* If something went wrong, bail without enabling anything */
1822 if (unlikely(ftrace_disabled))
1823 return -1;
f22f9a89 1824
e94142a6 1825 p = ftrace_new_addrs;
ee000b7f 1826 ftrace_new_addrs = p->newlist;
f7bc8b61 1827 p->flags = ref;
f22f9a89 1828
5cb084bb 1829 /*
25985edc 1830 * Do the initial record conversion from mcount jump
5cb084bb
JO
1831 * to the NOP instructions.
1832 */
1833 if (!ftrace_code_disable(mod, p)) {
08f5ac90 1834 ftrace_free_rec(p);
d2c8c3ea
SR
1835 /* Game over */
1836 break;
5cb084bb
JO
1837 }
1838
5cb084bb
JO
1839 ftrace_update_cnt++;
1840
1841 /*
1842 * If the tracing is enabled, go ahead and enable the record.
1843 *
1844 * The reason not to enable the record immediately is the
1845 * inherent check of ftrace_make_nop/ftrace_make_call for
1846 * correct previous instructions. Making first the NOP
1847 * conversion puts the module to the correct state, thus
1848 * passing the ftrace_make_call check.
1849 */
f7bc8b61 1850 if (ftrace_start_up && ref) {
5cb084bb
JO
1851 int failed = __ftrace_replace_code(p, 1);
1852 if (failed) {
1853 ftrace_bug(failed, p->ip);
1854 ftrace_free_rec(p);
1855 }
1856 }
3d083395
SR
1857 }
1858
750ed1a4 1859 stop = ftrace_now(raw_smp_processor_id());
3d083395
SR
1860 ftrace_update_time = stop - start;
1861 ftrace_update_tot_cnt += ftrace_update_cnt;
1862
16444a8a
ACM
1863 return 0;
1864}
1865
68bf21aa 1866static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
3c1720f0
SR
1867{
1868 struct ftrace_page *pg;
1869 int cnt;
1870 int i;
3c1720f0
SR
1871
1872 /* allocate a few pages */
1873 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1874 if (!ftrace_pages_start)
1875 return -1;
1876
1877 /*
1878 * Allocate a few more pages.
1879 *
1880 * TODO: have some parser search vmlinux before
1881 * final linking to find all calls to ftrace.
1882 * Then we can:
1883 * a) know how many pages to allocate.
1884 * and/or
1885 * b) set up the table then.
1886 *
1887 * The dynamic code is still necessary for
1888 * modules.
1889 */
1890
1891 pg = ftrace_pages = ftrace_pages_start;
1892
68bf21aa 1893 cnt = num_to_init / ENTRIES_PER_PAGE;
08f5ac90 1894 pr_info("ftrace: allocating %ld entries in %d pages\n",
5821e1b7 1895 num_to_init, cnt + 1);
3c1720f0
SR
1896
1897 for (i = 0; i < cnt; i++) {
1898 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1899
1900 /* If we fail, we'll try later anyway */
1901 if (!pg->next)
1902 break;
1903
1904 pg = pg->next;
1905 }
1906
1907 return 0;
1908}
1909
5072c59f
SR
1910enum {
1911 FTRACE_ITER_FILTER = (1 << 0),
689fd8b6 1912 FTRACE_ITER_NOTRACE = (1 << 1),
3499e461
SR
1913 FTRACE_ITER_PRINTALL = (1 << 2),
1914 FTRACE_ITER_HASH = (1 << 3),
647bcd03 1915 FTRACE_ITER_ENABLED = (1 << 4),
5072c59f
SR
1916};
1917
1918#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1919
1920struct ftrace_iterator {
98c4fd04 1921 loff_t pos;
4aeb6967
SR
1922 loff_t func_pos;
1923 struct ftrace_page *pg;
1924 struct dyn_ftrace *func;
1925 struct ftrace_func_probe *probe;
1926 struct trace_parser parser;
1cf41dd7 1927 struct ftrace_hash *hash;
33dc9b12 1928 struct ftrace_ops *ops;
4aeb6967
SR
1929 int hidx;
1930 int idx;
1931 unsigned flags;
5072c59f
SR
1932};
1933
8fc0c701 1934static void *
4aeb6967 1935t_hash_next(struct seq_file *m, loff_t *pos)
8fc0c701
SR
1936{
1937 struct ftrace_iterator *iter = m->private;
4aeb6967 1938 struct hlist_node *hnd = NULL;
8fc0c701
SR
1939 struct hlist_head *hhd;
1940
8fc0c701 1941 (*pos)++;
98c4fd04 1942 iter->pos = *pos;
8fc0c701 1943
4aeb6967
SR
1944 if (iter->probe)
1945 hnd = &iter->probe->node;
8fc0c701
SR
1946 retry:
1947 if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
1948 return NULL;
1949
1950 hhd = &ftrace_func_hash[iter->hidx];
1951
1952 if (hlist_empty(hhd)) {
1953 iter->hidx++;
1954 hnd = NULL;
1955 goto retry;
1956 }
1957
1958 if (!hnd)
1959 hnd = hhd->first;
1960 else {
1961 hnd = hnd->next;
1962 if (!hnd) {
1963 iter->hidx++;
1964 goto retry;
1965 }
1966 }
1967
4aeb6967
SR
1968 if (WARN_ON_ONCE(!hnd))
1969 return NULL;
1970
1971 iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
1972
1973 return iter;
8fc0c701
SR
1974}
1975
1976static void *t_hash_start(struct seq_file *m, loff_t *pos)
1977{
1978 struct ftrace_iterator *iter = m->private;
1979 void *p = NULL;
d82d6244
LZ
1980 loff_t l;
1981
2bccfffd
SR
1982 if (iter->func_pos > *pos)
1983 return NULL;
8fc0c701 1984
d82d6244 1985 iter->hidx = 0;
2bccfffd 1986 for (l = 0; l <= (*pos - iter->func_pos); ) {
4aeb6967 1987 p = t_hash_next(m, &l);
d82d6244
LZ
1988 if (!p)
1989 break;
1990 }
4aeb6967
SR
1991 if (!p)
1992 return NULL;
1993
98c4fd04
SR
1994 /* Only set this if we have an item */
1995 iter->flags |= FTRACE_ITER_HASH;
1996
4aeb6967 1997 return iter;
8fc0c701
SR
1998}
1999
4aeb6967
SR
2000static int
2001t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
8fc0c701 2002{
b6887d79 2003 struct ftrace_func_probe *rec;
8fc0c701 2004
4aeb6967
SR
2005 rec = iter->probe;
2006 if (WARN_ON_ONCE(!rec))
2007 return -EIO;
8fc0c701 2008
809dcf29
SR
2009 if (rec->ops->print)
2010 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2011
b375a11a 2012 seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
8fc0c701
SR
2013
2014 if (rec->data)
2015 seq_printf(m, ":%p", rec->data);
2016 seq_putc(m, '\n');
2017
2018 return 0;
2019}
2020
e309b41d 2021static void *
5072c59f
SR
2022t_next(struct seq_file *m, void *v, loff_t *pos)
2023{
2024 struct ftrace_iterator *iter = m->private;
f45948e8 2025 struct ftrace_ops *ops = &global_ops;
5072c59f
SR
2026 struct dyn_ftrace *rec = NULL;
2027
45a4a237
SR
2028 if (unlikely(ftrace_disabled))
2029 return NULL;
2030
8fc0c701 2031 if (iter->flags & FTRACE_ITER_HASH)
4aeb6967 2032 return t_hash_next(m, pos);
8fc0c701 2033
5072c59f 2034 (*pos)++;
1106b699 2035 iter->pos = iter->func_pos = *pos;
5072c59f 2036
0c75a3ed 2037 if (iter->flags & FTRACE_ITER_PRINTALL)
57c072c7 2038 return t_hash_start(m, pos);
0c75a3ed 2039
5072c59f
SR
2040 retry:
2041 if (iter->idx >= iter->pg->index) {
2042 if (iter->pg->next) {
2043 iter->pg = iter->pg->next;
2044 iter->idx = 0;
2045 goto retry;
2046 }
2047 } else {
2048 rec = &iter->pg->records[iter->idx++];
a9fdda33
SR
2049 if ((rec->flags & FTRACE_FL_FREE) ||
2050
0183fb1c 2051 ((iter->flags & FTRACE_ITER_FILTER) &&
f45948e8 2052 !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
0183fb1c 2053
41c52c0d 2054 ((iter->flags & FTRACE_ITER_NOTRACE) &&
647bcd03
SR
2055 !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2056
2057 ((iter->flags & FTRACE_ITER_ENABLED) &&
2058 !(rec->flags & ~FTRACE_FL_MASK))) {
2059
5072c59f
SR
2060 rec = NULL;
2061 goto retry;
2062 }
2063 }
2064
4aeb6967 2065 if (!rec)
57c072c7 2066 return t_hash_start(m, pos);
4aeb6967
SR
2067
2068 iter->func = rec;
2069
2070 return iter;
5072c59f
SR
2071}
2072
98c4fd04
SR
2073static void reset_iter_read(struct ftrace_iterator *iter)
2074{
2075 iter->pos = 0;
2076 iter->func_pos = 0;
 2077 	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
5072c59f
SR
2078}
2079
2080static void *t_start(struct seq_file *m, loff_t *pos)
2081{
2082 struct ftrace_iterator *iter = m->private;
f45948e8 2083 struct ftrace_ops *ops = &global_ops;
5072c59f 2084 void *p = NULL;
694ce0a5 2085 loff_t l;
5072c59f 2086
8fc0c701 2087 mutex_lock(&ftrace_lock);
45a4a237
SR
2088
2089 if (unlikely(ftrace_disabled))
2090 return NULL;
2091
98c4fd04
SR
2092 /*
2093 * If an lseek was done, then reset and start from beginning.
2094 */
2095 if (*pos < iter->pos)
2096 reset_iter_read(iter);
2097
0c75a3ed
SR
2098 /*
2099 * For set_ftrace_filter reading, if we have the filter
2100 * off, we can short cut and just print out that all
2101 * functions are enabled.
2102 */
f45948e8 2103 if (iter->flags & FTRACE_ITER_FILTER && !ops->filter_hash->count) {
0c75a3ed 2104 if (*pos > 0)
8fc0c701 2105 return t_hash_start(m, pos);
0c75a3ed 2106 iter->flags |= FTRACE_ITER_PRINTALL;
df091625
CW
2107 /* reset in case of seek/pread */
2108 iter->flags &= ~FTRACE_ITER_HASH;
0c75a3ed
SR
2109 return iter;
2110 }
2111
8fc0c701
SR
2112 if (iter->flags & FTRACE_ITER_HASH)
2113 return t_hash_start(m, pos);
2114
98c4fd04
SR
2115 /*
2116 * Unfortunately, we need to restart at ftrace_pages_start
 2117 	 * every time we let go of the ftrace_lock. This is because
2118 * those pointers can change without the lock.
2119 */
694ce0a5
LZ
2120 iter->pg = ftrace_pages_start;
2121 iter->idx = 0;
2122 for (l = 0; l <= *pos; ) {
2123 p = t_next(m, p, &l);
2124 if (!p)
2125 break;
50cdaf08 2126 }
5821e1b7 2127
4aeb6967
SR
2128 if (!p) {
2129 if (iter->flags & FTRACE_ITER_FILTER)
2130 return t_hash_start(m, pos);
8fc0c701 2131
4aeb6967
SR
2132 return NULL;
2133 }
2134
2135 return iter;
5072c59f
SR
2136}
2137
2138static void t_stop(struct seq_file *m, void *p)
2139{
8fc0c701 2140 mutex_unlock(&ftrace_lock);
5072c59f
SR
2141}
2142
2143static int t_show(struct seq_file *m, void *v)
2144{
0c75a3ed 2145 struct ftrace_iterator *iter = m->private;
4aeb6967 2146 struct dyn_ftrace *rec;
5072c59f 2147
8fc0c701 2148 if (iter->flags & FTRACE_ITER_HASH)
4aeb6967 2149 return t_hash_show(m, iter);
8fc0c701 2150
0c75a3ed
SR
2151 if (iter->flags & FTRACE_ITER_PRINTALL) {
2152 seq_printf(m, "#### all functions enabled ####\n");
2153 return 0;
2154 }
2155
4aeb6967
SR
2156 rec = iter->func;
2157
5072c59f
SR
2158 if (!rec)
2159 return 0;
2160
647bcd03
SR
2161 seq_printf(m, "%ps", (void *)rec->ip);
2162 if (iter->flags & FTRACE_ITER_ENABLED)
2163 seq_printf(m, " (%ld)",
2164 rec->flags & ~FTRACE_FL_MASK);
2165 seq_printf(m, "\n");
5072c59f
SR
2166
2167 return 0;
2168}
2169
88e9d34c 2170static const struct seq_operations show_ftrace_seq_ops = {
5072c59f
SR
2171 .start = t_start,
2172 .next = t_next,
2173 .stop = t_stop,
2174 .show = t_show,
2175};
2176
e309b41d 2177static int
5072c59f
SR
2178ftrace_avail_open(struct inode *inode, struct file *file)
2179{
2180 struct ftrace_iterator *iter;
2181 int ret;
2182
4eebcc81
SR
2183 if (unlikely(ftrace_disabled))
2184 return -ENODEV;
2185
5072c59f
SR
2186 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2187 if (!iter)
2188 return -ENOMEM;
2189
2190 iter->pg = ftrace_pages_start;
5072c59f
SR
2191
2192 ret = seq_open(file, &show_ftrace_seq_ops);
2193 if (!ret) {
2194 struct seq_file *m = file->private_data;
4bf39a94 2195
5072c59f 2196 m->private = iter;
4bf39a94 2197 } else {
5072c59f 2198 kfree(iter);
4bf39a94 2199 }
5072c59f
SR
2200
2201 return ret;
2202}
2203
647bcd03
SR
2204static int
2205ftrace_enabled_open(struct inode *inode, struct file *file)
2206{
2207 struct ftrace_iterator *iter;
2208 int ret;
2209
2210 if (unlikely(ftrace_disabled))
2211 return -ENODEV;
2212
2213 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2214 if (!iter)
2215 return -ENOMEM;
2216
2217 iter->pg = ftrace_pages_start;
2218 iter->flags = FTRACE_ITER_ENABLED;
2219
2220 ret = seq_open(file, &show_ftrace_seq_ops);
2221 if (!ret) {
2222 struct seq_file *m = file->private_data;
2223
2224 m->private = iter;
2225 } else {
2226 kfree(iter);
2227 }
2228
2229 return ret;
2230}
2231
1cf41dd7 2232static void ftrace_filter_reset(struct ftrace_hash *hash)
5072c59f 2233{
52baf119 2234 mutex_lock(&ftrace_lock);
1cf41dd7 2235 ftrace_hash_clear(hash);
52baf119 2236 mutex_unlock(&ftrace_lock);
5072c59f
SR
2237}
2238
e309b41d 2239static int
f45948e8 2240ftrace_regex_open(struct ftrace_ops *ops, int flag,
1cf41dd7 2241 struct inode *inode, struct file *file)
5072c59f
SR
2242{
2243 struct ftrace_iterator *iter;
f45948e8 2244 struct ftrace_hash *hash;
5072c59f
SR
2245 int ret = 0;
2246
4eebcc81
SR
2247 if (unlikely(ftrace_disabled))
2248 return -ENODEV;
2249
5072c59f
SR
2250 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2251 if (!iter)
2252 return -ENOMEM;
2253
689fd8b6 2254 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2255 kfree(iter);
2256 return -ENOMEM;
2257 }
2258
f45948e8
SR
2259 if (flag & FTRACE_ITER_NOTRACE)
2260 hash = ops->notrace_hash;
2261 else
2262 hash = ops->filter_hash;
2263
33dc9b12
SR
2264 iter->ops = ops;
2265 iter->flags = flag;
2266
2267 if (file->f_mode & FMODE_WRITE) {
2268 mutex_lock(&ftrace_lock);
2269 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2270 mutex_unlock(&ftrace_lock);
2271
2272 if (!iter->hash) {
2273 trace_parser_put(&iter->parser);
2274 kfree(iter);
2275 return -ENOMEM;
2276 }
2277 }
1cf41dd7 2278
41c52c0d 2279 mutex_lock(&ftrace_regex_lock);
33dc9b12 2280
5072c59f 2281 if ((file->f_mode & FMODE_WRITE) &&
8650ae32 2282 (file->f_flags & O_TRUNC))
33dc9b12 2283 ftrace_filter_reset(iter->hash);
5072c59f
SR
2284
2285 if (file->f_mode & FMODE_READ) {
2286 iter->pg = ftrace_pages_start;
5072c59f
SR
2287
2288 ret = seq_open(file, &show_ftrace_seq_ops);
2289 if (!ret) {
2290 struct seq_file *m = file->private_data;
2291 m->private = iter;
79fe249c 2292 } else {
33dc9b12
SR
2293 /* Failed */
2294 free_ftrace_hash(iter->hash);
79fe249c 2295 trace_parser_put(&iter->parser);
5072c59f 2296 kfree(iter);
79fe249c 2297 }
5072c59f
SR
2298 } else
2299 file->private_data = iter;
41c52c0d 2300 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
2301
2302 return ret;
2303}
2304
41c52c0d
SR
2305static int
2306ftrace_filter_open(struct inode *inode, struct file *file)
2307{
f45948e8 2308 return ftrace_regex_open(&global_ops, FTRACE_ITER_FILTER,
1cf41dd7 2309 inode, file);
41c52c0d
SR
2310}
2311
2312static int
2313ftrace_notrace_open(struct inode *inode, struct file *file)
2314{
f45948e8 2315 return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
1cf41dd7 2316 inode, file);
41c52c0d
SR
2317}
2318
e309b41d 2319static loff_t
41c52c0d 2320ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
5072c59f
SR
2321{
2322 loff_t ret;
2323
2324 if (file->f_mode & FMODE_READ)
2325 ret = seq_lseek(file, offset, origin);
2326 else
2327 file->f_pos = ret = 1;
2328
2329 return ret;
2330}
2331
64e7c440 2332static int ftrace_match(char *str, char *regex, int len, int type)
9f4801e3 2333{
9f4801e3 2334 int matched = 0;
751e9983 2335 int slen;
9f4801e3 2336
9f4801e3
SR
2337 switch (type) {
2338 case MATCH_FULL:
2339 if (strcmp(str, regex) == 0)
2340 matched = 1;
2341 break;
2342 case MATCH_FRONT_ONLY:
2343 if (strncmp(str, regex, len) == 0)
2344 matched = 1;
2345 break;
2346 case MATCH_MIDDLE_ONLY:
2347 if (strstr(str, regex))
2348 matched = 1;
2349 break;
2350 case MATCH_END_ONLY:
751e9983
LZ
2351 slen = strlen(str);
2352 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
9f4801e3
SR
2353 matched = 1;
2354 break;
2355 }
2356
2357 return matched;
2358}
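
/*
 * For reference (illustrative note, based on how the callers feed this
 * function): filter_parse_regex() maps the usual glob forms onto the
 * match types handled above, roughly:
 *
 *	"sched_switch"	-> MATCH_FULL		(exact compare)
 *	"sched_*"	-> MATCH_FRONT_ONLY	(prefix compare)
 *	"*_switch"	-> MATCH_END_ONLY	(suffix compare)
 *	"*sched*"	-> MATCH_MIDDLE_ONLY	(substring search)
 *
 * A leading '!' inverts the result for the callers that support it.
 */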
2359
b448c4e3 2360static int
1cf41dd7 2361enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
996e87be 2362{
b448c4e3 2363 struct ftrace_func_entry *entry;
b448c4e3
SR
2364 int ret = 0;
2365
1cf41dd7
SR
2366 entry = ftrace_lookup_ip(hash, rec->ip);
2367 if (not) {
2368 /* Do nothing if it doesn't exist */
2369 if (!entry)
2370 return 0;
b448c4e3 2371
33dc9b12 2372 free_hash_entry(hash, entry);
1cf41dd7
SR
2373 } else {
2374 /* Do nothing if it exists */
2375 if (entry)
2376 return 0;
b448c4e3 2377
1cf41dd7 2378 ret = add_hash_entry(hash, rec->ip);
b448c4e3
SR
2379 }
2380 return ret;
996e87be
SR
2381}
2382
64e7c440 2383static int
b9df92d2
SR
2384ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2385 char *regex, int len, int type)
64e7c440
SR
2386{
2387 char str[KSYM_SYMBOL_LEN];
b9df92d2
SR
2388 char *modname;
2389
2390 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2391
2392 if (mod) {
2393 /* module lookup requires matching the module */
2394 if (!modname || strcmp(modname, mod))
2395 return 0;
2396
2397 /* blank search means to match all funcs in the mod */
2398 if (!len)
2399 return 1;
2400 }
64e7c440 2401
64e7c440
SR
2402 return ftrace_match(str, regex, len, type);
2403}
2404
1cf41dd7
SR
2405static int
2406match_records(struct ftrace_hash *hash, char *buff,
2407 int len, char *mod, int not)
9f4801e3 2408{
b9df92d2 2409 unsigned search_len = 0;
9f4801e3
SR
2410 struct ftrace_page *pg;
2411 struct dyn_ftrace *rec;
b9df92d2
SR
2412 int type = MATCH_FULL;
2413 char *search = buff;
311d16da 2414 int found = 0;
b448c4e3 2415 int ret;
9f4801e3 2416
b9df92d2
SR
2417 if (len) {
2418 type = filter_parse_regex(buff, len, &search, &not);
2419 search_len = strlen(search);
2420 }
9f4801e3 2421
52baf119 2422 mutex_lock(&ftrace_lock);
265c831c 2423
b9df92d2
SR
2424 if (unlikely(ftrace_disabled))
2425 goto out_unlock;
9f4801e3 2426
265c831c 2427 do_for_each_ftrace_rec(pg, rec) {
265c831c 2428
b9df92d2 2429 if (ftrace_match_record(rec, mod, search, search_len, type)) {
1cf41dd7 2430 ret = enter_record(hash, rec, not);
b448c4e3
SR
2431 if (ret < 0) {
2432 found = ret;
2433 goto out_unlock;
2434 }
311d16da 2435 found = 1;
265c831c
SR
2436 }
2437 } while_for_each_ftrace_rec();
b9df92d2 2438 out_unlock:
52baf119 2439 mutex_unlock(&ftrace_lock);
311d16da
LZ
2440
2441 return found;
5072c59f
SR
2442}
2443
64e7c440 2444static int
1cf41dd7 2445ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
64e7c440 2446{
1cf41dd7 2447 return match_records(hash, buff, len, NULL, 0);
64e7c440
SR
2448}
2449
1cf41dd7
SR
2450static int
2451ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
64e7c440 2452{
64e7c440 2453 int not = 0;
6a24a244 2454
64e7c440
SR
2455 /* blank or '*' mean the same */
2456 if (strcmp(buff, "*") == 0)
2457 buff[0] = 0;
2458
2459 /* handle the case of 'dont filter this module' */
2460 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2461 buff[0] = 0;
2462 not = 1;
2463 }
2464
1cf41dd7 2465 return match_records(hash, buff, strlen(buff), mod, not);
64e7c440
SR
2466}
2467
f6180773
SR
2468/*
2469 * We register the module command as a template to show others how
 2470 * to register a command as well.
2471 */
2472
2473static int
43dd61c9
SR
2474ftrace_mod_callback(struct ftrace_hash *hash,
2475 char *func, char *cmd, char *param, int enable)
f6180773
SR
2476{
2477 char *mod;
b448c4e3 2478 int ret = -EINVAL;
f6180773
SR
2479
2480 /*
2481 * cmd == 'mod' because we only registered this func
2482 * for the 'mod' ftrace_func_command.
2483 * But if you register one func with multiple commands,
2484 * you can tell which command was used by the cmd
2485 * parameter.
2486 */
2487
2488 /* we must have a module name */
2489 if (!param)
b448c4e3 2490 return ret;
f6180773
SR
2491
2492 mod = strsep(&param, ":");
2493 if (!strlen(mod))
b448c4e3 2494 return ret;
f6180773 2495
1cf41dd7 2496 ret = ftrace_match_module_records(hash, func, mod);
b448c4e3
SR
2497 if (!ret)
2498 ret = -EINVAL;
2499 if (ret < 0)
2500 return ret;
2501
2502 return 0;
f6180773
SR
2503}
2504
2505static struct ftrace_func_command ftrace_mod_cmd = {
2506 .name = "mod",
2507 .func = ftrace_mod_callback,
2508};
2509
2510static int __init ftrace_mod_cmd_init(void)
2511{
2512 return register_ftrace_command(&ftrace_mod_cmd);
2513}
2514device_initcall(ftrace_mod_cmd_init);
2515
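
/*
 * A minimal sketch (hypothetical command name and callback, not part of
 * this file) of how another command could be wired up, following the
 * "mod" template above:
 */
static int
ftrace_example_callback(struct ftrace_hash *hash,
			char *func, char *cmd, char *param, int enable)
{
	/* cmd is "example" here; func and param come from the written text */
	int ret = ftrace_match_records(hash, func, strlen(func));

	if (!ret)
		ret = -EINVAL;
	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_example_cmd = {
	.name			= "example",
	.func			= ftrace_example_callback,
};

static int __init ftrace_example_cmd_init(void)
{
	return register_ftrace_command(&ftrace_example_cmd);
}
device_initcall(ftrace_example_cmd_init);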
59df055f 2516static void
b6887d79 2517function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
59df055f 2518{
b6887d79 2519 struct ftrace_func_probe *entry;
59df055f
SR
2520 struct hlist_head *hhd;
2521 struct hlist_node *n;
2522 unsigned long key;
59df055f
SR
2523
2524 key = hash_long(ip, FTRACE_HASH_BITS);
2525
2526 hhd = &ftrace_func_hash[key];
2527
2528 if (hlist_empty(hhd))
2529 return;
2530
2531 /*
 2532 	 * Disable preemption for these calls to prevent an RCU grace
2533 * period. This syncs the hash iteration and freeing of items
2534 * on the hash. rcu_read_lock is too dangerous here.
2535 */
5168ae50 2536 preempt_disable_notrace();
59df055f
SR
2537 hlist_for_each_entry_rcu(entry, n, hhd, node) {
2538 if (entry->ip == ip)
2539 entry->ops->func(ip, parent_ip, &entry->data);
2540 }
5168ae50 2541 preempt_enable_notrace();
59df055f
SR
2542}
2543
b6887d79 2544static struct ftrace_ops trace_probe_ops __read_mostly =
59df055f 2545{
fb9fb015 2546 .func = function_trace_probe_call,
59df055f
SR
2547};
2548
b6887d79 2549static int ftrace_probe_registered;
59df055f 2550
b6887d79 2551static void __enable_ftrace_function_probe(void)
59df055f 2552{
b848914c 2553 int ret;
59df055f
SR
2554 int i;
2555
b6887d79 2556 if (ftrace_probe_registered)
59df055f
SR
2557 return;
2558
2559 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2560 struct hlist_head *hhd = &ftrace_func_hash[i];
2561 if (hhd->first)
2562 break;
2563 }
2564 /* Nothing registered? */
2565 if (i == FTRACE_FUNC_HASHSIZE)
2566 return;
2567
b848914c
SR
2568 ret = __register_ftrace_function(&trace_probe_ops);
2569 if (!ret)
a1cd6173 2570 ret = ftrace_startup(&trace_probe_ops, 0);
b848914c 2571
b6887d79 2572 ftrace_probe_registered = 1;
59df055f
SR
2573}
2574
b6887d79 2575static void __disable_ftrace_function_probe(void)
59df055f 2576{
b848914c 2577 int ret;
59df055f
SR
2578 int i;
2579
b6887d79 2580 if (!ftrace_probe_registered)
59df055f
SR
2581 return;
2582
2583 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2584 struct hlist_head *hhd = &ftrace_func_hash[i];
2585 if (hhd->first)
2586 return;
2587 }
2588
2589 /* no more funcs left */
b848914c
SR
2590 ret = __unregister_ftrace_function(&trace_probe_ops);
2591 if (!ret)
2592 ftrace_shutdown(&trace_probe_ops, 0);
2593
b6887d79 2594 ftrace_probe_registered = 0;
59df055f
SR
2595}
2596
2597
2598static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2599{
b6887d79
SR
2600 struct ftrace_func_probe *entry =
2601 container_of(rhp, struct ftrace_func_probe, rcu);
59df055f
SR
2602
2603 if (entry->ops->free)
2604 entry->ops->free(&entry->data);
2605 kfree(entry);
2606}
2607
2608
2609int
b6887d79 2610register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
59df055f
SR
2611 void *data)
2612{
b6887d79 2613 struct ftrace_func_probe *entry;
59df055f
SR
2614 struct ftrace_page *pg;
2615 struct dyn_ftrace *rec;
59df055f 2616 int type, len, not;
6a24a244 2617 unsigned long key;
59df055f
SR
2618 int count = 0;
2619 char *search;
2620
3f6fe06d 2621 type = filter_parse_regex(glob, strlen(glob), &search, &not);
59df055f
SR
2622 len = strlen(search);
2623
b6887d79 2624 /* we do not support '!' for function probes */
59df055f
SR
2625 if (WARN_ON(not))
2626 return -EINVAL;
2627
2628 mutex_lock(&ftrace_lock);
59df055f 2629
45a4a237
SR
2630 if (unlikely(ftrace_disabled))
2631 goto out_unlock;
59df055f 2632
45a4a237 2633 do_for_each_ftrace_rec(pg, rec) {
59df055f 2634
b9df92d2 2635 if (!ftrace_match_record(rec, NULL, search, len, type))
59df055f
SR
2636 continue;
2637
2638 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2639 if (!entry) {
b6887d79 2640 /* If we did not process any, then return error */
59df055f
SR
2641 if (!count)
2642 count = -ENOMEM;
2643 goto out_unlock;
2644 }
2645
2646 count++;
2647
2648 entry->data = data;
2649
2650 /*
2651 * The caller might want to do something special
2652 * for each function we find. We call the callback
2653 * to give the caller an opportunity to do so.
2654 */
2655 if (ops->callback) {
2656 if (ops->callback(rec->ip, &entry->data) < 0) {
2657 /* caller does not like this func */
2658 kfree(entry);
2659 continue;
2660 }
2661 }
2662
2663 entry->ops = ops;
2664 entry->ip = rec->ip;
2665
2666 key = hash_long(entry->ip, FTRACE_HASH_BITS);
2667 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
2668
2669 } while_for_each_ftrace_rec();
b6887d79 2670 __enable_ftrace_function_probe();
59df055f
SR
2671
2672 out_unlock:
2673 mutex_unlock(&ftrace_lock);
2674
2675 return count;
2676}
2677
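
/*
 * A minimal usage sketch (hypothetical names, not part of this file):
 * attach a probe to every function matching a glob.  The probe func is
 * called from the function tracer itself, so keep it short and safe in
 * any context.
 */
static void
example_probe_hit(unsigned long ip, unsigned long parent_ip, void **data)
{
	/* called on each hit of a matched function */
}

static struct ftrace_probe_ops example_probe_ops = {
	.func		= example_probe_hit,
};

static int example_attach_probe(void)
{
	static char glob[] = "sched_*";	/* writable: the '*' is stripped in place */

	/* returns the number of functions matched, or a negative error */
	return register_ftrace_function_probe(glob, &example_probe_ops, NULL);
}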
2678enum {
b6887d79
SR
2679 PROBE_TEST_FUNC = 1,
2680 PROBE_TEST_DATA = 2
59df055f
SR
2681};
2682
2683static void
b6887d79 2684__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
59df055f
SR
2685 void *data, int flags)
2686{
b6887d79 2687 struct ftrace_func_probe *entry;
59df055f
SR
2688 struct hlist_node *n, *tmp;
2689 char str[KSYM_SYMBOL_LEN];
2690 int type = MATCH_FULL;
2691 int i, len = 0;
2692 char *search;
2693
b36461da 2694 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
59df055f 2695 glob = NULL;
b36461da 2696 else if (glob) {
59df055f
SR
2697 int not;
2698
3f6fe06d 2699 type = filter_parse_regex(glob, strlen(glob), &search, &not);
59df055f
SR
2700 len = strlen(search);
2701
b6887d79 2702 /* we do not support '!' for function probes */
59df055f
SR
2703 if (WARN_ON(not))
2704 return;
2705 }
2706
2707 mutex_lock(&ftrace_lock);
2708 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2709 struct hlist_head *hhd = &ftrace_func_hash[i];
2710
2711 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2712
2713 /* break up if statements for readability */
b6887d79 2714 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
59df055f
SR
2715 continue;
2716
b6887d79 2717 if ((flags & PROBE_TEST_DATA) && entry->data != data)
59df055f
SR
2718 continue;
2719
2720 /* do this last, since it is the most expensive */
2721 if (glob) {
2722 kallsyms_lookup(entry->ip, NULL, NULL,
2723 NULL, str);
2724 if (!ftrace_match(str, glob, len, type))
2725 continue;
2726 }
2727
2728 hlist_del(&entry->node);
2729 call_rcu(&entry->rcu, ftrace_free_entry_rcu);
2730 }
2731 }
b6887d79 2732 __disable_ftrace_function_probe();
59df055f
SR
2733 mutex_unlock(&ftrace_lock);
2734}
2735
2736void
b6887d79 2737unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
59df055f
SR
2738 void *data)
2739{
b6887d79
SR
2740 __unregister_ftrace_function_probe(glob, ops, data,
2741 PROBE_TEST_FUNC | PROBE_TEST_DATA);
59df055f
SR
2742}
2743
2744void
b6887d79 2745unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
59df055f 2746{
b6887d79 2747 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
59df055f
SR
2748}
2749
b6887d79 2750void unregister_ftrace_function_probe_all(char *glob)
59df055f 2751{
b6887d79 2752 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
59df055f
SR
2753}
2754
f6180773
SR
2755static LIST_HEAD(ftrace_commands);
2756static DEFINE_MUTEX(ftrace_cmd_mutex);
2757
2758int register_ftrace_command(struct ftrace_func_command *cmd)
2759{
2760 struct ftrace_func_command *p;
2761 int ret = 0;
2762
2763 mutex_lock(&ftrace_cmd_mutex);
2764 list_for_each_entry(p, &ftrace_commands, list) {
2765 if (strcmp(cmd->name, p->name) == 0) {
2766 ret = -EBUSY;
2767 goto out_unlock;
2768 }
2769 }
2770 list_add(&cmd->list, &ftrace_commands);
2771 out_unlock:
2772 mutex_unlock(&ftrace_cmd_mutex);
2773
2774 return ret;
2775}
2776
2777int unregister_ftrace_command(struct ftrace_func_command *cmd)
2778{
2779 struct ftrace_func_command *p, *n;
2780 int ret = -ENODEV;
2781
2782 mutex_lock(&ftrace_cmd_mutex);
2783 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2784 if (strcmp(cmd->name, p->name) == 0) {
2785 ret = 0;
2786 list_del_init(&p->list);
2787 goto out_unlock;
2788 }
2789 }
2790 out_unlock:
2791 mutex_unlock(&ftrace_cmd_mutex);
2792
2793 return ret;
2794}
2795
33dc9b12
SR
2796static int ftrace_process_regex(struct ftrace_hash *hash,
2797 char *buff, int len, int enable)
64e7c440 2798{
f6180773 2799 char *func, *command, *next = buff;
6a24a244 2800 struct ftrace_func_command *p;
0aff1c0c 2801 int ret = -EINVAL;
64e7c440
SR
2802
2803 func = strsep(&next, ":");
2804
2805 if (!next) {
1cf41dd7 2806 ret = ftrace_match_records(hash, func, len);
b448c4e3
SR
2807 if (!ret)
2808 ret = -EINVAL;
2809 if (ret < 0)
2810 return ret;
2811 return 0;
64e7c440
SR
2812 }
2813
f6180773 2814 /* command found */
64e7c440
SR
2815
2816 command = strsep(&next, ":");
2817
f6180773
SR
2818 mutex_lock(&ftrace_cmd_mutex);
2819 list_for_each_entry(p, &ftrace_commands, list) {
2820 if (strcmp(p->name, command) == 0) {
43dd61c9 2821 ret = p->func(hash, func, command, next, enable);
f6180773
SR
2822 goto out_unlock;
2823 }
64e7c440 2824 }
f6180773
SR
2825 out_unlock:
2826 mutex_unlock(&ftrace_cmd_mutex);
64e7c440 2827
f6180773 2828 return ret;
64e7c440
SR
2829}
2830
e309b41d 2831static ssize_t
41c52c0d
SR
2832ftrace_regex_write(struct file *file, const char __user *ubuf,
2833 size_t cnt, loff_t *ppos, int enable)
5072c59f
SR
2834{
2835 struct ftrace_iterator *iter;
689fd8b6 2836 struct trace_parser *parser;
2837 ssize_t ret, read;
5072c59f 2838
4ba7978e 2839 if (!cnt)
5072c59f
SR
2840 return 0;
2841
41c52c0d 2842 mutex_lock(&ftrace_regex_lock);
5072c59f 2843
45a4a237
SR
2844 ret = -ENODEV;
2845 if (unlikely(ftrace_disabled))
2846 goto out_unlock;
2847
5072c59f
SR
2848 if (file->f_mode & FMODE_READ) {
2849 struct seq_file *m = file->private_data;
2850 iter = m->private;
2851 } else
2852 iter = file->private_data;
2853
689fd8b6 2854 parser = &iter->parser;
2855 read = trace_get_user(parser, ubuf, cnt, ppos);
5072c59f 2856
4ba7978e 2857 if (read >= 0 && trace_parser_loaded(parser) &&
689fd8b6 2858 !trace_parser_cont(parser)) {
33dc9b12 2859 ret = ftrace_process_regex(iter->hash, parser->buffer,
689fd8b6 2860 parser->idx, enable);
313254a9 2861 trace_parser_clear(parser);
5072c59f 2862 if (ret)
ed146b25 2863 goto out_unlock;
eda1e328 2864 }
5072c59f 2865
5072c59f 2866 ret = read;
ed146b25 2867out_unlock:
689fd8b6 2868 mutex_unlock(&ftrace_regex_lock);
ed146b25 2869
5072c59f
SR
2870 return ret;
2871}
2872
41c52c0d
SR
2873static ssize_t
2874ftrace_filter_write(struct file *file, const char __user *ubuf,
2875 size_t cnt, loff_t *ppos)
2876{
2877 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2878}
2879
2880static ssize_t
2881ftrace_notrace_write(struct file *file, const char __user *ubuf,
2882 size_t cnt, loff_t *ppos)
2883{
2884 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2885}
2886
33dc9b12 2887static int
f45948e8
SR
2888ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
2889 int reset, int enable)
41c52c0d 2890{
33dc9b12 2891 struct ftrace_hash **orig_hash;
f45948e8 2892 struct ftrace_hash *hash;
33dc9b12 2893 int ret;
f45948e8 2894
936e074b
SR
2895 /* All global ops uses the global ops filters */
2896 if (ops->flags & FTRACE_OPS_FL_GLOBAL)
2897 ops = &global_ops;
2898
41c52c0d 2899 if (unlikely(ftrace_disabled))
33dc9b12 2900 return -ENODEV;
41c52c0d 2901
f45948e8 2902 if (enable)
33dc9b12 2903 orig_hash = &ops->filter_hash;
f45948e8 2904 else
33dc9b12
SR
2905 orig_hash = &ops->notrace_hash;
2906
2907 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
2908 if (!hash)
2909 return -ENOMEM;
f45948e8 2910
41c52c0d
SR
2911 mutex_lock(&ftrace_regex_lock);
2912 if (reset)
1cf41dd7 2913 ftrace_filter_reset(hash);
41c52c0d 2914 if (buf)
1cf41dd7 2915 ftrace_match_records(hash, buf, len);
33dc9b12
SR
2916
2917 mutex_lock(&ftrace_lock);
41fb61c2 2918 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
072126f4
SR
2919 if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
2920 && ftrace_enabled)
2921 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
2922
33dc9b12
SR
2923 mutex_unlock(&ftrace_lock);
2924
41c52c0d 2925 mutex_unlock(&ftrace_regex_lock);
33dc9b12
SR
2926
2927 free_ftrace_hash(hash);
2928 return ret;
41c52c0d
SR
2929}
2930
77a2b37d
SR
2931/**
2932 * ftrace_set_filter - set a function to filter on in ftrace
936e074b
SR
2933 * @ops - the ops to set the filter with
2934 * @buf - the string that holds the function filter text.
2935 * @len - the length of the string.
2936 * @reset - non zero to reset all filters before applying this filter.
2937 *
2938 * Filters denote which functions should be enabled when tracing is enabled.
2939 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2940 */
2941void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
2942 int len, int reset)
2943{
2944 ftrace_set_regex(ops, buf, len, reset, 1);
2945}
2946EXPORT_SYMBOL_GPL(ftrace_set_filter);
2947
2948/**
2949 * ftrace_set_notrace - set a function to not trace in ftrace
2950 * @ops - the ops to set the notrace filter with
2951 * @buf - the string that holds the function notrace text.
2952 * @len - the length of the string.
2953 * @reset - non zero to reset all filters before applying this filter.
2954 *
2955 * Notrace Filters denote which functions should not be enabled when tracing
2956 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2957 * for tracing.
2958 */
2959void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
2960 int len, int reset)
2961{
2962 ftrace_set_regex(ops, buf, len, reset, 0);
2963}
2964EXPORT_SYMBOL_GPL(ftrace_set_notrace);
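
/*
 * A minimal sketch (illustrative names, not part of this file) of how a
 * tracer would use the two helpers above on its own ftrace_ops.  Note the
 * buffers must be writable, since the glob parsing strips '*' in place.
 */
static void example_prepare_filters(struct ftrace_ops *ops)
{
	static char filter[] = "sched_*";
	static char noflt[] = "sched_clock";

	/* trace only sched_* functions ... */
	ftrace_set_filter(ops, filter, strlen(filter), 1);
	/* ... but never sched_clock */
	ftrace_set_notrace(ops, noflt, strlen(noflt), 0);
}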
2965/**
 2966 * ftrace_set_global_filter - set a function to filter on with the global ops
77a2b37d
SR
2968 * @buf - the string that holds the function filter text.
2969 * @len - the length of the string.
2970 * @reset - non zero to reset all filters before applying this filter.
2971 *
2972 * Filters denote which functions should be enabled when tracing is enabled.
2973 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2974 */
936e074b 2975void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
77a2b37d 2976{
f45948e8 2977 ftrace_set_regex(&global_ops, buf, len, reset, 1);
41c52c0d 2978}
936e074b 2979EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
4eebcc81 2980
41c52c0d
SR
2981/**
 2982 * ftrace_set_global_notrace - set a function to not trace with the global ops
41c52c0d
SR
2984 * @buf - the string that holds the function notrace text.
2985 * @len - the length of the string.
2986 * @reset - non zero to reset all filters before applying this filter.
2987 *
2988 * Notrace Filters denote which functions should not be enabled when tracing
2989 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2990 * for tracing.
2991 */
936e074b 2992void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
41c52c0d 2993{
f45948e8 2994 ftrace_set_regex(&global_ops, buf, len, reset, 0);
77a2b37d 2995}
936e074b 2996EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
77a2b37d 2997
2af15d6a
SR
2998/*
2999 * command line interface to allow users to set filters on boot up.
3000 */
3001#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
3002static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3003static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3004
3005static int __init set_ftrace_notrace(char *str)
3006{
3007 strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3008 return 1;
3009}
3010__setup("ftrace_notrace=", set_ftrace_notrace);
3011
3012static int __init set_ftrace_filter(char *str)
3013{
3014 strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3015 return 1;
3016}
3017__setup("ftrace_filter=", set_ftrace_filter);
3018
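
/*
 * Example (illustrative values) boot command line usage of the two
 * parameters above:
 *
 *	ftrace_filter=sched_switch,sched_wakeup ftrace_notrace=*spin_lock*
 *
 * Each parameter takes a comma separated list of expressions, which is
 * applied by set_ftrace_early_filters() during ftrace_init().
 */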
369bc18f 3019#ifdef CONFIG_FUNCTION_GRAPH_TRACER
f6060f46 3020static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
801c29fd
SR
3021static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
3022
369bc18f
SA
3023static int __init set_graph_function(char *str)
3024{
06f43d66 3025 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
369bc18f
SA
3026 return 1;
3027}
3028__setup("ftrace_graph_filter=", set_graph_function);
3029
3030static void __init set_ftrace_early_graph(char *buf)
3031{
3032 int ret;
3033 char *func;
3034
3035 while (buf) {
3036 func = strsep(&buf, ",");
3037 /* we allow only one expression at a time */
3038 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3039 func);
3040 if (ret)
3041 printk(KERN_DEBUG "ftrace: function %s not "
3042 "traceable\n", func);
3043 }
3044}
3045#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3046
f45948e8
SR
3047static void __init
3048set_ftrace_early_filter(struct ftrace_ops *ops, char *buf, int enable)
2af15d6a
SR
3049{
3050 char *func;
3051
3052 while (buf) {
3053 func = strsep(&buf, ",");
f45948e8 3054 ftrace_set_regex(ops, func, strlen(func), 0, enable);
2af15d6a
SR
3055 }
3056}
3057
3058static void __init set_ftrace_early_filters(void)
3059{
3060 if (ftrace_filter_buf[0])
f45948e8 3061 set_ftrace_early_filter(&global_ops, ftrace_filter_buf, 1);
2af15d6a 3062 if (ftrace_notrace_buf[0])
f45948e8 3063 set_ftrace_early_filter(&global_ops, ftrace_notrace_buf, 0);
369bc18f
SA
3064#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3065 if (ftrace_graph_buf[0])
3066 set_ftrace_early_graph(ftrace_graph_buf);
3067#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2af15d6a
SR
3068}
3069
e309b41d 3070static int
1cf41dd7 3071ftrace_regex_release(struct inode *inode, struct file *file)
5072c59f
SR
3072{
3073 struct seq_file *m = (struct seq_file *)file->private_data;
3074 struct ftrace_iterator *iter;
33dc9b12 3075 struct ftrace_hash **orig_hash;
689fd8b6 3076 struct trace_parser *parser;
ed926f9b 3077 int filter_hash;
33dc9b12 3078 int ret;
5072c59f 3079
41c52c0d 3080 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
3081 if (file->f_mode & FMODE_READ) {
3082 iter = m->private;
3083
3084 seq_release(inode, file);
3085 } else
3086 iter = file->private_data;
3087
689fd8b6 3088 parser = &iter->parser;
3089 if (trace_parser_loaded(parser)) {
3090 parser->buffer[parser->idx] = 0;
1cf41dd7 3091 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
5072c59f
SR
3092 }
3093
689fd8b6 3094 trace_parser_put(parser);
689fd8b6 3095
058e297d 3096 if (file->f_mode & FMODE_WRITE) {
ed926f9b
SR
3097 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3098
3099 if (filter_hash)
33dc9b12 3100 orig_hash = &iter->ops->filter_hash;
ed926f9b
SR
3101 else
3102 orig_hash = &iter->ops->notrace_hash;
33dc9b12 3103
058e297d 3104 mutex_lock(&ftrace_lock);
41fb61c2
SR
3105 ret = ftrace_hash_move(iter->ops, filter_hash,
3106 orig_hash, iter->hash);
3107 if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
3108 && ftrace_enabled)
3109 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
3110
058e297d
SR
3111 mutex_unlock(&ftrace_lock);
3112 }
33dc9b12
SR
3113 free_ftrace_hash(iter->hash);
3114 kfree(iter);
058e297d 3115
41c52c0d 3116 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
3117 return 0;
3118}
3119
5e2336a0 3120static const struct file_operations ftrace_avail_fops = {
5072c59f
SR
3121 .open = ftrace_avail_open,
3122 .read = seq_read,
3123 .llseek = seq_lseek,
3be04b47 3124 .release = seq_release_private,
5072c59f
SR
3125};
3126
647bcd03
SR
3127static const struct file_operations ftrace_enabled_fops = {
3128 .open = ftrace_enabled_open,
3129 .read = seq_read,
3130 .llseek = seq_lseek,
3131 .release = seq_release_private,
3132};
3133
5e2336a0 3134static const struct file_operations ftrace_filter_fops = {
5072c59f 3135 .open = ftrace_filter_open,
850a80cf 3136 .read = seq_read,
5072c59f 3137 .write = ftrace_filter_write,
98c4fd04 3138 .llseek = ftrace_regex_lseek,
1cf41dd7 3139 .release = ftrace_regex_release,
5072c59f
SR
3140};
3141
5e2336a0 3142static const struct file_operations ftrace_notrace_fops = {
41c52c0d 3143 .open = ftrace_notrace_open,
850a80cf 3144 .read = seq_read,
41c52c0d
SR
3145 .write = ftrace_notrace_write,
3146 .llseek = ftrace_regex_lseek,
1cf41dd7 3147 .release = ftrace_regex_release,
41c52c0d
SR
3148};
3149
ea4e2bc4
SR
3150#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3151
3152static DEFINE_MUTEX(graph_lock);
3153
3154int ftrace_graph_count;
c7c6b1fe 3155int ftrace_graph_filter_enabled;
ea4e2bc4
SR
3156unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3157
3158static void *
85951842 3159__g_next(struct seq_file *m, loff_t *pos)
ea4e2bc4 3160{
85951842 3161 if (*pos >= ftrace_graph_count)
ea4e2bc4 3162 return NULL;
a4ec5e0c 3163 return &ftrace_graph_funcs[*pos];
85951842 3164}
ea4e2bc4 3165
85951842
LZ
3166static void *
3167g_next(struct seq_file *m, void *v, loff_t *pos)
3168{
3169 (*pos)++;
3170 return __g_next(m, pos);
ea4e2bc4
SR
3171}
3172
3173static void *g_start(struct seq_file *m, loff_t *pos)
3174{
ea4e2bc4
SR
3175 mutex_lock(&graph_lock);
3176
f9349a8f 3177 	/* Nothing is filtered; tell g_show to print that all functions are enabled */
c7c6b1fe 3178 if (!ftrace_graph_filter_enabled && !*pos)
f9349a8f
FW
3179 return (void *)1;
3180
85951842 3181 return __g_next(m, pos);
ea4e2bc4
SR
3182}
3183
3184static void g_stop(struct seq_file *m, void *p)
3185{
3186 mutex_unlock(&graph_lock);
3187}
3188
3189static int g_show(struct seq_file *m, void *v)
3190{
3191 unsigned long *ptr = v;
ea4e2bc4
SR
3192
3193 if (!ptr)
3194 return 0;
3195
f9349a8f
FW
3196 if (ptr == (unsigned long *)1) {
3197 seq_printf(m, "#### all functions enabled ####\n");
3198 return 0;
3199 }
3200
b375a11a 3201 seq_printf(m, "%ps\n", (void *)*ptr);
ea4e2bc4
SR
3202
3203 return 0;
3204}
3205
88e9d34c 3206static const struct seq_operations ftrace_graph_seq_ops = {
ea4e2bc4
SR
3207 .start = g_start,
3208 .next = g_next,
3209 .stop = g_stop,
3210 .show = g_show,
3211};
3212
3213static int
3214ftrace_graph_open(struct inode *inode, struct file *file)
3215{
3216 int ret = 0;
3217
3218 if (unlikely(ftrace_disabled))
3219 return -ENODEV;
3220
3221 mutex_lock(&graph_lock);
3222 if ((file->f_mode & FMODE_WRITE) &&
8650ae32 3223 (file->f_flags & O_TRUNC)) {
c7c6b1fe 3224 ftrace_graph_filter_enabled = 0;
ea4e2bc4
SR
3225 ftrace_graph_count = 0;
3226 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3227 }
a4ec5e0c 3228 mutex_unlock(&graph_lock);
ea4e2bc4 3229
a4ec5e0c 3230 if (file->f_mode & FMODE_READ)
ea4e2bc4 3231 ret = seq_open(file, &ftrace_graph_seq_ops);
ea4e2bc4
SR
3232
3233 return ret;
3234}
3235
87827111
LZ
3236static int
3237ftrace_graph_release(struct inode *inode, struct file *file)
3238{
3239 if (file->f_mode & FMODE_READ)
3240 seq_release(inode, file);
3241 return 0;
3242}
3243
ea4e2bc4 3244static int
f9349a8f 3245ftrace_set_func(unsigned long *array, int *idx, char *buffer)
ea4e2bc4 3246{
ea4e2bc4
SR
3247 struct dyn_ftrace *rec;
3248 struct ftrace_page *pg;
f9349a8f 3249 int search_len;
c7c6b1fe 3250 int fail = 1;
f9349a8f
FW
3251 int type, not;
3252 char *search;
3253 bool exists;
3254 int i;
ea4e2bc4 3255
f9349a8f 3256 /* decode regex */
3f6fe06d 3257 type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
c7c6b1fe
LZ
3258 if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3259 return -EBUSY;
f9349a8f
FW
3260
3261 search_len = strlen(search);
3262
52baf119 3263 mutex_lock(&ftrace_lock);
45a4a237
SR
3264
3265 if (unlikely(ftrace_disabled)) {
3266 mutex_unlock(&ftrace_lock);
3267 return -ENODEV;
3268 }
3269
265c831c
SR
3270 do_for_each_ftrace_rec(pg, rec) {
3271
45a4a237 3272 if (rec->flags & FTRACE_FL_FREE)
265c831c
SR
3273 continue;
3274
b9df92d2 3275 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
c7c6b1fe 3276 /* if it is in the array */
f9349a8f 3277 exists = false;
c7c6b1fe 3278 for (i = 0; i < *idx; i++) {
f9349a8f
FW
3279 if (array[i] == rec->ip) {
3280 exists = true;
265c831c
SR
3281 break;
3282 }
c7c6b1fe
LZ
3283 }
3284
3285 if (!not) {
3286 fail = 0;
3287 if (!exists) {
3288 array[(*idx)++] = rec->ip;
3289 if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3290 goto out;
3291 }
3292 } else {
3293 if (exists) {
3294 array[i] = array[--(*idx)];
3295 array[*idx] = 0;
3296 fail = 0;
3297 }
3298 }
ea4e2bc4 3299 }
265c831c 3300 } while_for_each_ftrace_rec();
c7c6b1fe 3301out:
52baf119 3302 mutex_unlock(&ftrace_lock);
ea4e2bc4 3303
c7c6b1fe
LZ
3304 if (fail)
3305 return -EINVAL;
3306
3307 ftrace_graph_filter_enabled = 1;
3308 return 0;
ea4e2bc4
SR
3309}
3310
3311static ssize_t
3312ftrace_graph_write(struct file *file, const char __user *ubuf,
3313 size_t cnt, loff_t *ppos)
3314{
689fd8b6 3315 struct trace_parser parser;
4ba7978e 3316 ssize_t read, ret;
ea4e2bc4 3317
c7c6b1fe 3318 if (!cnt)
ea4e2bc4
SR
3319 return 0;
3320
3321 mutex_lock(&graph_lock);
3322
689fd8b6 3323 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3324 ret = -ENOMEM;
1eb90f13 3325 goto out_unlock;
ea4e2bc4
SR
3326 }
3327
689fd8b6 3328 read = trace_get_user(&parser, ubuf, cnt, ppos);
ea4e2bc4 3329
4ba7978e 3330 if (read >= 0 && trace_parser_loaded((&parser))) {
689fd8b6 3331 parser.buffer[parser.idx] = 0;
3332
3333 /* we allow only one expression at a time */
a4ec5e0c 3334 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
689fd8b6 3335 parser.buffer);
ea4e2bc4 3336 if (ret)
1eb90f13 3337 goto out_free;
ea4e2bc4 3338 }
ea4e2bc4
SR
3339
3340 ret = read;
1eb90f13
LZ
3341
3342out_free:
689fd8b6 3343 trace_parser_put(&parser);
1eb90f13 3344out_unlock:
ea4e2bc4
SR
3345 mutex_unlock(&graph_lock);
3346
3347 return ret;
3348}
3349
3350static const struct file_operations ftrace_graph_fops = {
87827111
LZ
3351 .open = ftrace_graph_open,
3352 .read = seq_read,
3353 .write = ftrace_graph_write,
3354 .release = ftrace_graph_release,
6038f373 3355 .llseek = seq_lseek,
ea4e2bc4
SR
3356};
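
/*
 * Example (illustrative) usage of the set_graph_function file backed by
 * these fops:
 *
 *	echo do_IRQ > /sys/kernel/debug/tracing/set_graph_function
 *
 * A plain '>' redirect opens the file with O_TRUNC and therefore clears
 * the existing graph filter before the new expression is added.
 */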
3357#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3358
df4fc315 3359static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
5072c59f 3360{
5072c59f 3361
5452af66
FW
3362 trace_create_file("available_filter_functions", 0444,
3363 d_tracer, NULL, &ftrace_avail_fops);
5072c59f 3364
647bcd03
SR
3365 trace_create_file("enabled_functions", 0444,
3366 d_tracer, NULL, &ftrace_enabled_fops);
3367
5452af66
FW
3368 trace_create_file("set_ftrace_filter", 0644, d_tracer,
3369 NULL, &ftrace_filter_fops);
41c52c0d 3370
5452af66 3371 trace_create_file("set_ftrace_notrace", 0644, d_tracer,
41c52c0d 3372 NULL, &ftrace_notrace_fops);
ad90c0e3 3373
ea4e2bc4 3374#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5452af66 3375 trace_create_file("set_graph_function", 0444, d_tracer,
ea4e2bc4
SR
3376 NULL,
3377 &ftrace_graph_fops);
ea4e2bc4
SR
3378#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3379
5072c59f
SR
3380 return 0;
3381}
3382
5cb084bb 3383static int ftrace_process_locs(struct module *mod,
31e88909 3384 unsigned long *start,
68bf21aa
SR
3385 unsigned long *end)
3386{
3387 unsigned long *p;
3388 unsigned long addr;
4376cac6 3389 unsigned long flags = 0; /* Shut up gcc */
68bf21aa 3390
e6ea44e9 3391 mutex_lock(&ftrace_lock);
68bf21aa
SR
3392 p = start;
3393 while (p < end) {
3394 addr = ftrace_call_adjust(*p++);
20e5227e
SR
3395 /*
3396 * Some architecture linkers will pad between
3397 * the different mcount_loc sections of different
3398 * object files to satisfy alignments.
3399 * Skip any NULL pointers.
3400 */
3401 if (!addr)
3402 continue;
68bf21aa 3403 ftrace_record_ip(addr);
68bf21aa
SR
3404 }
3405
a4f18ed1 3406 /*
4376cac6
SR
3407 * We only need to disable interrupts on start up
3408 * because we are modifying code that an interrupt
3409 * may execute, and the modification is not atomic.
3410 * But for modules, nothing runs the code we modify
3411 * until we are finished with it, and there's no
3412 * reason to cause large interrupt latencies while we do it.
a4f18ed1 3413 */
4376cac6
SR
3414 if (!mod)
3415 local_irq_save(flags);
31e88909 3416 ftrace_update_code(mod);
4376cac6
SR
3417 if (!mod)
3418 local_irq_restore(flags);
e6ea44e9 3419 mutex_unlock(&ftrace_lock);
68bf21aa
SR
3420
3421 return 0;
3422}
3423
93eb677d 3424#ifdef CONFIG_MODULES
e7247a15 3425void ftrace_release_mod(struct module *mod)
93eb677d
SR
3426{
3427 struct dyn_ftrace *rec;
3428 struct ftrace_page *pg;
93eb677d 3429
45a4a237
SR
3430 mutex_lock(&ftrace_lock);
3431
e7247a15 3432 if (ftrace_disabled)
45a4a237 3433 goto out_unlock;
93eb677d 3434
93eb677d 3435 do_for_each_ftrace_rec(pg, rec) {
e7247a15 3436 if (within_module_core(rec->ip, mod)) {
93eb677d
SR
3437 /*
 3438 			 * rec->ip is changed in ftrace_free_rec(); it should no
 3439 			 * longer fall within the module's range once the record is freed.
3440 */
3441 FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
3442 ftrace_free_rec(rec);
3443 }
3444 } while_for_each_ftrace_rec();
45a4a237 3445 out_unlock:
93eb677d
SR
3446 mutex_unlock(&ftrace_lock);
3447}
3448
3449static void ftrace_init_module(struct module *mod,
3450 unsigned long *start, unsigned long *end)
90d595fe 3451{
00fd61ae 3452 if (ftrace_disabled || start == end)
fed1939c 3453 return;
5cb084bb 3454 ftrace_process_locs(mod, start, end);
90d595fe
SR
3455}
3456
93eb677d
SR
3457static int ftrace_module_notify(struct notifier_block *self,
3458 unsigned long val, void *data)
3459{
3460 struct module *mod = data;
3461
3462 switch (val) {
3463 case MODULE_STATE_COMING:
3464 ftrace_init_module(mod, mod->ftrace_callsites,
3465 mod->ftrace_callsites +
3466 mod->num_ftrace_callsites);
3467 break;
3468 case MODULE_STATE_GOING:
e7247a15 3469 ftrace_release_mod(mod);
93eb677d
SR
3470 break;
3471 }
3472
3473 return 0;
3474}
3475#else
3476static int ftrace_module_notify(struct notifier_block *self,
3477 unsigned long val, void *data)
3478{
3479 return 0;
3480}
3481#endif /* CONFIG_MODULES */
3482
3483struct notifier_block ftrace_module_nb = {
3484 .notifier_call = ftrace_module_notify,
3485 .priority = 0,
3486};
3487
68bf21aa
SR
3488extern unsigned long __start_mcount_loc[];
3489extern unsigned long __stop_mcount_loc[];
3490
3491void __init ftrace_init(void)
3492{
3493 unsigned long count, addr, flags;
3494 int ret;
3495
3496 /* Keep the ftrace pointer to the stub */
3497 addr = (unsigned long)ftrace_stub;
3498
3499 local_irq_save(flags);
3500 ftrace_dyn_arch_init(&addr);
3501 local_irq_restore(flags);
3502
3503 /* ftrace_dyn_arch_init places the return code in addr */
3504 if (addr)
3505 goto failed;
3506
3507 count = __stop_mcount_loc - __start_mcount_loc;
3508
3509 ret = ftrace_dyn_table_alloc(count);
3510 if (ret)
3511 goto failed;
3512
3513 last_ftrace_enabled = ftrace_enabled = 1;
3514
5cb084bb 3515 ret = ftrace_process_locs(NULL,
31e88909 3516 __start_mcount_loc,
68bf21aa
SR
3517 __stop_mcount_loc);
3518
93eb677d 3519 ret = register_module_notifier(&ftrace_module_nb);
24ed0c4b 3520 if (ret)
93eb677d
SR
3521 pr_warning("Failed to register trace ftrace module notifier\n");
3522
2af15d6a
SR
3523 set_ftrace_early_filters();
3524
68bf21aa
SR
3525 return;
3526 failed:
3527 ftrace_disabled = 1;
3528}
68bf21aa 3529
3d083395 3530#else
0b6e4d56 3531
2b499381 3532static struct ftrace_ops global_ops = {
bd69c30b
SR
3533 .func = ftrace_stub,
3534};
3535
0b6e4d56
FW
3536static int __init ftrace_nodyn_init(void)
3537{
3538 ftrace_enabled = 1;
3539 return 0;
3540}
3541device_initcall(ftrace_nodyn_init);
3542
df4fc315
SR
3543static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
3544static inline void ftrace_startup_enable(int command) { }
5a45cfe1 3545/* Keep as macros so we do not need to define the commands */
3b6cfdb1
SR
3546# define ftrace_startup(ops, command) \
3547 ({ \
3548 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
3549 0; \
3550 })
bd69c30b 3551# define ftrace_shutdown(ops, command) do { } while (0)
c7aafc54
IM
3552# define ftrace_startup_sysctl() do { } while (0)
3553# define ftrace_shutdown_sysctl() do { } while (0)
b848914c
SR
3554
3555static inline int
3556ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
3557{
3558 return 1;
3559}
3560
3d083395
SR
3561#endif /* CONFIG_DYNAMIC_FTRACE */
3562
b848914c
SR
3563static void
3564ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
3565{
cdbe61bf 3566 struct ftrace_ops *op;
b848914c 3567
b1cff0ad
SR
3568 if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
3569 return;
3570
3571 trace_recursion_set(TRACE_INTERNAL_BIT);
cdbe61bf
SR
3572 /*
3573 * Some of the ops may be dynamically allocated,
3574 * they must be freed after a synchronize_sched().
3575 */
3576 preempt_disable_notrace();
3577 op = rcu_dereference_raw(ftrace_ops_list);
b848914c
SR
3578 while (op != &ftrace_list_end) {
3579 if (ftrace_ops_test(op, ip))
3580 op->func(ip, parent_ip);
3581 op = rcu_dereference_raw(op->next);
3582 };
cdbe61bf 3583 preempt_enable_notrace();
b1cff0ad 3584 trace_recursion_clear(TRACE_INTERNAL_BIT);
b848914c
SR
3585}
3586
e32d8956 3587static void clear_ftrace_swapper(void)
978f3a45
SR
3588{
3589 struct task_struct *p;
e32d8956 3590 int cpu;
978f3a45 3591
e32d8956
SR
3592 get_online_cpus();
3593 for_each_online_cpu(cpu) {
3594 p = idle_task(cpu);
978f3a45 3595 clear_tsk_trace_trace(p);
e32d8956
SR
3596 }
3597 put_online_cpus();
3598}
978f3a45 3599
e32d8956
SR
3600static void set_ftrace_swapper(void)
3601{
3602 struct task_struct *p;
3603 int cpu;
3604
3605 get_online_cpus();
3606 for_each_online_cpu(cpu) {
3607 p = idle_task(cpu);
3608 set_tsk_trace_trace(p);
3609 }
3610 put_online_cpus();
978f3a45
SR
3611}
3612
e32d8956
SR
3613static void clear_ftrace_pid(struct pid *pid)
3614{
3615 struct task_struct *p;
3616
229c4ef8 3617 rcu_read_lock();
e32d8956
SR
3618 do_each_pid_task(pid, PIDTYPE_PID, p) {
3619 clear_tsk_trace_trace(p);
3620 } while_each_pid_task(pid, PIDTYPE_PID, p);
229c4ef8
ON
3621 rcu_read_unlock();
3622
e32d8956
SR
3623 put_pid(pid);
3624}
3625
3626static void set_ftrace_pid(struct pid *pid)
978f3a45
SR
3627{
3628 struct task_struct *p;
3629
229c4ef8 3630 rcu_read_lock();
978f3a45
SR
3631 do_each_pid_task(pid, PIDTYPE_PID, p) {
3632 set_tsk_trace_trace(p);
3633 } while_each_pid_task(pid, PIDTYPE_PID, p);
229c4ef8 3634 rcu_read_unlock();
978f3a45
SR
3635}
3636
756d17ee 3637static void clear_ftrace_pid_task(struct pid *pid)
e32d8956 3638{
756d17ee 3639 if (pid == ftrace_swapper_pid)
e32d8956
SR
3640 clear_ftrace_swapper();
3641 else
756d17ee 3642 clear_ftrace_pid(pid);
e32d8956
SR
3643}
3644
3645static void set_ftrace_pid_task(struct pid *pid)
3646{
3647 if (pid == ftrace_swapper_pid)
3648 set_ftrace_swapper();
3649 else
3650 set_ftrace_pid(pid);
3651}
3652
756d17ee 3653static int ftrace_pid_add(int p)
df4fc315 3654{
978f3a45 3655 struct pid *pid;
756d17ee 3656 struct ftrace_pid *fpid;
3657 int ret = -EINVAL;
df4fc315 3658
756d17ee 3659 mutex_lock(&ftrace_lock);
df4fc315 3660
756d17ee 3661 if (!p)
3662 pid = ftrace_swapper_pid;
3663 else
3664 pid = find_get_pid(p);
df4fc315 3665
756d17ee 3666 if (!pid)
3667 goto out;
df4fc315 3668
756d17ee 3669 ret = 0;
df4fc315 3670
756d17ee 3671 list_for_each_entry(fpid, &ftrace_pids, list)
3672 if (fpid->pid == pid)
3673 goto out_put;
978f3a45 3674
756d17ee 3675 ret = -ENOMEM;
df4fc315 3676
756d17ee 3677 fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
3678 if (!fpid)
3679 goto out_put;
df4fc315 3680
756d17ee 3681 list_add(&fpid->list, &ftrace_pids);
3682 fpid->pid = pid;
0ef8cde5 3683
756d17ee 3684 set_ftrace_pid_task(pid);
978f3a45 3685
756d17ee 3686 ftrace_update_pid_func();
3687 ftrace_startup_enable(0);
3688
3689 mutex_unlock(&ftrace_lock);
3690 return 0;
3691
3692out_put:
3693 if (pid != ftrace_swapper_pid)
3694 put_pid(pid);
978f3a45 3695
756d17ee 3696out:
3697 mutex_unlock(&ftrace_lock);
3698 return ret;
3699}
3700
3701static void ftrace_pid_reset(void)
3702{
3703 struct ftrace_pid *fpid, *safe;
978f3a45 3704
756d17ee 3705 mutex_lock(&ftrace_lock);
3706 list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
3707 struct pid *pid = fpid->pid;
3708
3709 clear_ftrace_pid_task(pid);
3710
3711 list_del(&fpid->list);
3712 kfree(fpid);
df4fc315
SR
3713 }
3714
df4fc315
SR
3715 ftrace_update_pid_func();
3716 ftrace_startup_enable(0);
3717
e6ea44e9 3718 mutex_unlock(&ftrace_lock);
756d17ee 3719}
df4fc315 3720
756d17ee 3721static void *fpid_start(struct seq_file *m, loff_t *pos)
3722{
3723 mutex_lock(&ftrace_lock);
3724
3725 if (list_empty(&ftrace_pids) && (!*pos))
3726 return (void *) 1;
3727
3728 return seq_list_start(&ftrace_pids, *pos);
3729}
3730
3731static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
3732{
3733 if (v == (void *)1)
3734 return NULL;
3735
3736 return seq_list_next(v, &ftrace_pids, pos);
3737}
3738
3739static void fpid_stop(struct seq_file *m, void *p)
3740{
3741 mutex_unlock(&ftrace_lock);
3742}
3743
3744static int fpid_show(struct seq_file *m, void *v)
3745{
3746 const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
3747
3748 if (v == (void *)1) {
3749 seq_printf(m, "no pid\n");
3750 return 0;
3751 }
3752
3753 if (fpid->pid == ftrace_swapper_pid)
3754 seq_printf(m, "swapper tasks\n");
3755 else
3756 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
3757
3758 return 0;
3759}
3760
3761static const struct seq_operations ftrace_pid_sops = {
3762 .start = fpid_start,
3763 .next = fpid_next,
3764 .stop = fpid_stop,
3765 .show = fpid_show,
3766};
3767
3768static int
3769ftrace_pid_open(struct inode *inode, struct file *file)
3770{
3771 int ret = 0;
3772
3773 if ((file->f_mode & FMODE_WRITE) &&
3774 (file->f_flags & O_TRUNC))
3775 ftrace_pid_reset();
3776
3777 if (file->f_mode & FMODE_READ)
3778 ret = seq_open(file, &ftrace_pid_sops);
3779
3780 return ret;
3781}
3782
df4fc315
SR
3783static ssize_t
3784ftrace_pid_write(struct file *filp, const char __user *ubuf,
3785 size_t cnt, loff_t *ppos)
3786{
457dc928 3787 char buf[64], *tmp;
df4fc315
SR
3788 long val;
3789 int ret;
3790
3791 if (cnt >= sizeof(buf))
3792 return -EINVAL;
3793
3794 if (copy_from_user(&buf, ubuf, cnt))
3795 return -EFAULT;
3796
3797 buf[cnt] = 0;
3798
756d17ee 3799 /*
3800 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
3801 * to clean the filter quietly.
3802 */
457dc928
IM
3803 tmp = strstrip(buf);
3804 if (strlen(tmp) == 0)
756d17ee 3805 return 1;
3806
457dc928 3807 ret = strict_strtol(tmp, 10, &val);
df4fc315
SR
3808 if (ret < 0)
3809 return ret;
3810
756d17ee 3811 ret = ftrace_pid_add(val);
df4fc315 3812
756d17ee 3813 return ret ? ret : cnt;
3814}
df4fc315 3815
756d17ee 3816static int
3817ftrace_pid_release(struct inode *inode, struct file *file)
3818{
3819 if (file->f_mode & FMODE_READ)
3820 seq_release(inode, file);
df4fc315 3821
756d17ee 3822 return 0;
df4fc315
SR
3823}
3824
5e2336a0 3825static const struct file_operations ftrace_pid_fops = {
756d17ee 3826 .open = ftrace_pid_open,
3827 .write = ftrace_pid_write,
3828 .read = seq_read,
3829 .llseek = seq_lseek,
3830 .release = ftrace_pid_release,
df4fc315
SR
3831};
3832
3833static __init int ftrace_init_debugfs(void)
3834{
3835 struct dentry *d_tracer;
df4fc315
SR
3836
3837 d_tracer = tracing_init_dentry();
3838 if (!d_tracer)
3839 return 0;
3840
3841 ftrace_init_dyn_debugfs(d_tracer);
3842
5452af66
FW
3843 trace_create_file("set_ftrace_pid", 0644, d_tracer,
3844 NULL, &ftrace_pid_fops);
493762fc
SR
3845
3846 ftrace_profile_debugfs(d_tracer);
3847
df4fc315
SR
3848 return 0;
3849}
df4fc315
SR
3850fs_initcall(ftrace_init_debugfs);
3851
a2bb6a3d 3852/**
81adbdc0 3853 * ftrace_kill - kill ftrace
a2bb6a3d
SR
3854 *
 3855 * This function should be used by panic code. It stops ftrace
 3856 * but in a not so nice way: it simply disables tracing and clears
 3857 * the trace function, without taking any locks or doing any cleanup.
3858 */
81adbdc0 3859void ftrace_kill(void)
a2bb6a3d
SR
3860{
3861 ftrace_disabled = 1;
3862 ftrace_enabled = 0;
a2bb6a3d
SR
3863 clear_ftrace_function();
3864}
3865
e0a413f6
SR
3866/**
 3867 * ftrace_is_dead - Test if ftrace is dead or not.
3868 */
3869int ftrace_is_dead(void)
3870{
3871 return ftrace_disabled;
3872}
3873
16444a8a 3874/**
3d083395
SR
3875 * register_ftrace_function - register a function for profiling
3876 * @ops - ops structure that holds the function for profiling.
16444a8a 3877 *
3d083395
SR
3878 * Register a function to be called by all functions in the
3879 * kernel.
3880 *
3881 * Note: @ops->func and all the functions it calls must be labeled
3882 * with "notrace", otherwise it will go into a
3883 * recursive loop.
16444a8a 3884 */
3d083395 3885int register_ftrace_function(struct ftrace_ops *ops)
16444a8a 3886{
45a4a237 3887 int ret = -1;
4eebcc81 3888
e6ea44e9 3889 mutex_lock(&ftrace_lock);
e7d3737e 3890
45a4a237
SR
3891 if (unlikely(ftrace_disabled))
3892 goto out_unlock;
3893
b0fc494f 3894 ret = __register_ftrace_function(ops);
b848914c 3895 if (!ret)
a1cd6173 3896 ret = ftrace_startup(ops, 0);
b848914c 3897
b0fc494f 3898
45a4a237 3899 out_unlock:
e6ea44e9 3900 mutex_unlock(&ftrace_lock);
b0fc494f 3901 return ret;
3d083395 3902}
cdbe61bf 3903EXPORT_SYMBOL_GPL(register_ftrace_function);
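
/*
 * A minimal sketch (hypothetical names, not part of this file) of the
 * register/unregister pair described above.  The callback is marked
 * notrace, as the comment above requires, so it cannot recurse.
 */
static void notrace example_trace_call(unsigned long ip, unsigned long parent_ip)
{
	/* runs for every traced function; keep this path short */
}

static struct ftrace_ops example_trace_ops __read_mostly = {
	.func = example_trace_call,
};

static int __init example_trace_init(void)
{
	return register_ftrace_function(&example_trace_ops);
}

static void example_trace_exit(void)
{
	unregister_ftrace_function(&example_trace_ops);
}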
3d083395
SR
3904
3905/**
32632920 3906 * unregister_ftrace_function - unregister a function for profiling.
3d083395
SR
3907 * @ops - ops structure that holds the function to unregister
3908 *
3909 * Unregister a function that was added to be called by ftrace profiling.
3910 */
3911int unregister_ftrace_function(struct ftrace_ops *ops)
3912{
3913 int ret;
3914
e6ea44e9 3915 mutex_lock(&ftrace_lock);
3d083395 3916 ret = __unregister_ftrace_function(ops);
b848914c
SR
3917 if (!ret)
3918 ftrace_shutdown(ops, 0);
e6ea44e9 3919 mutex_unlock(&ftrace_lock);
b0fc494f
SR
3920
3921 return ret;
3922}
cdbe61bf 3923EXPORT_SYMBOL_GPL(unregister_ftrace_function);
b0fc494f 3924
e309b41d 3925int
b0fc494f 3926ftrace_enable_sysctl(struct ctl_table *table, int write,
8d65af78 3927 void __user *buffer, size_t *lenp,
b0fc494f
SR
3928 loff_t *ppos)
3929{
45a4a237 3930 int ret = -ENODEV;
4eebcc81 3931
e6ea44e9 3932 mutex_lock(&ftrace_lock);
b0fc494f 3933
45a4a237
SR
3934 if (unlikely(ftrace_disabled))
3935 goto out;
3936
3937 ret = proc_dointvec(table, write, buffer, lenp, ppos);
b0fc494f 3938
a32c7765 3939 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
b0fc494f
SR
3940 goto out;
3941
a32c7765 3942 last_ftrace_enabled = !!ftrace_enabled;
b0fc494f
SR
3943
3944 if (ftrace_enabled) {
3945
3946 ftrace_startup_sysctl();
3947
3948 /* we are starting ftrace again */
b848914c
SR
3949 if (ftrace_ops_list != &ftrace_list_end) {
3950 if (ftrace_ops_list->next == &ftrace_list_end)
3951 ftrace_trace_function = ftrace_ops_list->func;
b0fc494f 3952 else
b848914c 3953 ftrace_trace_function = ftrace_ops_list_func;
b0fc494f
SR
3954 }
3955
3956 } else {
3957 /* stopping ftrace calls (just send to ftrace_stub) */
3958 ftrace_trace_function = ftrace_stub;
3959
3960 ftrace_shutdown_sysctl();
3961 }
3962
3963 out:
e6ea44e9 3964 mutex_unlock(&ftrace_lock);
3d083395 3965 return ret;
16444a8a 3966}
f17845e5 3967
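
/*
 * The handler above backs the kernel.ftrace_enabled sysctl
 * (/proc/sys/kernel/ftrace_enabled).  Example (illustrative) usage:
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled
 *	echo 1 > /proc/sys/kernel/ftrace_enabled
 *
 * Disabling it points ftrace_trace_function at ftrace_stub; re-enabling
 * restores either the single registered callback or the list function.
 */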
fb52607a 3968#ifdef CONFIG_FUNCTION_GRAPH_TRACER
e7d3737e 3969
597af815 3970static int ftrace_graph_active;
4a2b8dda 3971static struct notifier_block ftrace_suspend_notifier;
e7d3737e 3972
e49dc19c
SR
3973int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
3974{
3975 return 0;
3976}
3977
287b6e68
FW
3978/* The callbacks that hook a function */
3979trace_func_graph_ret_t ftrace_graph_return =
3980 (trace_func_graph_ret_t)ftrace_stub;
e49dc19c 3981trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
f201ae23
FW
3982
3983/* Try to assign a return stack to each of FTRACE_RETSTACK_ALLOC_SIZE tasks. */
3984static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3985{
3986 int i;
3987 int ret = 0;
3988 unsigned long flags;
3989 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
3990 struct task_struct *g, *t;
3991
3992 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
3993 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
3994 * sizeof(struct ftrace_ret_stack),
3995 GFP_KERNEL);
3996 if (!ret_stack_list[i]) {
3997 start = 0;
3998 end = i;
3999 ret = -ENOMEM;
4000 goto free;
4001 }
4002 }
4003
4004 read_lock_irqsave(&tasklist_lock, flags);
4005 do_each_thread(g, t) {
4006 if (start == end) {
4007 ret = -EAGAIN;
4008 goto unlock;
4009 }
4010
4011 if (t->ret_stack == NULL) {
380c4b14 4012 atomic_set(&t->tracing_graph_pause, 0);
f201ae23 4013 atomic_set(&t->trace_overrun, 0);
26c01624
SR
4014 t->curr_ret_stack = -1;
4015 /* Make sure the tasks see the -1 first: */
4016 smp_wmb();
4017 t->ret_stack = ret_stack_list[start++];
f201ae23
FW
4018 }
4019 } while_each_thread(g, t);
4020
4021unlock:
4022 read_unlock_irqrestore(&tasklist_lock, flags);
4023free:
4024 for (i = start; i < end; i++)
4025 kfree(ret_stack_list[i]);
4026 return ret;
4027}
4028
8aef2d28 4029static void
38516ab5
SR
4030ftrace_graph_probe_sched_switch(void *ignore,
4031 struct task_struct *prev, struct task_struct *next)
8aef2d28
SR
4032{
4033 unsigned long long timestamp;
4034 int index;
4035
be6f164a
SR
4036 /*
4037 * Does the user want to count the time a function was asleep?
4038 * If so, do not update the time stamps.
4039 */
4040 if (trace_flags & TRACE_ITER_SLEEP_TIME)
4041 return;
4042
8aef2d28
SR
4043 timestamp = trace_clock_local();
4044
4045 prev->ftrace_timestamp = timestamp;
4046
4047 /* only process tasks that we timestamped */
4048 if (!next->ftrace_timestamp)
4049 return;
4050
4051 /*
4052 * Update all the counters in next to make up for the
4053 * time next was sleeping.
4054 */
4055 timestamp -= next->ftrace_timestamp;
4056
4057 for (index = next->curr_ret_stack; index >= 0; index--)
4058 next->ret_stack[index].calltime += timestamp;
4059}
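
/*
 * Worked example (illustrative numbers): suppose "next" entered a traced
 * function at calltime = 100us and was later switched out, setting its
 * ftrace_timestamp to 150us.  If it is switched back in at 170us, the loop
 * above adds (170 - 150) = 20us to every pending calltime, so the entry
 * becomes 120us.  When the function eventually returns, rettime - calltime
 * no longer includes the 20us the task spent off the CPU.
 */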
4060
f201ae23 4061/* Allocate a return stack for each task */
fb52607a 4062static int start_graph_tracing(void)
f201ae23
FW
4063{
4064 struct ftrace_ret_stack **ret_stack_list;
5b058bcd 4065 int ret, cpu;
f201ae23
FW
4066
4067 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4068 sizeof(struct ftrace_ret_stack *),
4069 GFP_KERNEL);
4070
4071 if (!ret_stack_list)
4072 return -ENOMEM;
4073
5b058bcd 4074 /* The boot CPU idle task's ret_stack will never be freed */
179c498a
SR
4075 for_each_online_cpu(cpu) {
4076 if (!idle_task(cpu)->ret_stack)
868baf07 4077 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
179c498a 4078 }
5b058bcd 4079
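	/*
	 * alloc_retstack_tasklist() hands out return stacks in batches of
	 * FTRACE_RETSTACK_ALLOC_SIZE.  If more tasks still lack a ret_stack
	 * than one batch covers, it returns -EAGAIN, so keep retrying until
	 * every task has been given one.
	 */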
f201ae23
FW
4080 do {
4081 ret = alloc_retstack_tasklist(ret_stack_list);
4082 } while (ret == -EAGAIN);
4083
8aef2d28 4084 if (!ret) {
38516ab5 4085 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
8aef2d28
SR
4086 if (ret)
4087 pr_info("ftrace_graph: Couldn't activate tracepoint"
4088 " probe to kernel_sched_switch\n");
4089 }
4090
f201ae23
FW
4091 kfree(ret_stack_list);
4092 return ret;
4093}
4094
4a2b8dda
FW
4095/*
4096 * Hibernation protection.
4097 * The state of the current task is too unstable during
4098 * suspend/restore to disk. We want to protect against that.
4099 */
4100static int
4101ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4102 void *unused)
4103{
4104 switch (state) {
4105 case PM_HIBERNATION_PREPARE:
4106 pause_graph_tracing();
4107 break;
4108
4109 case PM_POST_HIBERNATION:
4110 unpause_graph_tracing();
4111 break;
4112 }
4113 return NOTIFY_DONE;
4114}
4115
287b6e68
FW
4116int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4117 trace_func_graph_ent_t entryfunc)
15e6cb36 4118{
e7d3737e
FW
4119 int ret = 0;
4120
e6ea44e9 4121 mutex_lock(&ftrace_lock);
e7d3737e 4122
05ce5818 4123 /* we currently allow only one tracer registered at a time */
597af815 4124 if (ftrace_graph_active) {
05ce5818
SR
4125 ret = -EBUSY;
4126 goto out;
4127 }
4128
4a2b8dda
FW
4129 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4130 register_pm_notifier(&ftrace_suspend_notifier);
4131
597af815 4132 ftrace_graph_active++;
fb52607a 4133 ret = start_graph_tracing();
f201ae23 4134 if (ret) {
597af815 4135 ftrace_graph_active--;
f201ae23
FW
4136 goto out;
4137 }
e53a6319 4138
287b6e68
FW
4139 ftrace_graph_return = retfunc;
4140 ftrace_graph_entry = entryfunc;
e53a6319 4141
a1cd6173 4142 ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
e7d3737e
FW
4143
4144out:
e6ea44e9 4145 mutex_unlock(&ftrace_lock);
e7d3737e 4146 return ret;
15e6cb36
FW
4147}
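
/*
 * Illustrative sketch only (not part of this file): a minimal pair of graph
 * callbacks and their registration.  The function names are made up; the
 * signatures match trace_func_graph_ent_t and trace_func_graph_ret_t.
 */
static int notrace example_graph_entry(struct ftrace_graph_ent *trace)
{
	/* return non-zero to also trace this function's return */
	return 1;
}

static void notrace example_graph_return(struct ftrace_graph_ret *trace)
{
	/* trace->rettime - trace->calltime is the (sleep-adjusted) duration */
}

static int __init example_graph_init(void)
{
	/* note the argument order: return handler first, entry handler second */
	return register_ftrace_graph(example_graph_return, example_graph_entry);
}

static void __exit example_graph_exit(void)
{
	unregister_ftrace_graph();
}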
4148
fb52607a 4149void unregister_ftrace_graph(void)
15e6cb36 4150{
e6ea44e9 4151 mutex_lock(&ftrace_lock);
e7d3737e 4152
597af815 4153 if (unlikely(!ftrace_graph_active))
2aad1b76
SR
4154 goto out;
4155
597af815 4156 ftrace_graph_active--;
287b6e68 4157 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
e49dc19c 4158 ftrace_graph_entry = ftrace_graph_entry_stub;
bd69c30b 4159 ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
4a2b8dda 4160 unregister_pm_notifier(&ftrace_suspend_notifier);
38516ab5 4161 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
e7d3737e 4162
2aad1b76 4163 out:
e6ea44e9 4164 mutex_unlock(&ftrace_lock);
15e6cb36 4165}
f201ae23 4166
868baf07
SR
4167static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4168
4169static void
4170graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4171{
4172 atomic_set(&t->tracing_graph_pause, 0);
4173 atomic_set(&t->trace_overrun, 0);
4174 t->ftrace_timestamp = 0;
25985edc 4175 /* make curr_ret_stack visible before we add the ret_stack */
868baf07
SR
4176 smp_wmb();
4177 t->ret_stack = ret_stack;
4178}
4179
4180/*
4181 * Allocate a return stack for the idle task. May be called the first
4182 * time through, or when a CPU comes online via hotplug.
4183 */
4184void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4185{
4186 t->curr_ret_stack = -1;
4187 /*
4188 * The idle task has no parent; it either has its own
4189 * stack or no stack at all.
4190 */
4191 if (t->ret_stack)
4192 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4193
4194 if (ftrace_graph_active) {
4195 struct ftrace_ret_stack *ret_stack;
4196
4197 ret_stack = per_cpu(idle_ret_stack, cpu);
4198 if (!ret_stack) {
4199 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4200 * sizeof(struct ftrace_ret_stack),
4201 GFP_KERNEL);
4202 if (!ret_stack)
4203 return;
4204 per_cpu(idle_ret_stack, cpu) = ret_stack;
4205 }
4206 graph_init_task(t, ret_stack);
4207 }
4208}
4209
f201ae23 4210/* Allocate a return stack for newly created task */
fb52607a 4211void ftrace_graph_init_task(struct task_struct *t)
f201ae23 4212{
84047e36
SR
4213 /* Make sure we do not use the parent ret_stack */
4214 t->ret_stack = NULL;
ea14eb71 4215 t->curr_ret_stack = -1;
84047e36 4216
597af815 4217 if (ftrace_graph_active) {
82310a32
SR
4218 struct ftrace_ret_stack *ret_stack;
4219
4220 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
f201ae23
FW
4221 * sizeof(struct ftrace_ret_stack),
4222 GFP_KERNEL);
82310a32 4223 if (!ret_stack)
f201ae23 4224 return;
868baf07 4225 graph_init_task(t, ret_stack);
84047e36 4226 }
f201ae23
FW
4227}
4228
fb52607a 4229void ftrace_graph_exit_task(struct task_struct *t)
f201ae23 4230{
eae849ca
FW
4231 struct ftrace_ret_stack *ret_stack = t->ret_stack;
4232
f201ae23 4233 t->ret_stack = NULL;
eae849ca
FW
4234 /* NULL must become visible to IRQs before we free it: */
4235 barrier();
4236
4237 kfree(ret_stack);
f201ae23 4238}
14a866c5
SR
4239
4240void ftrace_graph_stop(void)
4241{
4242 ftrace_stop();
4243}
15e6cb36 4244#endif