/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *   Copyright (C) 2004-2006 Ingo Molnar
 *   Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"
#define FTRACE_WARN_ON(cond)		\
	({ int ___r = cond; if (WARN_ON(___r)) ftrace_kill(); ___r; })

#define FTRACE_WARN_ON_ONCE(cond)	\
	({ int ___r = cond; if (WARN_ON_ONCE(___r)) ftrace_kill(); ___r; })

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12
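
/*
 * Worked example (editorial note, not from the original source):
 * FTRACE_FUNC_HASHSIZE works out to 1 << 7 = 128 buckets for the
 * function-probe hash below, while filter hashes start at
 * 1 << FTRACE_HASH_DEFAULT_BITS = 1024 buckets and are capped at
 * 1 << FTRACE_HASH_MAX_BITS = 4096 buckets.
 */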
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
	struct list_head list;
	struct pid *pid;
};

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);
static struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
};

static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;

static void
ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
/*
 * Traverse the ftrace_global_list, invoking all entries.  The reason that we
 * can use rcu_dereference_raw() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism.  The rcu_dereference_raw() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
static void ftrace_global_list_func(unsigned long ip,
				    unsigned long parent_ip)
{
	struct ftrace_ops *op;

	if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
		return;

	trace_recursion_set(TRACE_GLOBAL_BIT);
	op = rcu_dereference_raw(ftrace_global_list); /*see above*/
	while (op != &ftrace_list_end) {
		op->func(ip, parent_ip);
		op = rcu_dereference_raw(op->next); /*see above*/
	}
	trace_recursion_clear(TRACE_GLOBAL_BIT);
}
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be lag between the call to this
 * function and the point where tracing actually stops.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function_delay = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif
static void update_global_ops(void)
{
	ftrace_func_t func;

	/*
	 * If there's only one function registered, then call that
	 * function directly. Otherwise, we need to iterate over the
	 * registered callers.
	 */
	if (ftrace_global_list == &ftrace_list_end ||
	    ftrace_global_list->next == &ftrace_list_end)
		func = ftrace_global_list->func;
	else
		func = ftrace_global_list_func;

	/* If we filter on pids, update to use the pid function */
	if (!list_empty(&ftrace_pids)) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	}

	global_ops.func = func;
}
static void update_ftrace_function(void)
{
	ftrace_func_t func;

	update_global_ops();

	/*
	 * If we are at the end of the list and this ops is
	 * not dynamic, then have the mcount trampoline call
	 * the function directly.
	 */
	if (ftrace_ops_list == &ftrace_list_end ||
	    (ftrace_ops_list->next == &ftrace_list_end &&
	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
		func = ftrace_ops_list->func;
	else
		func = ftrace_ops_list_func;

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
#ifdef CONFIG_DYNAMIC_FTRACE
	/* do not update till all functions have been modified */
	__ftrace_trace_function_delay = func;
#else
	__ftrace_trace_function = func;
#endif
	ftrace_trace_function = ftrace_test_stop_func;
#endif
}
static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	ops->next = *list;

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer on the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (*list == ops && ops->next == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (FTRACE_WARN_ON(ops == &global_ops))
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		int first = ftrace_global_list == &ftrace_list_end;
		add_ftrace_ops(&ftrace_global_list, ops);
		ops->flags |= FTRACE_OPS_FL_ENABLED;
		if (first)
			add_ftrace_ops(&ftrace_ops_list, &global_ops);
	} else
		add_ftrace_ops(&ftrace_ops_list, ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	if (FTRACE_WARN_ON(ops == &global_ops))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ret = remove_ftrace_ops(&ftrace_global_list, ops);
		if (!ret && ftrace_global_list == &ftrace_list_end)
			ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
		if (!ret)
			ops->flags &= ~FTRACE_OPS_FL_ENABLED;
	} else
		ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	/*
	 * Dynamic ops may be freed, we must make sure that all
	 * callers are done before leaving this function.
	 */
	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
		synchronize_sched();

	return 0;
}

static void ftrace_update_pid_func(void)
{
	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	update_ftrace_function();
}
#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

static int ftrace_profile_bits __read_mostly;
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
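
/*
 * Illustrative sketch (editorial, not part of ftrace): how a profile
 * record's bucket is chosen.  With FTRACE_PROFILE_HASH_SIZE = 1024 the
 * hash uses 10 bits, so an ip lands in one of 1024 buckets.
 */
#if 0	/* example only, never built */
static struct hlist_head *example_bucket(struct ftrace_profile_stat *stat,
					 unsigned long ip)
{
	unsigned long key = hash_long(ip, ftrace_profile_bits);

	return &stat->hash[key];	/* key < FTRACE_PROFILE_HASH_SIZE */
}
#endif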
static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* when not using function graph, compare against hit counts */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "  Function                               "
		   "Hit    Time            Avg             s^2\n"
		      "  --------                               "
		   "---    ----            ---             ---\n");
#else
	seq_printf(m, "  Function                               Hit\n"
		      "  --------                               ---\n");
#endif
	return 0;
}
static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	avg = rec->time;
	do_div(avg, rec->counter);

	/* Sample variance (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		stddev = rec->time_squared - rec->counter * avg * avg;
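		/*
		 * Why this works (editorial note): for samples x_i with
		 * mean avg = (sum x_i) / n, the sample variance is
		 *   s^2 = (sum x_i^2 - n * avg^2) / (n - 1)
		 * rec->time_squared is sum x_i^2 and rec->counter is n,
		 * so the line above is the numerator; the divide by
		 * (n - 1) happens just below.
		 */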
		/*
		 * Divide only by 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide by 1000 again.
		 */
		do_div(stddev, (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}
static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. From past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 0; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	free_page((unsigned long)stat->pages);
	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}
static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
	if (!stat->hash)
		return -ENOMEM;

	if (!ftrace_profile_bits) {
		size--;	/* size is a power of two: log2(1024) = 10 bits */

		for (; size; size >>= 1)
			ftrace_profile_bits++;
	}

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_online_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}
/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;

	key = hash_long(ip, ftrace_profile_bits);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu(rec, n, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, ftrace_profile_bits);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it.
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}
static void
function_profile_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	function_profile_call(trace->func, 0);
	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zeroed, ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}
static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_sched.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}
static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};
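
/*
 * Usage sketch (editorial, assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *   # echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *   # cat /sys/kernel/debug/tracing/trace_stat/function0
 *   # echo 0 > /sys/kernel/debug/tracing/function_profile_enabled
 *
 * Writing 1/0 toggles the profiler; per-cpu results appear under
 * trace_stat/function<cpu> once the stat files below are registered.
 */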
/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		/* allocate enough for function name + cpu number */
		name = kmalloc(32, GFP_KERNEL);
		if (!name) {
			/*
			 * The files created are permanent; even if
			 * something fails we do not free the memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		snprintf(name, 32, "function%d", cpu);
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = debugfs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops	*ops;
	unsigned long		ip;
	void			*data;
	struct rcu_head		rcu;
};

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
};

struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	struct rcu_head		rcu;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)
static struct ftrace_ops global_ops = {
	.func			= ftrace_stub,
	.notrace_hash		= EMPTY_HASH,
	.filter_hash		= EMPTY_HASH,
};

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			size;
};

static struct ftrace_page *ftrace_new_pgs;

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;
static struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;
	struct hlist_node *n;

	if (!hash->count)
		return NULL;

	if (hash->size_bits > 0)
		key = hash_long(ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	if (hash->size_bits)
		key = hash_long(entry->ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	hash->count--;
}
static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tp, *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}
static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	struct hlist_node *tp;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	/* Empty hash? */
	if (!hash || !hash->count)
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}
static void
ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_func_entry *entry;
	struct hlist_node *tp, *tn;
	struct hlist_head *hhd;
	struct ftrace_hash *old_hash;
	struct ftrace_hash *new_hash;
	unsigned long key;
	int size = src->count;
	int bits = 0;
	int ret;
	int i;

	/*
	 * Remove the current set, update the hash and add
	 * the new entries.
	 */
	ftrace_hash_rec_disable(ops, enable);

	/*
	 * If the new source is empty, just free dst and assign it
	 * the empty hash.
	 */
	if (!src->count) {
		free_ftrace_hash_rcu(*dst);
		rcu_assign_pointer(*dst, EMPTY_HASH);
		/* still need to update the function records */
		ret = 0;
		goto out;
	}

	/*
	 * Make the hash size about 1/2 the # found
	 */
	for (size /= 2; size; size >>= 1)
		bits++;

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;
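
	/*
	 * Worked example (editorial note): with src->count == 1000,
	 * size /= 2 gives 500, and the loop shifts 500 -> 250 -> ...
	 * -> 1, incrementing bits nine times.  The new hash then gets
	 * 1 << 9 = 512 buckets, roughly half the entry count.
	 */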
	ret = -ENOMEM;
	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		goto out;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
			if (bits > 0)
				key = hash_long(entry->ip, bits);
			else
				key = 0;
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}

	old_hash = *dst;
	rcu_assign_pointer(*dst, new_hash);
	free_ftrace_hash_rcu(old_hash);

	ret = 0;
 out:
	/*
	 * Enable regardless of ret:
	 *  On success, we enable the new hash.
	 *  On failure, we re-enable the original hash.
	 */
	ftrace_hash_rec_enable(ops, enable);

	return ret;
}
/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
{
	struct ftrace_hash *filter_hash;
	struct ftrace_hash *notrace_hash;
	int ret;

	filter_hash = rcu_dereference_raw(ops->filter_hash);
	notrace_hash = rcu_dereference_raw(ops->notrace_hash);

	if ((!filter_hash || !filter_hash->count ||
	     ftrace_lookup_ip(filter_hash, ip)) &&
	    (!notrace_hash || !notrace_hash->count ||
	     !ftrace_lookup_ip(notrace_hash, ip)))
		ret = 1;
	else
		ret = 0;

	return ret;
}
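
/*
 * Concrete example (editorial, not from the original source): with
 * filter_hash = { kmalloc } and an empty notrace_hash, ops->func is
 * called only for kmalloc.  With an empty filter_hash and
 * notrace_hash = { kfree }, it is called for every traced function
 * except kfree.
 */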
/*
 * This is a double for loop. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
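
/*
 * Usage sketch for the iteration macros above (editorial example,
 * never built):
 */
#if 0
static int example_count_records(void)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int count = 0;

	/* caller must hold ftrace_lock; leave early with goto, not break */
	do_for_each_ftrace_rec(pg, rec) {
		if (!rec->ip)
			goto out;
		count++;
	} while_for_each_ftrace_rec();
 out:
	return count;
}
#endif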
/**
 * ftrace_location - return true if the ip given is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns 1 if @ip given is a pointer to an ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_location(unsigned long ip)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;

	do_for_each_ftrace_rec(pg, rec) {
		if (rec->ip == ip)
			return 1;
	} while_for_each_ftrace_rec();

	return 0;
}
static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int count = 0;
	int all = 0;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inverted.
	 */
	if (filter_hash) {
		hash = ops->filter_hash;
		other_hash = ops->notrace_hash;
		if (!hash || !hash->count)
			all = 1;
	} else {
		inc = !inc;
		hash = ops->notrace_hash;
		other_hash = ops->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (hash && !hash->count)
			return;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = hash && !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = other_hash && !!ftrace_lookup_ip(other_hash, rec->ip);

			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || !other_hash->count))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
				return;
		} else {
			if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
				return;
			rec->flags--;
		}
		count++;
		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return;
	} while_for_each_ftrace_rec();
}
static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 1);
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	if (ftrace_pages->index == ftrace_pages->size) {
		/* We should have allocated enough */
		if (WARN_ON(!ftrace_pages->next))
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	return rec;
}
static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @ip: The address that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of either:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
 */
void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}
/* Return 1 if the address range is reserved for ftrace */
int ftrace_text_reserved(void *start, void *end)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	do_for_each_ftrace_rec(pg, rec) {
		if (rec->ip <= (unsigned long)end &&
		    rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
			return 1;
	} while_for_each_ftrace_rec();

	return 0;
}
static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
{
	unsigned long flag = 0UL;

	/*
	 * If we are updating calls:
	 *
	 *   If the record has a ref count, then we need to enable it
	 *   because someone is using it.
	 *
	 *   Otherwise we make sure it's disabled.
	 *
	 * If we are disabling calls, then disable all records that
	 * are enabled.
	 */
	if (enable && (rec->flags & ~FTRACE_FL_MASK))
		flag = FTRACE_FL_ENABLED;

	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
		return FTRACE_UPDATE_IGNORE;

	if (flag) {
		if (update)
			rec->flags |= FTRACE_FL_ENABLED;
		return FTRACE_UPDATE_MAKE_CALL;
	}

	if (update)
		rec->flags &= ~FTRACE_FL_ENABLED;

	return FTRACE_UPDATE_MAKE_NOP;
}
/**
 * ftrace_update_record, set a record that now is tracing or not
 * @rec: the record to update
 * @enable: set to 1 if the record is tracing, zero to force disable
 *
 * The records that represent all functions that can be traced need
 * to be updated when tracing has been enabled.
 */
int ftrace_update_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 1);
}

/**
 * ftrace_test_record, check if the record has been enabled or not
 * @rec: the record to test
 * @enable: set to 1 to check if enabled, 0 if it is disabled
 *
 * The arch code may need to test if a record is already set to
 * tracing to determine how to modify the function code that it
 * represents.
 */
int ftrace_test_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 0);
}
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	ret = ftrace_update_record(rec, enable);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		return ftrace_make_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		return ftrace_make_nop(NULL, rec, ftrace_addr);
	}

	return -1; /* unknown ftrace bug */
}

static void ftrace_replace_code(int update)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int failed;

	if (unlikely(ftrace_disabled))
		return;

	do_for_each_ftrace_rec(pg, rec) {
		failed = __ftrace_replace_code(rec, update);
		if (failed) {
			ftrace_bug(failed, rec->ip);
			/* Stop processing */
			return;
		}
	} while_for_each_ftrace_rec();
}
struct ftrace_rec_iter {
	struct ftrace_page	*pg;
	int			index;
};

/**
 * ftrace_rec_iter_start, start up iterating over traced functions
 *
 * Returns an iterator handle that is used to iterate over all
 * the records that represent address locations where functions
 * are traced.
 *
 * May return NULL if no records are available.
 */
struct ftrace_rec_iter *ftrace_rec_iter_start(void)
{
	/*
	 * We only use a single iterator.
	 * Protected by the ftrace_lock mutex.
	 */
	static struct ftrace_rec_iter ftrace_rec_iter;
	struct ftrace_rec_iter *iter = &ftrace_rec_iter;

	iter->pg = ftrace_pages_start;
	iter->index = 0;

	/* Could have empty pages */
	while (iter->pg && !iter->pg->index)
		iter->pg = iter->pg->next;

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_next, get the next record to process.
 * @iter: The handle to the iterator.
 *
 * Returns the next iterator after the given iterator @iter.
 */
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
{
	iter->index++;

	if (iter->index >= iter->pg->index) {
		iter->pg = iter->pg->next;
		iter->index = 0;

		/* Could have empty pages */
		while (iter->pg && !iter->pg->index)
			iter->pg = iter->pg->next;
	}

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_record, get the record at the iterator location
 * @iter: The current iterator location
 *
 * Returns the record that the current @iter is at.
 */
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
{
	return &iter->pg->records[iter->index];
}
static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	if (unlikely(ftrace_disabled))
		return 0;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, ip);
		return 0;
	}
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}
/**
 * ftrace_run_stop_machine, go back to the stop machine method
 * @command: The command to tell ftrace what to do
 *
 * If an arch needs to fall back to the stop machine method, then
 * it can call this function.
 */
void ftrace_run_stop_machine(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

/**
 * arch_ftrace_update_code, modify the code to trace or not trace
 * @command: The command that needs to be done
 *
 * Archs can override this function if they do not need to
 * run stop_machine() to modify code.
 */
void __weak arch_ftrace_update_code(int command)
{
	ftrace_run_stop_machine(command);
}
static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;
	/*
	 * Do not call function tracer while we update the code.
	 * We are in stop machine.
	 */
	function_trace_stop++;

	/*
	 * By default we use stop_machine() to modify the code.
	 * But archs can do whatever they want as long as it
	 * is safe. The stop_machine() is the safest, but also
	 * produces the most overhead.
	 */
	arch_ftrace_update_code(command);

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	/*
	 * For archs that call ftrace_test_stop_func(), we must
	 * wait till after we update all the function callers
	 * before we update the callback. This keeps different
	 * ops that record different functions from corrupting
	 * each other.
	 */
	__ftrace_trace_function = __ftrace_trace_function_delay;
#endif
	function_trace_stop--;

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}
static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;
static int global_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static int ftrace_startup(struct ftrace_ops *ops, int command)
{
	bool hash_enable = true;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ftrace_start_up++;
	command |= FTRACE_UPDATE_CALLS;

	/* ops marked global share the filter hashes */
	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ops = &global_ops;
		/* Don't update hash if global is already set */
		if (global_start_up)
			hash_enable = false;
		global_start_up++;
	}

	ops->flags |= FTRACE_OPS_FL_ENABLED;
	if (hash_enable)
		ftrace_hash_rec_enable(ops, 1);

	ftrace_startup_enable(command);

	return 0;
}
static void ftrace_shutdown(struct ftrace_ops *ops, int command)
{
	bool hash_disable = true;

	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up--;
	/*
	 * Just warn in case of unbalance; no need to kill ftrace, it's not
	 * critical but the ftrace_call callers may never be nopped again after
	 * further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ops = &global_ops;
		global_start_up--;
		WARN_ON_ONCE(global_start_up < 0);
		/* Don't update hash if global still has users */
		if (global_start_up) {
			WARN_ON_ONCE(!ftrace_start_up);
			hash_disable = false;
		}
	}

	if (hash_disable)
		ftrace_hash_rec_disable(ops, 1);

	if (ops != &global_ops || !global_start_up)
		ops->flags &= ~FTRACE_OPS_FL_ENABLED;

	command |= FTRACE_UPDATE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}
static void ftrace_startup_sysctl(void)
{
	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
}

static void ftrace_shutdown_sysctl(void)
{
	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		ftrace_run_update_code(FTRACE_DISABLE_CALLS);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;
static int ops_traces_mod(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash;

	hash = ops->filter_hash;
	return !!(!hash || !hash->count);
}

static int ftrace_update_code(struct module *mod)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *p;
	cycle_t start, stop;
	unsigned long ref = 0;
	int i;

	/*
	 * When adding a module, we need to check if tracers are
	 * currently enabled and if they are set to trace all functions.
	 * If they are, we need to enable the module functions as well
	 * as update the reference counts for those function records.
	 */
	if (mod) {
		struct ftrace_ops *ops;

		for (ops = ftrace_ops_list;
		     ops != &ftrace_list_end; ops = ops->next) {
			if (ops->flags & FTRACE_OPS_FL_ENABLED &&
			    ops_traces_mod(ops))
				ref++;
		}
	}
	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	for (pg = ftrace_new_pgs; pg; pg = pg->next) {

		for (i = 0; i < pg->index; i++) {
			/* If something went wrong, bail without enabling anything */
			if (unlikely(ftrace_disabled))
				return -1;

			p = &pg->records[i];

			/*
			 * Do the initial record conversion from mcount jump
			 * to the NOP instructions.
			 */
			if (!ftrace_code_disable(mod, p))
				break;

			ftrace_update_cnt++;

			/*
			 * If the tracing is enabled, go ahead and enable the record.
			 *
			 * The reason not to enable the record immediately is the
			 * inherent check of ftrace_make_nop/ftrace_make_call for
			 * correct previous instructions. Doing the NOP
			 * conversion first puts the module into the correct state,
			 * thus passing the ftrace_make_call check.
			 */
			if (ftrace_start_up && ref) {
				int failed = __ftrace_replace_code(p, 1);
				if (failed)
					ftrace_bug(failed, p->ip);
			}
		}
	}

	ftrace_new_pgs = NULL;

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}
static int ftrace_allocate_records(struct ftrace_page *pg, int count)
{
	int order;
	int cnt;

	if (WARN_ON(!count))
		return -EINVAL;

	order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));

	/*
	 * We want to fill as much as possible. No more than a page
	 * may be empty.
	 */
	while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
		order--;

 again:
	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);

	if (!pg->records) {
		/* if we can't allocate this size, try something smaller */
		if (!order)
			return -ENOMEM;
		order >>= 1;
		goto again;
	}

	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
	pg->size = cnt;

	if (cnt > count)
		cnt = count;

	return cnt;
}
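
/*
 * Worked example for the sizing above (editorial; the 32-byte record
 * size is only an assumption, since sizeof(struct dyn_ftrace) is
 * arch-dependent): on 4K pages, ENTRIES_PER_PAGE = 4096 / 32 = 128.
 * For count = 10000, get_count_order(DIV_ROUND_UP(10000, 128)) =
 * get_count_order(79) = 7.  The while loop then drops the order to 6,
 * since 2^7 pages would hold 16384 entries, leaving more than a whole
 * page of slack; ftrace_allocate_pages() below picks up the remainder
 * in its next chunk.
 */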
static struct ftrace_page *
ftrace_allocate_pages(unsigned long num_to_init)
{
	struct ftrace_page *start_pg;
	struct ftrace_page *pg;
	int order;
	int cnt;

	if (!num_to_init)
		return 0;

	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
	if (!pg)
		return NULL;

	/*
	 * Try to allocate as much as possible in one contiguous
	 * location that fills in all of the space. We want to
	 * waste as little space as possible.
	 */
	for (;;) {
		cnt = ftrace_allocate_records(pg, num_to_init);
		if (cnt < 0)
			goto free_pages;

		num_to_init -= cnt;
		if (!num_to_init)
			break;

		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
		if (!pg->next)
			goto free_pages;

		pg = pg->next;
	}

	return start_pg;

 free_pages:
	pg = start_pg;
	while (pg) {
		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
		free_pages((unsigned long)pg->records, order);
		start_pg = pg->next;
		kfree(pg);
		pg = start_pg;
	}
	pr_info("ftrace: FAILED to allocate memory for functions\n");
	return NULL;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	int cnt;

	if (!num_to_init) {
		pr_info("ftrace: No functions to be traced?\n");
		return -1;
	}

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_HASH	= (1 << 3),
	FTRACE_ITER_ENABLED	= (1 << 4),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t				pos;
	loff_t				func_pos;
	struct ftrace_page		*pg;
	struct dyn_ftrace		*func;
	struct ftrace_func_probe	*probe;
	struct trace_parser		parser;
	struct ftrace_hash		*hash;
	struct ftrace_ops		*ops;
	int				hidx;
	int				idx;
	unsigned			flags;
};
static void *
t_hash_next(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct hlist_node *hnd = NULL;
	struct hlist_head *hhd;

	(*pos)++;
	iter->pos = *pos;

	if (iter->probe)
		hnd = &iter->probe->node;
 retry:
	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
		return NULL;

	hhd = &ftrace_func_hash[iter->hidx];

	if (hlist_empty(hhd)) {
		iter->hidx++;
		hnd = NULL;
		goto retry;
	}

	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			iter->hidx++;
			goto retry;
		}
	}

	if (WARN_ON_ONCE(!hnd))
		return NULL;

	iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);

	return iter;
}

static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	if (iter->func_pos > *pos)
		return NULL;

	iter->hidx = 0;
	for (l = 0; l <= (*pos - iter->func_pos); ) {
		p = t_hash_next(m, &l);
		if (!p)
			break;
	}
	if (!p)
		return NULL;

	/* Only set this if we have an item */
	iter->flags |= FTRACE_ITER_HASH;

	return iter;
}

static int
t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
{
	struct ftrace_func_probe *rec;

	rec = iter->probe;
	if (WARN_ON_ONCE(!rec))
		return -EIO;

	if (rec->ops->print)
		return rec->ops->print(m, rec->ip, rec->ops, rec->data);

	seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);

	if (rec->data)
		seq_printf(m, ":%p", rec->data);
	seq_putc(m, '\n');

	return 0;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct ftrace_ops *ops = &global_ops;
	struct dyn_ftrace *rec = NULL;

	if (unlikely(ftrace_disabled))
		return NULL;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_next(m, pos);

	(*pos)++;
	iter->pos = iter->func_pos = *pos;

	if (iter->flags & FTRACE_ITER_PRINTALL)
		return t_hash_start(m, pos);

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if (((iter->flags & FTRACE_ITER_FILTER) &&
		     !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||

		    ((iter->flags & FTRACE_ITER_ENABLED) &&
		     !(rec->flags & ~FTRACE_FL_MASK))) {

			rec = NULL;
			goto retry;
		}
	}

	if (!rec)
		return t_hash_start(m, pos);

	iter->func = rec;

	return rec;
}
static void reset_iter_read(struct ftrace_iterator *iter)
{
	iter->pos = 0;
	iter->func_pos = 0;
	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct ftrace_ops *ops = &global_ops;
	void *p = NULL;
	loff_t l;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		return NULL;

	/*
	 * If an lseek was done, then reset and start from beginning.
	 */
	if (*pos < iter->pos)
		reset_iter_read(iter);

	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if (iter->flags & FTRACE_ITER_FILTER && !ops->filter_hash->count) {
		if (*pos > 0)
			return t_hash_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		/* reset in case of seek/pread */
		iter->flags &= ~FTRACE_ITER_HASH;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

	/*
	 * Unfortunately, we need to restart at ftrace_pages_start
	 * every time we let go of the ftrace_lock. This is because
	 * those pointers can change without the lock.
	 */
	iter->pg = ftrace_pages_start;
	iter->idx = 0;
	for (l = 0; l <= *pos; ) {
		p = t_next(m, p, &l);
		if (!p)
			break;
	}

	if (!p) {
		if (iter->flags & FTRACE_ITER_FILTER)
			return t_hash_start(m, pos);

		return NULL;
	}

	return iter;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}
static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_show(m, iter);

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	rec = iter->func;

	if (!rec)
		return 0;

	seq_printf(m, "%ps", (void *)rec->ip);
	if (iter->flags & FTRACE_ITER_ENABLED)
		seq_printf(m, " (%ld)",
			   rec->flags & ~FTRACE_FL_MASK);
	seq_printf(m, "\n");

	return 0;
}

static const struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

static int
ftrace_enabled_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->flags = FTRACE_ITER_ENABLED;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

static void ftrace_filter_reset(struct ftrace_hash *hash)
{
	mutex_lock(&ftrace_lock);
	ftrace_hash_clear(hash);
	mutex_unlock(&ftrace_lock);
}
static int
ftrace_regex_open(struct ftrace_ops *ops, int flag,
		  struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	struct ftrace_hash *hash;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
		kfree(iter);
		return -ENOMEM;
	}

	if (flag & FTRACE_ITER_NOTRACE)
		hash = ops->notrace_hash;
	else
		hash = ops->filter_hash;

	iter->ops = ops;
	iter->flags = flag;

	if (file->f_mode & FMODE_WRITE) {
		mutex_lock(&ftrace_lock);
		iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
		mutex_unlock(&ftrace_lock);

		if (!iter->hash) {
			trace_parser_put(&iter->parser);
			kfree(iter);
			return -ENOMEM;
		}
	}

	mutex_lock(&ftrace_regex_lock);

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_filter_reset(iter->hash);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else {
			/* Failed */
			free_ftrace_hash(iter->hash);
			trace_parser_put(&iter->parser);
			kfree(iter);
		}
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&global_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
				 inode, file);
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
static int ftrace_match(char *str, char *regex, int len, int type)
{
	int matched = 0;
	int slen;

	switch (type) {
	case MATCH_FULL:
		if (strcmp(str, regex) == 0)
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
		if (strncmp(str, regex, len) == 0)
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
		if (strstr(str, regex))
			matched = 1;
		break;
	case MATCH_END_ONLY:
		slen = strlen(str);
		if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
			matched = 1;
		break;
	}

	return matched;
}
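
/*
 * Examples of the four match types (editorial note):
 *   "schedule"  -> MATCH_FULL        (exact compare)
 *   "sched_*"   -> MATCH_FRONT_ONLY  (prefix compare)
 *   "*sched*"   -> MATCH_MIDDLE_ONLY (substring search)
 *   "*_switch"  -> MATCH_END_ONLY    (suffix compare)
 * filter_parse_regex() strips the '*'s and picks the type before
 * ftrace_match() is called.
 */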
static int
enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
{
	struct ftrace_func_entry *entry;
	int ret = 0;

	entry = ftrace_lookup_ip(hash, rec->ip);
	if (not) {
		/* Do nothing if it doesn't exist */
		if (!entry)
			return 0;

		free_hash_entry(hash, entry);
	} else {
		/* Do nothing if it exists */
		if (entry)
			return 0;

		ret = add_hash_entry(hash, rec->ip);
	}
	return ret;
}

static int
ftrace_match_record(struct dyn_ftrace *rec, char *mod,
		    char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];
	char *modname;

	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);

	if (mod) {
		/* module lookup requires matching the module */
		if (!modname || strcmp(modname, mod))
			return 0;

		/* blank search means to match all funcs in the mod */
		if (!len)
			return 1;
	}

	return ftrace_match(str, regex, len, type);
}
static int
match_records(struct ftrace_hash *hash, char *buff,
	      int len, char *mod, int not)
{
	unsigned search_len = 0;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	char *search = buff;
	int found = 0;
	int ret;

	if (len) {
		type = filter_parse_regex(buff, len, &search, &not);
		search_len = strlen(search);
	}

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out_unlock;

	do_for_each_ftrace_rec(pg, rec) {
		if (ftrace_match_record(rec, mod, search, search_len, type)) {
			ret = enter_record(hash, rec, not);
			if (ret < 0) {
				found = ret;
				goto out_unlock;
			}
			found = 1;
		}
	} while_for_each_ftrace_rec();
 out_unlock:
	mutex_unlock(&ftrace_lock);

	return found;
}

static int
ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
{
	return match_records(hash, buff, len, NULL, 0);
}
static int
ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
{
	int not = 0;

	/* blank or '*' mean the same */
	if (strcmp(buff, "*") == 0)
		buff[0] = 0;

	/* handle the case of 'don't filter this module' */
	if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
		buff[0] = 0;
		not = 1;
	}

	return match_records(hash, buff, strlen(buff), mod, not);
}

/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */

static int
ftrace_mod_callback(struct ftrace_hash *hash,
		    char *func, char *cmd, char *param, int enable)
{
	char *mod;
	int ret = -EINVAL;

	/*
	 * cmd == 'mod' because we only registered this func
	 * for the 'mod' ftrace_func_command.
	 * But if you register one func with multiple commands,
	 * you can tell which command was used by the cmd
	 * parameter.
	 */

	/* we must have a module name */
	if (!param)
		return ret;

	mod = strsep(&param, ":");
	if (!strlen(mod))
		return ret;

	ret = ftrace_match_module_records(hash, func, mod);
	if (!ret)
		ret = -EINVAL;
	if (ret < 0)
		return ret;

	return 0;
}
static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
device_initcall(ftrace_mod_cmd_init);
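
/*
 * Sketch of registering a custom command, modeled on ftrace_mod_cmd
 * above.  The "example" command name and example_func_callback() are
 * hypothetical, purely to show the shape of the API:
 */
#if 0	/* example only, never built */
static int
example_func_callback(struct ftrace_hash *hash,
		      char *func, char *cmd, char *param, int enable)
{
	/* reached via "echo 'func:example:param' > set_ftrace_filter" */
	return 0;
}

static struct ftrace_func_command example_cmd = {
	.name	= "example",
	.func	= example_func_callback,
};

static int __init example_cmd_init(void)
{
	return register_ftrace_command(&example_cmd);
}
device_initcall(example_cmd_init);
#endif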
static void
function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_func_probe *entry;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent an RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
	preempt_disable_notrace();
	hlist_for_each_entry_rcu(entry, n, hhd, node) {
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
	preempt_enable_notrace();
}

static struct ftrace_ops trace_probe_ops __read_mostly =
{
	.func		= function_trace_probe_call,
};

static int ftrace_probe_registered;
static void __enable_ftrace_function_probe(void)
{
	int ret;
	int i;

	if (ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			break;
	}
	/* Nothing registered? */
	if (i == FTRACE_FUNC_HASHSIZE)
		return;

	ret = __register_ftrace_function(&trace_probe_ops);
	if (!ret)
		ret = ftrace_startup(&trace_probe_ops, 0);

	ftrace_probe_registered = 1;
}

static void __disable_ftrace_function_probe(void)
{
	int ret;
	int i;

	if (!ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			return;
	}

	/* no more funcs left */
	ret = __unregister_ftrace_function(&trace_probe_ops);
	if (!ret)
		ftrace_shutdown(&trace_probe_ops, 0);

	ftrace_probe_registered = 0;
}
static void ftrace_free_entry_rcu(struct rcu_head *rhp)
{
	struct ftrace_func_probe *entry =
		container_of(rhp, struct ftrace_func_probe, rcu);

	if (entry->ops->free)
		entry->ops->free(&entry->data);
	kfree(entry);
}
int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			       void *data)
{
	struct ftrace_func_probe *entry;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type, len, not;
	unsigned long key;
	int count = 0;
	char *search;

	type = filter_parse_regex(glob, strlen(glob), &search, &not);
	len = strlen(search);

	/* we do not support '!' for function probes */
	if (WARN_ON(not))
		return -EINVAL;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out_unlock;

	do_for_each_ftrace_rec(pg, rec) {

		if (!ftrace_match_record(rec, NULL, search, len, type))
			continue;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			/* If we did not process any, then return error */
			if (!count)
				count = -ENOMEM;
			goto out_unlock;
		}

		count++;

		entry->data = data;

		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
		if (ops->callback) {
			if (ops->callback(rec->ip, &entry->data) < 0) {
				/* caller does not like this func */
				kfree(entry);
				continue;
			}
		}

		entry->ops = ops;
		entry->ip = rec->ip;

		key = hash_long(entry->ip, FTRACE_HASH_BITS);
		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

	} while_for_each_ftrace_rec();
	__enable_ftrace_function_probe();

 out_unlock:
	mutex_unlock(&ftrace_lock);

	return count;
}
enum {
	PROBE_TEST_FUNC		= 1,
	PROBE_TEST_DATA		= 2
};

static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				   void *data, int flags)
{
	struct ftrace_func_probe *entry;
	struct hlist_node *n, *tmp;
	char str[KSYM_SYMBOL_LEN];
	int type = MATCH_FULL;
	int i, len = 0;
	char *search;

	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
		glob = NULL;
	else if (glob) {
		int not;

		type = filter_parse_regex(glob, strlen(glob), &search, &not);
		len = strlen(search);

		/* we do not support '!' for function probes */
		if (WARN_ON(not))
			return;
	}

	mutex_lock(&ftrace_lock);
	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];

		hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {

			/* break up if statements for readability */
			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
				continue;

			if ((flags & PROBE_TEST_DATA) && entry->data != data)
				continue;

			/* do this last, since it is the most expensive */
			if (glob) {
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				if (!ftrace_match(str, glob, len, type))
					continue;
			}

			hlist_del(&entry->node);
			call_rcu(&entry->rcu, ftrace_free_entry_rcu);
		}
	}
	__disable_ftrace_function_probe();
	mutex_unlock(&ftrace_lock);
}

void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				 void *data)
{
	__unregister_ftrace_function_probe(glob, ops, data,
					   PROBE_TEST_FUNC | PROBE_TEST_DATA);
}

void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
{
	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
}

void unregister_ftrace_function_probe_all(char *glob)
{
	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
}
static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);

int register_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p;
	int ret = 0;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &ftrace_commands);
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

int unregister_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}
static int ftrace_process_regex(struct ftrace_hash *hash,
				char *buff, int len, int enable)
{
	char *func, *command, *next = buff;
	struct ftrace_func_command *p;
	int ret = -EINVAL;

	func = strsep(&next, ":");

	if (!next) {
		ret = ftrace_match_records(hash, func, len);
		if (!ret)
			ret = -EINVAL;
		if (ret < 0)
			return ret;
		return 0;
	}

	/* command found */

	command = strsep(&next, ":");

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(hash, func, command, next, enable);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}
3031 ftrace_regex_write(struct file *file, const char __user *ubuf,
3032 size_t cnt, loff_t *ppos, int enable)
3034 struct ftrace_iterator *iter;
3035 struct trace_parser *parser;
3041 mutex_lock(&ftrace_regex_lock);
3044 if (unlikely(ftrace_disabled))
3047 if (file->f_mode & FMODE_READ) {
3048 struct seq_file *m = file->private_data;
3051 iter = file->private_data;
3053 parser = &iter->parser;
3054 read = trace_get_user(parser, ubuf, cnt, ppos);
3056 if (read >= 0 && trace_parser_loaded(parser) &&
3057 !trace_parser_cont(parser)) {
3058 ret = ftrace_process_regex(iter->hash, parser->buffer,
3059 parser->idx, enable);
3060 trace_parser_clear(parser);
3067 mutex_unlock(&ftrace_regex_lock);
3073 ftrace_filter_write(struct file *file, const char __user *ubuf,
3074 size_t cnt, loff_t *ppos)
3076 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3080 ftrace_notrace_write(struct file *file, const char __user *ubuf,
3081 size_t cnt, loff_t *ppos)
3083 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
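/*
 * These two writers back the set_ftrace_filter and set_ftrace_notrace
 * debugfs files.  Typical usage from user space (assuming debugfs is
 * mounted at /sys/kernel/debug):
 *
 *	echo 'vfs_read'    > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo '*spin_lock*' > /sys/kernel/debug/tracing/set_ftrace_notrace
 */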
3087 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3088 int reset, int enable)
3090 struct ftrace_hash **orig_hash;
3091 struct ftrace_hash *hash;
3094 /* All global ops use the global ops filters */
3095 if (ops->flags & FTRACE_OPS_FL_GLOBAL)
3098 if (unlikely(ftrace_disabled))
3102 orig_hash = &ops->filter_hash;
3104 orig_hash = &ops->notrace_hash;
3106 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3110 mutex_lock(&ftrace_regex_lock);
3112 ftrace_filter_reset(hash);
3114 ftrace_match_records(hash, buf, len);
3116 mutex_lock(&ftrace_lock);
3117 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3118 if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
3119 && ftrace_enabled)
3120 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3122 mutex_unlock(&ftrace_lock);
3124 mutex_unlock(&ftrace_regex_lock);
3126 free_ftrace_hash(hash);
3131 * ftrace_set_filter - set a function to filter on in ftrace
3132 * @ops - the ops to set the filter with
3133 * @buf - the string that holds the function filter text.
3134 * @len - the length of the string.
3135 * @reset - non zero to reset all filters before applying this filter.
3137 * Filters denote which functions should be enabled when tracing is enabled.
3138 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3140 void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3143 ftrace_set_regex(ops, buf, len, reset, 1);
3145 EXPORT_SYMBOL_GPL(ftrace_set_filter);
3148 * ftrace_set_notrace - set a function to not trace in ftrace
3149 * @ops - the ops to set the notrace filter with
3150 * @buf - the string that holds the function notrace text.
3151 * @len - the length of the string.
3152 * @reset - non zero to reset all filters before applying this filter.
3154 * Notrace filters denote which functions should not be enabled when tracing
3155 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3156 * for tracing.
3158 void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3161 ftrace_set_regex(ops, buf, len, reset, 0);
3163 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
3165 * ftrace_set_global_filter - set a function to filter on with global tracers
3166 * @buf - the string that holds the function filter text.
3167 * @len - the length of the string.
3168 * @reset - non zero to reset all filters before applying this filter.
3171 * Filters denote which functions should be enabled when tracing is enabled.
3172 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3174 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3176 ftrace_set_regex(&global_ops, buf, len, reset, 1);
3178 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3181 * ftrace_set_global_notrace - set a function to not trace with global tracers
3182 * @buf - the string that holds the function notrace text.
3183 * @len - the length of the string.
3184 * @reset - non zero to reset all filters before applying this filter.
3187 * Notrace filters denote which functions should not be enabled when tracing
3188 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3189 * for tracing.
3191 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
3193 ftrace_set_regex(&global_ops, buf, len, reset, 0);
3195 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
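/*
 * Usage sketch (illustrative only; "my_ops" and "my_trace_func" are
 * hypothetical names): a caller narrows its ops to a single function
 * before registering.  Note @buf is unsigned char * in this interface,
 * hence the cast.
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_trace_func,
 *	};
 *
 *	ftrace_set_filter(&my_ops, (unsigned char *)"vfs_read",
 *			  strlen("vfs_read"), 1);
 *	register_ftrace_function(&my_ops);
 */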
3198 * command line interface to allow users to set filters on boot up.
3200 #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
3201 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3202 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3204 static int __init set_ftrace_notrace(char *str)
3206 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3209 __setup("ftrace_notrace=", set_ftrace_notrace);
3211 static int __init set_ftrace_filter(char *str)
3213 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3216 __setup("ftrace_filter=", set_ftrace_filter);
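/*
 * For example (boot command line, comma-separated globs):
 *
 *	ftrace_filter=kmem_cache_alloc,kmem_cache_free ftrace_notrace=*lock*
 *
 * seeds the global filter hashes before tracing is first enabled; see
 * set_ftrace_early_filters() below.
 */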
3218 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3219 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3220 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
3222 static int __init set_graph_function(char *str)
3224 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3227 __setup("ftrace_graph_filter=", set_graph_function);
3229 static void __init set_ftrace_early_graph(char *buf)
3235 func = strsep(&buf, ",");
3236 /* we allow only one expression at a time */
3237 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3240 printk(KERN_DEBUG "ftrace: function %s not traceable\n", func);
3244 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3247 set_ftrace_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3252 func = strsep(&buf, ",");
3253 ftrace_set_regex(ops, func, strlen(func), 0, enable);
3257 static void __init set_ftrace_early_filters(void)
3259 if (ftrace_filter_buf[0])
3260 set_ftrace_early_filter(&global_ops, ftrace_filter_buf, 1);
3261 if (ftrace_notrace_buf[0])
3262 set_ftrace_early_filter(&global_ops, ftrace_notrace_buf, 0);
3263 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3264 if (ftrace_graph_buf[0])
3265 set_ftrace_early_graph(ftrace_graph_buf);
3266 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3270 ftrace_regex_release(struct inode *inode, struct file *file)
3272 struct seq_file *m = (struct seq_file *)file->private_data;
3273 struct ftrace_iterator *iter;
3274 struct ftrace_hash **orig_hash;
3275 struct trace_parser *parser;
3279 mutex_lock(&ftrace_regex_lock);
3280 if (file->f_mode & FMODE_READ) {
3283 seq_release(inode, file);
3285 iter = file->private_data;
3287 parser = &iter->parser;
3288 if (trace_parser_loaded(parser)) {
3289 parser->buffer[parser->idx] = 0;
3290 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3293 trace_parser_put(parser);
3295 if (file->f_mode & FMODE_WRITE) {
3296 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3299 orig_hash = &iter->ops->filter_hash;
3301 orig_hash = &iter->ops->notrace_hash;
3303 mutex_lock(&ftrace_lock);
3304 ret = ftrace_hash_move(iter->ops, filter_hash,
3305 orig_hash, iter->hash);
3306 if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
3307 && ftrace_enabled)
3308 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3310 mutex_unlock(&ftrace_lock);
3312 free_ftrace_hash(iter->hash);
3315 mutex_unlock(&ftrace_regex_lock);
3319 static const struct file_operations ftrace_avail_fops = {
3320 .open = ftrace_avail_open,
3322 .llseek = seq_lseek,
3323 .release = seq_release_private,
3326 static const struct file_operations ftrace_enabled_fops = {
3327 .open = ftrace_enabled_open,
3329 .llseek = seq_lseek,
3330 .release = seq_release_private,
3333 static const struct file_operations ftrace_filter_fops = {
3334 .open = ftrace_filter_open,
3336 .write = ftrace_filter_write,
3337 .llseek = ftrace_regex_lseek,
3338 .release = ftrace_regex_release,
3341 static const struct file_operations ftrace_notrace_fops = {
3342 .open = ftrace_notrace_open,
3344 .write = ftrace_notrace_write,
3345 .llseek = ftrace_regex_lseek,
3346 .release = ftrace_regex_release,
3349 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3351 static DEFINE_MUTEX(graph_lock);
3353 int ftrace_graph_count;
3354 int ftrace_graph_filter_enabled;
3355 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3358 __g_next(struct seq_file *m, loff_t *pos)
3360 if (*pos >= ftrace_graph_count)
3361 return NULL;
3362 return &ftrace_graph_funcs[*pos];
3366 g_next(struct seq_file *m, void *v, loff_t *pos)
3369 return __g_next(m, pos);
3372 static void *g_start(struct seq_file *m, loff_t *pos)
3374 mutex_lock(&graph_lock);
3376 /* Nothing to iterate; tell g_show to print "all functions enabled" */
3377 if (!ftrace_graph_filter_enabled && !*pos)
3380 return __g_next(m, pos);
3383 static void g_stop(struct seq_file *m, void *p)
3385 mutex_unlock(&graph_lock);
3388 static int g_show(struct seq_file *m, void *v)
3390 unsigned long *ptr = v;
3395 if (ptr == (unsigned long *)1) {
3396 seq_printf(m, "#### all functions enabled ####\n");
3400 seq_printf(m, "%ps\n", (void *)*ptr);
3405 static const struct seq_operations ftrace_graph_seq_ops = {
3413 ftrace_graph_open(struct inode *inode, struct file *file)
3417 if (unlikely(ftrace_disabled))
3420 mutex_lock(&graph_lock);
3421 if ((file->f_mode & FMODE_WRITE) &&
3422 (file->f_flags & O_TRUNC)) {
3423 ftrace_graph_filter_enabled = 0;
3424 ftrace_graph_count = 0;
3425 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3427 mutex_unlock(&graph_lock);
3429 if (file->f_mode & FMODE_READ)
3430 ret = seq_open(file, &ftrace_graph_seq_ops);
3436 ftrace_graph_release(struct inode *inode, struct file *file)
3438 if (file->f_mode & FMODE_READ)
3439 seq_release(inode, file);
3444 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
3446 struct dyn_ftrace *rec;
3447 struct ftrace_page *pg;
3456 type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3457 if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3460 search_len = strlen(search);
3462 mutex_lock(&ftrace_lock);
3464 if (unlikely(ftrace_disabled)) {
3465 mutex_unlock(&ftrace_lock);
3469 do_for_each_ftrace_rec(pg, rec) {
3471 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3472 /* if it is in the array */
3474 for (i = 0; i < *idx; i++) {
3475 if (array[i] == rec->ip) {
3484 array[(*idx)++] = rec->ip;
3485 if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3490 array[i] = array[--(*idx)];
3496 } while_for_each_ftrace_rec();
3498 mutex_unlock(&ftrace_lock);
3503 ftrace_graph_filter_enabled = 1;
3508 ftrace_graph_write(struct file *file, const char __user *ubuf,
3509 size_t cnt, loff_t *ppos)
3511 struct trace_parser parser;
3517 mutex_lock(&graph_lock);
3519 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3524 read = trace_get_user(&parser, ubuf, cnt, ppos);
3526 if (read >= 0 && trace_parser_loaded((&parser))) {
3527 parser.buffer[parser.idx] = 0;
3529 /* we allow only one expression at a time */
3530 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3539 trace_parser_put(&parser);
3541 mutex_unlock(&graph_lock);
3546 static const struct file_operations ftrace_graph_fops = {
3547 .open = ftrace_graph_open,
3549 .write = ftrace_graph_write,
3550 .release = ftrace_graph_release,
3551 .llseek = seq_lseek,
3553 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
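/*
 * ftrace_graph_fops backs the set_graph_function file created in
 * ftrace_init_dyn_debugfs() below.  For example:
 *
 *	echo sys_open > /sys/kernel/debug/tracing/set_graph_function
 *
 * limits the graph tracer to sys_open and the functions it calls.
 */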
3555 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3558 trace_create_file("available_filter_functions", 0444,
3559 d_tracer, NULL, &ftrace_avail_fops);
3561 trace_create_file("enabled_functions", 0444,
3562 d_tracer, NULL, &ftrace_enabled_fops);
3564 trace_create_file("set_ftrace_filter", 0644, d_tracer,
3565 NULL, &ftrace_filter_fops);
3567 trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3568 NULL, &ftrace_notrace_fops);
3570 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3571 trace_create_file("set_graph_function", 0444, d_tracer,
3572 NULL,
3573 &ftrace_graph_fops);
3574 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3579 static void ftrace_swap_recs(void *a, void *b, int size)
3581 struct dyn_ftrace *reca = a;
3582 struct dyn_ftrace *recb = b;
3583 struct dyn_ftrace t;
3585 t = *reca;
3586 *reca = *recb;
3587 *recb = t;
3590 static int ftrace_cmp_recs(const void *a, const void *b)
3592 const struct dyn_ftrace *reca = a;
3593 const struct dyn_ftrace *recb = b;
3595 if (reca->ip > recb->ip)
3596 return 1;
3597 if (reca->ip < recb->ip)
3598 return -1;
3599 return 0;
3602 static int ftrace_process_locs(struct module *mod,
3603 unsigned long *start,
3606 struct ftrace_page *pg;
3607 unsigned long count;
3610 unsigned long flags = 0; /* Shut up gcc */
3613 count = end - start;
3618 pg = ftrace_allocate_pages(count);
3622 mutex_lock(&ftrace_lock);
3625 * The core kernel and each module need their own pages, as
3626 * modules will free theirs when they are removed.
3627 * Force a new page to be allocated for modules.
3630 WARN_ON(ftrace_pages || ftrace_pages_start);
3631 /* First initialization */
3632 ftrace_pages = ftrace_pages_start = pg;
3637 if (WARN_ON(ftrace_pages->next)) {
3638 /* Hmm, we have free pages? */
3639 while (ftrace_pages->next)
3640 ftrace_pages = ftrace_pages->next;
3643 ftrace_pages->next = pg;
3649 addr = ftrace_call_adjust(*p++);
3651 * Some architecture linkers will pad between
3652 * the different mcount_loc sections of different
3653 * object files to satisfy alignments.
3654 * Skip any NULL pointers.
3658 if (!ftrace_record_ip(addr))
3662 /* These new locations need to be initialized */
3663 ftrace_new_pgs = pg;
3665 /* Sort the records within each set of pages by ip */
3666 for (; pg; pg = pg->next)
3667 sort(pg->records, pg->index, sizeof(struct dyn_ftrace),
3668 ftrace_cmp_recs, ftrace_swap_recs);
3671 * We only need to disable interrupts on start up
3672 * because we are modifying code that an interrupt
3673 * may execute, and the modification is not atomic.
3674 * But for modules, nothing runs the code we modify
3675 * until we are finished with it, and there's no
3676 * reason to cause large interrupt latencies while we do it.
3679 local_irq_save(flags);
3680 ftrace_update_code(mod);
3682 local_irq_restore(flags);
3685 mutex_unlock(&ftrace_lock);
3690 #ifdef CONFIG_MODULES
3692 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
3694 void ftrace_release_mod(struct module *mod)
3696 struct dyn_ftrace *rec;
3697 struct ftrace_page **last_pg;
3698 struct ftrace_page *pg;
3701 mutex_lock(&ftrace_lock);
3703 if (ftrace_disabled)
3707 * Each module has its own ftrace_pages; remove
3708 * them from the list.
3710 last_pg = &ftrace_pages_start;
3711 for (pg = ftrace_pages_start; pg; pg = *last_pg) {
3712 rec = &pg->records[0];
3713 if (within_module_core(rec->ip, mod)) {
3715 * As core pages are first, the first
3716 * page should never be a module page.
3718 if (WARN_ON(pg == ftrace_pages_start))
3721 /* Check if we are deleting the last page */
3722 if (pg == ftrace_pages)
3723 ftrace_pages = next_to_ftrace_page(last_pg);
3725 *last_pg = pg->next;
3726 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
3727 free_pages((unsigned long)pg->records, order);
3730 last_pg = &pg->next;
3733 mutex_unlock(&ftrace_lock);
3736 static void ftrace_init_module(struct module *mod,
3737 unsigned long *start, unsigned long *end)
3739 if (ftrace_disabled || start == end)
3741 ftrace_process_locs(mod, start, end);
3744 static int ftrace_module_notify(struct notifier_block *self,
3745 unsigned long val, void *data)
3747 struct module *mod = data;
3750 case MODULE_STATE_COMING:
3751 ftrace_init_module(mod, mod->ftrace_callsites,
3752 mod->ftrace_callsites +
3753 mod->num_ftrace_callsites);
3755 case MODULE_STATE_GOING:
3756 ftrace_release_mod(mod);
3763 static int ftrace_module_notify(struct notifier_block *self,
3764 unsigned long val, void *data)
3768 #endif /* CONFIG_MODULES */
3770 struct notifier_block ftrace_module_nb = {
3771 .notifier_call = ftrace_module_notify,
3775 extern unsigned long __start_mcount_loc[];
3776 extern unsigned long __stop_mcount_loc[];
3778 void __init ftrace_init(void)
3780 unsigned long count, addr, flags;
3783 /* Keep the ftrace pointer to the stub */
3784 addr = (unsigned long)ftrace_stub;
3786 local_irq_save(flags);
3787 ftrace_dyn_arch_init(&addr);
3788 local_irq_restore(flags);
3790 /* ftrace_dyn_arch_init places the return code in addr */
3794 count = __stop_mcount_loc - __start_mcount_loc;
3796 ret = ftrace_dyn_table_alloc(count);
3800 last_ftrace_enabled = ftrace_enabled = 1;
3802 ret = ftrace_process_locs(NULL,
3806 ret = register_module_notifier(&ftrace_module_nb);
3808 pr_warning("Failed to register ftrace module notifier\n");
3810 set_ftrace_early_filters();
3814 ftrace_disabled = 1;
3819 static struct ftrace_ops global_ops = {
3820 .func = ftrace_stub,
3823 static int __init ftrace_nodyn_init(void)
3828 device_initcall(ftrace_nodyn_init);
3830 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
3831 static inline void ftrace_startup_enable(int command) { }
3832 /* Keep as macros so we do not need to define the commands */
3833 # define ftrace_startup(ops, command) \
3835 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
3838 # define ftrace_shutdown(ops, command) do { } while (0)
3839 # define ftrace_startup_sysctl() do { } while (0)
3840 # define ftrace_shutdown_sysctl() do { } while (0)
3843 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
3848 #endif /* CONFIG_DYNAMIC_FTRACE */
3851 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
3853 struct ftrace_ops *op;
3855 if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
3858 trace_recursion_set(TRACE_INTERNAL_BIT);
3860 * Some of the ops may be dynamically allocated;
3861 * they must be freed after a synchronize_sched().
3863 preempt_disable_notrace();
3864 op = rcu_dereference_raw(ftrace_ops_list);
3865 while (op != &ftrace_list_end) {
3866 if (ftrace_ops_test(op, ip))
3867 op->func(ip, parent_ip);
3868 op = rcu_dereference_raw(op->next);
3870 preempt_enable_notrace();
3871 trace_recursion_clear(TRACE_INTERNAL_BIT);
3874 static void clear_ftrace_swapper(void)
3876 struct task_struct *p;
3880 for_each_online_cpu(cpu) {
3882 clear_tsk_trace_trace(p);
3887 static void set_ftrace_swapper(void)
3889 struct task_struct *p;
3893 for_each_online_cpu(cpu) {
3895 set_tsk_trace_trace(p);
3900 static void clear_ftrace_pid(struct pid *pid)
3902 struct task_struct *p;
3905 do_each_pid_task(pid, PIDTYPE_PID, p) {
3906 clear_tsk_trace_trace(p);
3907 } while_each_pid_task(pid, PIDTYPE_PID, p);
3913 static void set_ftrace_pid(struct pid *pid)
3915 struct task_struct *p;
3918 do_each_pid_task(pid, PIDTYPE_PID, p) {
3919 set_tsk_trace_trace(p);
3920 } while_each_pid_task(pid, PIDTYPE_PID, p);
3924 static void clear_ftrace_pid_task(struct pid *pid)
3926 if (pid == ftrace_swapper_pid)
3927 clear_ftrace_swapper();
3929 clear_ftrace_pid(pid);
3932 static void set_ftrace_pid_task(struct pid *pid)
3934 if (pid == ftrace_swapper_pid)
3935 set_ftrace_swapper();
3937 set_ftrace_pid(pid);
3940 static int ftrace_pid_add(int p)
3943 struct ftrace_pid *fpid;
3946 mutex_lock(&ftrace_lock);
3949 pid = ftrace_swapper_pid;
3951 pid = find_get_pid(p);
3958 list_for_each_entry(fpid, &ftrace_pids, list)
3959 if (fpid->pid == pid)
3964 fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
3968 list_add(&fpid->list, &ftrace_pids);
3971 set_ftrace_pid_task(pid);
3973 ftrace_update_pid_func();
3974 ftrace_startup_enable(0);
3976 mutex_unlock(&ftrace_lock);
3980 if (pid != ftrace_swapper_pid)
3984 mutex_unlock(&ftrace_lock);
3988 static void ftrace_pid_reset(void)
3990 struct ftrace_pid *fpid, *safe;
3992 mutex_lock(&ftrace_lock);
3993 list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
3994 struct pid *pid = fpid->pid;
3996 clear_ftrace_pid_task(pid);
3998 list_del(&fpid->list);
4002 ftrace_update_pid_func();
4003 ftrace_startup_enable(0);
4005 mutex_unlock(&ftrace_lock);
4008 static void *fpid_start(struct seq_file *m, loff_t *pos)
4010 mutex_lock(&ftrace_lock);
4012 if (list_empty(&ftrace_pids) && (!*pos))
4015 return seq_list_start(&ftrace_pids, *pos);
4018 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
4023 return seq_list_next(v, &ftrace_pids, pos);
4026 static void fpid_stop(struct seq_file *m, void *p)
4028 mutex_unlock(&ftrace_lock);
4031 static int fpid_show(struct seq_file *m, void *v)
4033 const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
4035 if (v == (void *)1) {
4036 seq_printf(m, "no pid\n");
4040 if (fpid->pid == ftrace_swapper_pid)
4041 seq_printf(m, "swapper tasks\n");
4043 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
4048 static const struct seq_operations ftrace_pid_sops = {
4049 .start = fpid_start,
4056 ftrace_pid_open(struct inode *inode, struct file *file)
4060 if ((file->f_mode & FMODE_WRITE) &&
4061 (file->f_flags & O_TRUNC))
4064 if (file->f_mode & FMODE_READ)
4065 ret = seq_open(file, &ftrace_pid_sops);
4071 ftrace_pid_write(struct file *filp, const char __user *ubuf,
4072 size_t cnt, loff_t *ppos)
4078 if (cnt >= sizeof(buf))
4081 if (copy_from_user(&buf, ubuf, cnt))
4087 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
4088 * to clear the filter quietly.
4090 tmp = strstrip(buf);
4091 if (strlen(tmp) == 0)
4094 ret = strict_strtol(tmp, 10, &val);
4098 ret = ftrace_pid_add(val);
4100 return ret ? ret : cnt;
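/*
 * Examples of the resulting set_ftrace_pid interface: writing a pid
 * adds it to the filter, writing 0 selects the swapper tasks, and an
 * empty write clears the whole list.
 *
 *	echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid
 *	echo 0    > /sys/kernel/debug/tracing/set_ftrace_pid
 *	echo      > /sys/kernel/debug/tracing/set_ftrace_pid
 */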
4104 ftrace_pid_release(struct inode *inode, struct file *file)
4106 if (file->f_mode & FMODE_READ)
4107 seq_release(inode, file);
4112 static const struct file_operations ftrace_pid_fops = {
4113 .open = ftrace_pid_open,
4114 .write = ftrace_pid_write,
4116 .llseek = seq_lseek,
4117 .release = ftrace_pid_release,
4120 static __init int ftrace_init_debugfs(void)
4122 struct dentry *d_tracer;
4124 d_tracer = tracing_init_dentry();
4128 ftrace_init_dyn_debugfs(d_tracer);
4130 trace_create_file("set_ftrace_pid", 0644, d_tracer,
4131 NULL, &ftrace_pid_fops);
4133 ftrace_profile_debugfs(d_tracer);
4137 fs_initcall(ftrace_init_debugfs);
4140 * ftrace_kill - kill ftrace
4142 * This function should be used by panic code. It stops ftrace
4143 * but in a not so nice way: nothing is unregistered or cleaned up;
4144 * tracing is simply disabled and the stub restored in its place.
4146 void ftrace_kill(void)
4148 ftrace_disabled = 1;
4150 clear_ftrace_function();
4154 * Test if ftrace is dead or not.
4156 int ftrace_is_dead(void)
4158 return ftrace_disabled;
4162 * register_ftrace_function - register a function for profiling
4163 * @ops - ops structure that holds the function for profiling.
4165 * Register a function to be called by all functions in the
4166 * kernel.
4168 * Note: @ops->func and all the functions it calls must be labeled
4169 * with "notrace", otherwise it will go into a
4170 * recursive loop.
4172 int register_ftrace_function(struct ftrace_ops *ops)
4176 mutex_lock(&ftrace_lock);
4178 if (unlikely(ftrace_disabled))
4181 ret = __register_ftrace_function(ops);
4183 ret = ftrace_startup(ops, 0);
4187 mutex_unlock(&ftrace_lock);
4190 EXPORT_SYMBOL_GPL(register_ftrace_function);
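/*
 * Minimal usage sketch (illustrative only; "my_trace_func" and "my_ops"
 * are hypothetical names).  The callback is marked notrace per the
 * warning above.
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */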
4193 * unregister_ftrace_function - unregister a function for profiling.
4194 * @ops - ops structure that holds the function to unregister
4196 * Unregister a function that was added to be called by ftrace profiling.
4198 int unregister_ftrace_function(struct ftrace_ops *ops)
4202 mutex_lock(&ftrace_lock);
4203 ret = __unregister_ftrace_function(ops);
4205 ftrace_shutdown(ops, 0);
4206 mutex_unlock(&ftrace_lock);
4210 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
4213 ftrace_enable_sysctl(struct ctl_table *table, int write,
4214 void __user *buffer, size_t *lenp,
4219 mutex_lock(&ftrace_lock);
4221 if (unlikely(ftrace_disabled))
4224 ret = proc_dointvec(table, write, buffer, lenp, ppos);
4226 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
4229 last_ftrace_enabled = !!ftrace_enabled;
4231 if (ftrace_enabled) {
4233 ftrace_startup_sysctl();
4235 /* we are starting ftrace again */
4236 if (ftrace_ops_list != &ftrace_list_end) {
4237 if (ftrace_ops_list->next == &ftrace_list_end)
4238 ftrace_trace_function = ftrace_ops_list->func;
4240 ftrace_trace_function = ftrace_ops_list_func;
4244 /* stopping ftrace calls (just send to ftrace_stub) */
4245 ftrace_trace_function = ftrace_stub;
4247 ftrace_shutdown_sysctl();
4251 mutex_unlock(&ftrace_lock);
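/*
 * This handler backs the kernel.ftrace_enabled sysctl; e.g.:
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled
 *
 * redirects all tracing to ftrace_stub until the value is set back to 1.
 */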
4255 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4257 static int ftrace_graph_active;
4258 static struct notifier_block ftrace_suspend_notifier;
4260 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
4265 /* The callbacks that hook a function */
4266 trace_func_graph_ret_t ftrace_graph_return =
4267 (trace_func_graph_ret_t)ftrace_stub;
4268 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
4270 /* Try to assign return stacks to FTRACE_RETSTACK_ALLOC_SIZE tasks at a time. */
4271 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
4275 unsigned long flags;
4276 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
4277 struct task_struct *g, *t;
4279 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
4280 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
4281 * sizeof(struct ftrace_ret_stack),
4283 if (!ret_stack_list[i]) {
4291 read_lock_irqsave(&tasklist_lock, flags);
4292 do_each_thread(g, t) {
4298 if (t->ret_stack == NULL) {
4299 atomic_set(&t->tracing_graph_pause, 0);
4300 atomic_set(&t->trace_overrun, 0);
4301 t->curr_ret_stack = -1;
4302 /* Make sure the tasks see the -1 first: */
4304 t->ret_stack = ret_stack_list[start++];
4306 } while_each_thread(g, t);
4309 read_unlock_irqrestore(&tasklist_lock, flags);
4311 for (i = start; i < end; i++)
4312 kfree(ret_stack_list[i]);
4317 ftrace_graph_probe_sched_switch(void *ignore,
4318 struct task_struct *prev, struct task_struct *next)
4320 unsigned long long timestamp;
4324 * Does the user want to count the time a function was asleep?
4325 * If so, do not update the time stamps.
4327 if (trace_flags & TRACE_ITER_SLEEP_TIME)
4330 timestamp = trace_clock_local();
4332 prev->ftrace_timestamp = timestamp;
4334 /* only process tasks that we timestamped */
4335 if (!next->ftrace_timestamp)
4339 * Update all the counters in next to make up for the
4340 * time next was sleeping.
4342 timestamp -= next->ftrace_timestamp;
4344 for (index = next->curr_ret_stack; index >= 0; index--)
4345 next->ret_stack[index].calltime += timestamp;
4348 /* Allocate a return stack for each task */
4349 static int start_graph_tracing(void)
4351 struct ftrace_ret_stack **ret_stack_list;
4354 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4355 sizeof(struct ftrace_ret_stack *),
4358 if (!ret_stack_list)
4361 /* An idle task's ret_stack, once allocated, is never freed */
4362 for_each_online_cpu(cpu) {
4363 if (!idle_task(cpu)->ret_stack)
4364 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
4368 ret = alloc_retstack_tasklist(ret_stack_list);
4369 } while (ret == -EAGAIN);
4372 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4374 pr_info("ftrace_graph: Couldn't activate tracepoint probe to kernel_sched_switch\n");
4378 kfree(ret_stack_list);
4383 * Hibernation protection.
4384 * The state of the current task is too unstable during
4385 * suspend/restore to disk. We want to protect against that.
4388 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4392 case PM_HIBERNATION_PREPARE:
4393 pause_graph_tracing();
4396 case PM_POST_HIBERNATION:
4397 unpause_graph_tracing();
4403 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4404 trace_func_graph_ent_t entryfunc)
4408 mutex_lock(&ftrace_lock);
4410 /* we currently allow only one tracer registered at a time */
4411 if (ftrace_graph_active) {
4416 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4417 register_pm_notifier(&ftrace_suspend_notifier);
4419 ftrace_graph_active++;
4420 ret = start_graph_tracing();
4422 ftrace_graph_active--;
4426 ftrace_graph_return = retfunc;
4427 ftrace_graph_entry = entryfunc;
4429 ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
4432 mutex_unlock(&ftrace_lock);
4436 void unregister_ftrace_graph(void)
4438 mutex_lock(&ftrace_lock);
4440 if (unlikely(!ftrace_graph_active))
4443 ftrace_graph_active--;
4444 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
4445 ftrace_graph_entry = ftrace_graph_entry_stub;
4446 ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
4447 unregister_pm_notifier(&ftrace_suspend_notifier);
4448 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4451 mutex_unlock(&ftrace_lock);
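/*
 * Usage sketch (illustrative only; "my_graph_entry" and
 * "my_graph_return" are hypothetical names).  The entry handler returns
 * nonzero to record the function; the return handler runs on exit.
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 *	...
 *	unregister_ftrace_graph();
 */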
4454 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4457 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4459 atomic_set(&t->tracing_graph_pause, 0);
4460 atomic_set(&t->trace_overrun, 0);
4461 t->ftrace_timestamp = 0;
4462 /* make curr_ret_stack visible before we add the ret_stack */
4464 t->ret_stack = ret_stack;
4468 * Allocate a return stack for the idle task. May be the first
4469 * time through, or it may be done by CPU hotplug online.
4471 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4473 t->curr_ret_stack = -1;
4475 * The idle task has no parent; it either has its own
4476 * stack or no stack at all.
4479 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4481 if (ftrace_graph_active) {
4482 struct ftrace_ret_stack *ret_stack;
4484 ret_stack = per_cpu(idle_ret_stack, cpu);
4486 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4487 * sizeof(struct ftrace_ret_stack),
4491 per_cpu(idle_ret_stack, cpu) = ret_stack;
4493 graph_init_task(t, ret_stack);
4497 /* Allocate a return stack for newly created task */
4498 void ftrace_graph_init_task(struct task_struct *t)
4500 /* Make sure we do not use the parent ret_stack */
4501 t->ret_stack = NULL;
4502 t->curr_ret_stack = -1;
4504 if (ftrace_graph_active) {
4505 struct ftrace_ret_stack *ret_stack;
4507 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4508 * sizeof(struct ftrace_ret_stack),
4512 graph_init_task(t, ret_stack);
4516 void ftrace_graph_exit_task(struct task_struct *t)
4518 struct ftrace_ret_stack *ret_stack = t->ret_stack;
4520 t->ret_stack = NULL;
4521 /* NULL must become visible to IRQs before we free it: */
4527 void ftrace_graph_stop(void)