/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>
#define FTRACE_WARN_ON(cond)		do { if (WARN_ON(cond)) ftrace_kill(); } while (0)

#define FTRACE_WARN_ON_ONCE(cond)	do { if (WARN_ON_ONCE(cond)) ftrace_kill(); } while (0)
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

/* Quick disabling of function tracer. */
int function_trace_stop;
/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static DEFINE_MUTEX(ftrace_start_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
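/*
 * How the three pointers above fit together (summary of the code below):
 *
 *   - ftrace_trace_function is what the arch mcount stub actually calls.
 *   - __ftrace_trace_function is used when the arch does not test
 *     function_trace_stop itself; ftrace_test_stop_func() checks the flag
 *     from C and then forwards to it.
 *   - ftrace_pid_function holds the "real" callback while ftrace_pid_func()
 *     is interposed to filter on the traced pid.
 */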
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be some lag before tracing actually stops.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;
	__ftrace_trace_function(ip, parent_ip);
}
#endif
135 static int __register_ftrace_function(struct ftrace_ops *ops)
137 /* should not be called from interrupt context */
138 spin_lock(&ftrace_lock);
140 ops->next = ftrace_list;
142 * We are entering ops into the ftrace_list but another
143 * CPU might be walking that list. We need to make sure
144 * the ops->next pointer is valid before another CPU sees
145 * the ops pointer included into the ftrace_list.
150 if (ftrace_enabled) {
153 if (ops->next == &ftrace_list_end)
156 func = ftrace_list_func;
158 if (ftrace_pid_trace) {
159 set_ftrace_pid_function(func);
160 func = ftrace_pid_func;
164 * For one func, simply call it directly.
165 * For more than one func, call the chain.
167 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
168 ftrace_trace_function = func;
170 __ftrace_trace_function = func;
171 ftrace_trace_function = ftrace_test_stop_func;
175 spin_unlock(&ftrace_lock);
180 static int __unregister_ftrace_function(struct ftrace_ops *ops)
182 struct ftrace_ops **p;
185 /* should not be called from interrupt context */
186 spin_lock(&ftrace_lock);
189 * If we are removing the last function, then simply point
190 * to the ftrace_stub.
192 if (ftrace_list == ops && ops->next == &ftrace_list_end) {
193 ftrace_trace_function = ftrace_stub;
194 ftrace_list = &ftrace_list_end;
198 for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
209 if (ftrace_enabled) {
210 /* If we only have one func left, then call that directly */
211 if (ftrace_list->next == &ftrace_list_end) {
212 ftrace_func_t func = ftrace_list->func;
214 if (ftrace_pid_trace) {
215 set_ftrace_pid_function(func);
216 func = ftrace_pid_func;
218 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
219 ftrace_trace_function = func;
221 __ftrace_trace_function = func;
227 spin_unlock(&ftrace_lock);
232 static void ftrace_update_pid_func(void)
236 /* should not be called from interrupt context */
237 spin_lock(&ftrace_lock);
239 if (ftrace_trace_function == ftrace_stub)
242 func = ftrace_trace_function;
244 if (ftrace_pid_trace) {
245 set_ftrace_pid_function(func);
246 func = ftrace_pid_func;
248 if (func == ftrace_pid_func)
249 func = ftrace_pid_function;
252 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
253 ftrace_trace_function = func;
255 __ftrace_trace_function = func;
259 spin_unlock(&ftrace_lock);
262 #ifdef CONFIG_DYNAMIC_FTRACE
263 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
264 # error Dynamic ftrace depends on MCOUNT_RECORD
268 FTRACE_ENABLE_CALLS = (1 << 0),
269 FTRACE_DISABLE_CALLS = (1 << 1),
270 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
271 FTRACE_ENABLE_MCOUNT = (1 << 3),
272 FTRACE_DISABLE_MCOUNT = (1 << 4),
273 FTRACE_START_FUNC_RET = (1 << 5),
274 FTRACE_STOP_FUNC_RET = (1 << 6),
277 static int ftrace_filtered;
279 static LIST_HEAD(ftrace_new_addrs);
281 static DEFINE_MUTEX(ftrace_regex_lock);
284 struct ftrace_page *next;
286 struct dyn_ftrace records[];
289 #define ENTRIES_PER_PAGE \
290 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
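/*
 * Rough sizing (illustrative, arch-dependent assumptions): with 4 KiB pages
 * and a struct dyn_ftrace of a few dozen bytes, this works out to on the
 * order of a hundred records per page, so the NR_TO_INIT estimate below
 * fits in roughly a hundred pages.
 */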
292 /* estimate from running different kernels */
293 #define NR_TO_INIT 10000
295 static struct ftrace_page *ftrace_pages_start;
296 static struct ftrace_page *ftrace_pages;
298 static struct dyn_ftrace *ftrace_free_records;
/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
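/*
 * Typical use of the iteration helpers above (sketch, mirroring callers
 * such as ftrace_filter_reset() later in this file):
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->flags & FTRACE_FL_FAILED)
 *			continue;
 *		rec->flags &= ~FTRACE_FL_FILTER;
 *	} while_for_each_ftrace_rec();
 */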
314 #ifdef CONFIG_KPROBES
316 static int frozen_record_count;
318 static inline void freeze_record(struct dyn_ftrace *rec)
320 if (!(rec->flags & FTRACE_FL_FROZEN)) {
321 rec->flags |= FTRACE_FL_FROZEN;
322 frozen_record_count++;
326 static inline void unfreeze_record(struct dyn_ftrace *rec)
328 if (rec->flags & FTRACE_FL_FROZEN) {
329 rec->flags &= ~FTRACE_FL_FROZEN;
330 frozen_record_count--;
334 static inline int record_frozen(struct dyn_ftrace *rec)
336 return rec->flags & FTRACE_FL_FROZEN;
339 # define freeze_record(rec) ({ 0; })
340 # define unfreeze_record(rec) ({ 0; })
341 # define record_frozen(rec) ({ 0; })
342 #endif /* CONFIG_KPROBES */
344 static void ftrace_free_rec(struct dyn_ftrace *rec)
346 rec->ip = (unsigned long)ftrace_free_records;
347 ftrace_free_records = rec;
348 rec->flags |= FTRACE_FL_FREE;
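/*
 * Note on the free list above: a freed dyn_ftrace record is chained into
 * ftrace_free_records by reusing its ->ip field as the "next" pointer, and
 * FTRACE_FL_FREE marks it so ftrace_alloc_dyn_node() can sanity check the
 * list before handing the record back out.
 */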
351 void ftrace_release(void *start, unsigned long size)
353 struct dyn_ftrace *rec;
354 struct ftrace_page *pg;
355 unsigned long s = (unsigned long)start;
356 unsigned long e = s + size;
358 if (ftrace_disabled || !start)
361 /* should not be called from interrupt context */
362 spin_lock(&ftrace_lock);
364 do_for_each_ftrace_rec(pg, rec) {
365 if ((rec->ip >= s) && (rec->ip < e))
366 ftrace_free_rec(rec);
367 } while_for_each_ftrace_rec();
369 spin_unlock(&ftrace_lock);
372 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
374 struct dyn_ftrace *rec;
376 /* First check for freed records */
377 if (ftrace_free_records) {
378 rec = ftrace_free_records;
380 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
381 FTRACE_WARN_ON_ONCE(1);
382 ftrace_free_records = NULL;
386 ftrace_free_records = (void *)rec->ip;
387 memset(rec, 0, sizeof(*rec));
391 if (ftrace_pages->index == ENTRIES_PER_PAGE) {
392 if (!ftrace_pages->next) {
393 /* allocate another page */
395 (void *)get_zeroed_page(GFP_KERNEL);
396 if (!ftrace_pages->next)
399 ftrace_pages = ftrace_pages->next;
402 return &ftrace_pages->records[ftrace_pages->index++];
405 static struct dyn_ftrace *
406 ftrace_record_ip(unsigned long ip)
408 struct dyn_ftrace *rec;
413 rec = ftrace_alloc_dyn_node(ip);
419 list_add(&rec->list, &ftrace_new_addrs);
424 static void print_ip_ins(const char *fmt, unsigned char *p)
428 printk(KERN_CONT "%s", fmt);
430 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
431 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
434 static void ftrace_bug(int failed, unsigned long ip)
438 FTRACE_WARN_ON_ONCE(1);
439 pr_info("ftrace faulted on modifying ");
443 FTRACE_WARN_ON_ONCE(1);
444 pr_info("ftrace failed to modify ");
446 print_ip_ins(" actual: ", (unsigned char *)ip);
447 printk(KERN_CONT "\n");
450 FTRACE_WARN_ON_ONCE(1);
451 pr_info("ftrace faulted on writing ");
455 FTRACE_WARN_ON_ONCE(1);
456 pr_info("ftrace faulted on unknown error ");
463 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
465 unsigned long ip, fl;
466 unsigned long ftrace_addr;
468 ftrace_addr = (unsigned long)FTRACE_ADDR;
473 * If this record is not to be traced and
474 * it is not enabled then do nothing.
476 * If this record is not to be traced and
477 * it is enabled then disable it.
480 if (rec->flags & FTRACE_FL_NOTRACE) {
481 if (rec->flags & FTRACE_FL_ENABLED)
482 rec->flags &= ~FTRACE_FL_ENABLED;
486 } else if (ftrace_filtered && enable) {
491 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
493 /* Record is filtered and enabled, do nothing */
494 if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
497 /* Record is not filtered or enabled, do nothing */
501 /* Record is not filtered but enabled, disable it */
502 if (fl == FTRACE_FL_ENABLED)
503 rec->flags &= ~FTRACE_FL_ENABLED;
505 /* Otherwise record is filtered but not enabled, enable it */
506 rec->flags |= FTRACE_FL_ENABLED;
508 /* Disable or not filtered */
511 /* if record is enabled, do nothing */
512 if (rec->flags & FTRACE_FL_ENABLED)
515 rec->flags |= FTRACE_FL_ENABLED;
519 /* if record is not enabled, do nothing */
520 if (!(rec->flags & FTRACE_FL_ENABLED))
523 rec->flags &= ~FTRACE_FL_ENABLED;
527 if (rec->flags & FTRACE_FL_ENABLED)
528 return ftrace_make_call(rec, ftrace_addr);
530 return ftrace_make_nop(NULL, rec, ftrace_addr);
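/*
 * Summary of the decision above when ftrace_filtered is set and we are
 * enabling:
 *
 *	FILTER  ENABLED		action
 *	  1        1		nothing (already traced)
 *	  0        0		nothing (not selected)
 *	  0        1		clear ENABLED (stop tracing it)
 *	  1        0		set ENABLED (start tracing it)
 *
 * The final patch direction follows FTRACE_FL_ENABLED: set means patch in
 * the call to ftrace, clear means patch the site back to a nop.
 */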
533 static void ftrace_replace_code(int enable)
536 struct dyn_ftrace *rec;
537 struct ftrace_page *pg;
539 do_for_each_ftrace_rec(pg, rec) {
541 * Skip over free records and records that have
544 if (rec->flags & FTRACE_FL_FREE ||
545 rec->flags & FTRACE_FL_FAILED)
548 /* ignore updates to this record's mcount site */
549 if (get_kprobe((void *)rec->ip)) {
553 unfreeze_record(rec);
556 failed = __ftrace_replace_code(rec, enable);
557 if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
558 rec->flags |= FTRACE_FL_FAILED;
559 if ((system_state == SYSTEM_BOOTING) ||
560 !core_kernel_text(rec->ip)) {
561 ftrace_free_rec(rec);
563 ftrace_bug(failed, rec->ip);
565 } while_for_each_ftrace_rec();
569 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
576 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
579 rec->flags |= FTRACE_FL_FAILED;
585 static int __ftrace_modify_code(void *data)
589 if (*command & FTRACE_ENABLE_CALLS)
590 ftrace_replace_code(1);
591 else if (*command & FTRACE_DISABLE_CALLS)
592 ftrace_replace_code(0);
594 if (*command & FTRACE_UPDATE_TRACE_FUNC)
595 ftrace_update_ftrace_func(ftrace_trace_function);
597 if (*command & FTRACE_START_FUNC_RET)
598 ftrace_enable_ftrace_graph_caller();
599 else if (*command & FTRACE_STOP_FUNC_RET)
600 ftrace_disable_ftrace_graph_caller();
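/*
 * __ftrace_modify_code() is run via stop_machine() below: every mcount
 * call site is patched while all other CPUs are held in a known state,
 * so no CPU can execute an instruction that is being rewritten.
 */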
605 static void ftrace_run_update_code(int command)
607 stop_machine(__ftrace_modify_code, &command, NULL);
610 static ftrace_func_t saved_ftrace_func;
611 static int ftrace_start_up;
613 static void ftrace_startup_enable(int command)
615 if (saved_ftrace_func != ftrace_trace_function) {
616 saved_ftrace_func = ftrace_trace_function;
617 command |= FTRACE_UPDATE_TRACE_FUNC;
620 if (!command || !ftrace_enabled)
623 ftrace_run_update_code(command);
626 static void ftrace_startup(int command)
628 if (unlikely(ftrace_disabled))
631 mutex_lock(&ftrace_start_lock);
633 command |= FTRACE_ENABLE_CALLS;
635 ftrace_startup_enable(command);
637 mutex_unlock(&ftrace_start_lock);
640 static void ftrace_shutdown(int command)
642 if (unlikely(ftrace_disabled))
645 mutex_lock(&ftrace_start_lock);
647 if (!ftrace_start_up)
648 command |= FTRACE_DISABLE_CALLS;
650 if (saved_ftrace_func != ftrace_trace_function) {
651 saved_ftrace_func = ftrace_trace_function;
652 command |= FTRACE_UPDATE_TRACE_FUNC;
655 if (!command || !ftrace_enabled)
658 ftrace_run_update_code(command);
660 mutex_unlock(&ftrace_start_lock);
663 static void ftrace_startup_sysctl(void)
665 int command = FTRACE_ENABLE_MCOUNT;
667 if (unlikely(ftrace_disabled))
670 mutex_lock(&ftrace_start_lock);
671 /* Force update next time */
672 saved_ftrace_func = NULL;
673 /* ftrace_start_up is true if we want ftrace running */
675 command |= FTRACE_ENABLE_CALLS;
677 ftrace_run_update_code(command);
678 mutex_unlock(&ftrace_start_lock);
681 static void ftrace_shutdown_sysctl(void)
683 int command = FTRACE_DISABLE_MCOUNT;
685 if (unlikely(ftrace_disabled))
688 mutex_lock(&ftrace_start_lock);
689 /* ftrace_start_up is true if ftrace is running */
691 command |= FTRACE_DISABLE_CALLS;
693 ftrace_run_update_code(command);
694 mutex_unlock(&ftrace_start_lock);
697 static cycle_t ftrace_update_time;
698 static unsigned long ftrace_update_cnt;
699 unsigned long ftrace_update_tot_cnt;
701 static int ftrace_update_code(struct module *mod)
703 struct dyn_ftrace *p, *t;
706 start = ftrace_now(raw_smp_processor_id());
707 ftrace_update_cnt = 0;
709 list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {
711 /* If something went wrong, bail without enabling anything */
712 if (unlikely(ftrace_disabled))
715 list_del_init(&p->list);
/* convert record (i.e., patch mcount-call with NOP) */
718 if (ftrace_code_disable(mod, p)) {
719 p->flags |= FTRACE_FL_CONVERTED;
725 stop = ftrace_now(raw_smp_processor_id());
726 ftrace_update_time = stop - start;
727 ftrace_update_tot_cnt += ftrace_update_cnt;
732 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
734 struct ftrace_page *pg;
738 /* allocate a few pages */
739 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
740 if (!ftrace_pages_start)
744 * Allocate a few more pages.
746 * TODO: have some parser search vmlinux before
747 * final linking to find all calls to ftrace.
749 * a) know how many pages to allocate.
751 * b) set up the table then.
753 * The dynamic code is still necessary for
757 pg = ftrace_pages = ftrace_pages_start;
759 cnt = num_to_init / ENTRIES_PER_PAGE;
760 pr_info("ftrace: allocating %ld entries in %d pages\n",
761 num_to_init, cnt + 1);
763 for (i = 0; i < cnt; i++) {
764 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
766 /* If we fail, we'll try later anyway */
777 FTRACE_ITER_FILTER = (1 << 0),
778 FTRACE_ITER_CONT = (1 << 1),
779 FTRACE_ITER_NOTRACE = (1 << 2),
780 FTRACE_ITER_FAILURES = (1 << 3),
781 FTRACE_ITER_PRINTALL = (1 << 4),
784 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
786 struct ftrace_iterator {
787 struct ftrace_page *pg;
790 unsigned char buffer[FTRACE_BUFF_MAX+1];
796 t_next(struct seq_file *m, void *v, loff_t *pos)
798 struct ftrace_iterator *iter = m->private;
799 struct dyn_ftrace *rec = NULL;
803 if (iter->flags & FTRACE_ITER_PRINTALL)
806 /* should not be called from interrupt context */
807 spin_lock(&ftrace_lock);
809 if (iter->idx >= iter->pg->index) {
810 if (iter->pg->next) {
811 iter->pg = iter->pg->next;
818 rec = &iter->pg->records[iter->idx++];
819 if ((rec->flags & FTRACE_FL_FREE) ||
821 (!(iter->flags & FTRACE_ITER_FAILURES) &&
822 (rec->flags & FTRACE_FL_FAILED)) ||
824 ((iter->flags & FTRACE_ITER_FAILURES) &&
825 !(rec->flags & FTRACE_FL_FAILED)) ||
827 ((iter->flags & FTRACE_ITER_FILTER) &&
828 !(rec->flags & FTRACE_FL_FILTER)) ||
830 ((iter->flags & FTRACE_ITER_NOTRACE) &&
831 !(rec->flags & FTRACE_FL_NOTRACE))) {
836 spin_unlock(&ftrace_lock);
841 static void *t_start(struct seq_file *m, loff_t *pos)
843 struct ftrace_iterator *iter = m->private;
847 * For set_ftrace_filter reading, if we have the filter
848 * off, we can short cut and just print out that all
849 * functions are enabled.
851 if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
854 iter->flags |= FTRACE_ITER_PRINTALL;
866 p = t_next(m, p, pos);
871 static void t_stop(struct seq_file *m, void *p)
875 static int t_show(struct seq_file *m, void *v)
877 struct ftrace_iterator *iter = m->private;
878 struct dyn_ftrace *rec = v;
879 char str[KSYM_SYMBOL_LEN];
881 if (iter->flags & FTRACE_ITER_PRINTALL) {
882 seq_printf(m, "#### all functions enabled ####\n");
889 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
891 seq_printf(m, "%s\n", str);
896 static struct seq_operations show_ftrace_seq_ops = {
904 ftrace_avail_open(struct inode *inode, struct file *file)
906 struct ftrace_iterator *iter;
909 if (unlikely(ftrace_disabled))
912 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
916 iter->pg = ftrace_pages_start;
918 ret = seq_open(file, &show_ftrace_seq_ops);
920 struct seq_file *m = file->private_data;
930 int ftrace_avail_release(struct inode *inode, struct file *file)
932 struct seq_file *m = (struct seq_file *)file->private_data;
933 struct ftrace_iterator *iter = m->private;
935 seq_release(inode, file);
942 ftrace_failures_open(struct inode *inode, struct file *file)
946 struct ftrace_iterator *iter;
948 ret = ftrace_avail_open(inode, file);
950 m = (struct seq_file *)file->private_data;
951 iter = (struct ftrace_iterator *)m->private;
952 iter->flags = FTRACE_ITER_FAILURES;
959 static void ftrace_filter_reset(int enable)
961 struct ftrace_page *pg;
962 struct dyn_ftrace *rec;
963 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
965 /* should not be called from interrupt context */
966 spin_lock(&ftrace_lock);
969 do_for_each_ftrace_rec(pg, rec) {
970 if (rec->flags & FTRACE_FL_FAILED)
973 } while_for_each_ftrace_rec();
975 spin_unlock(&ftrace_lock);
979 ftrace_regex_open(struct inode *inode, struct file *file, int enable)
981 struct ftrace_iterator *iter;
984 if (unlikely(ftrace_disabled))
987 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
991 mutex_lock(&ftrace_regex_lock);
992 if ((file->f_mode & FMODE_WRITE) &&
993 !(file->f_flags & O_APPEND))
994 ftrace_filter_reset(enable);
996 if (file->f_mode & FMODE_READ) {
997 iter->pg = ftrace_pages_start;
998 iter->flags = enable ? FTRACE_ITER_FILTER :
1001 ret = seq_open(file, &show_ftrace_seq_ops);
1003 struct seq_file *m = file->private_data;
1008 file->private_data = iter;
1009 mutex_unlock(&ftrace_regex_lock);
1015 ftrace_filter_open(struct inode *inode, struct file *file)
1017 return ftrace_regex_open(inode, file, 1);
1021 ftrace_notrace_open(struct inode *inode, struct file *file)
1023 return ftrace_regex_open(inode, file, 0);
1027 ftrace_regex_read(struct file *file, char __user *ubuf,
1028 size_t cnt, loff_t *ppos)
1030 if (file->f_mode & FMODE_READ)
1031 return seq_read(file, ubuf, cnt, ppos);
1037 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1041 if (file->f_mode & FMODE_READ)
1042 ret = seq_lseek(file, offset, origin);
1044 file->f_pos = ret = 1;
1057 ftrace_match(unsigned char *buff, int len, int enable)
1059 char str[KSYM_SYMBOL_LEN];
1060 char *search = NULL;
1061 struct ftrace_page *pg;
1062 struct dyn_ftrace *rec;
1063 int type = MATCH_FULL;
1064 unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1065 unsigned i, match = 0, search_len = 0;
1068 if (buff[0] == '!') {
1074 for (i = 0; i < len; i++) {
1075 if (buff[i] == '*') {
1077 search = buff + i + 1;
1078 type = MATCH_END_ONLY;
1079 search_len = len - (i + 1);
1081 if (type == MATCH_END_ONLY) {
1082 type = MATCH_MIDDLE_ONLY;
1085 type = MATCH_FRONT_ONLY;
1093 /* should not be called from interrupt context */
1094 spin_lock(&ftrace_lock);
1096 ftrace_filtered = 1;
1097 do_for_each_ftrace_rec(pg, rec) {
1101 if (rec->flags & FTRACE_FL_FAILED)
1103 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1106 if (strcmp(str, buff) == 0)
1109 case MATCH_FRONT_ONLY:
1110 if (memcmp(str, buff, match) == 0)
1113 case MATCH_MIDDLE_ONLY:
1114 if (strstr(str, search))
1117 case MATCH_END_ONLY:
1118 ptr = strstr(str, search);
1119 if (ptr && (ptr[search_len] == 0))
1125 rec->flags &= ~flag;
1129 } while_for_each_ftrace_rec();
1130 spin_unlock(&ftrace_lock);
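/*
 * Glob forms accepted above (derived from the MATCH_* handling):
 *
 *	"func"		exact match
 *	"func*"		match functions starting with "func"
 *	"*func"		match functions ending with "func"
 *	"*func*"	match functions containing "func"
 *
 * A leading '!' clears the flag instead of setting it, so e.g. writing
 * "!sched*" to set_ftrace_filter drops previously filtered sched functions.
 */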
1134 ftrace_regex_write(struct file *file, const char __user *ubuf,
1135 size_t cnt, loff_t *ppos, int enable)
1137 struct ftrace_iterator *iter;
1142 if (!cnt || cnt < 0)
1145 mutex_lock(&ftrace_regex_lock);
1147 if (file->f_mode & FMODE_READ) {
1148 struct seq_file *m = file->private_data;
1151 iter = file->private_data;
1154 iter->flags &= ~FTRACE_ITER_CONT;
1155 iter->buffer_idx = 0;
1158 ret = get_user(ch, ubuf++);
1164 if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1165 /* skip white space */
1166 while (cnt && isspace(ch)) {
1167 ret = get_user(ch, ubuf++);
1175 file->f_pos += read;
1180 iter->buffer_idx = 0;
1183 while (cnt && !isspace(ch)) {
1184 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1185 iter->buffer[iter->buffer_idx++] = ch;
1190 ret = get_user(ch, ubuf++);
1199 iter->buffer[iter->buffer_idx] = 0;
1200 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1201 iter->buffer_idx = 0;
1203 iter->flags |= FTRACE_ITER_CONT;
1206 file->f_pos += read;
1210 mutex_unlock(&ftrace_regex_lock);
1216 ftrace_filter_write(struct file *file, const char __user *ubuf,
1217 size_t cnt, loff_t *ppos)
1219 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
1223 ftrace_notrace_write(struct file *file, const char __user *ubuf,
1224 size_t cnt, loff_t *ppos)
1226 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
1230 ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
1232 if (unlikely(ftrace_disabled))
1235 mutex_lock(&ftrace_regex_lock);
1237 ftrace_filter_reset(enable);
1239 ftrace_match(buf, len, enable);
1240 mutex_unlock(&ftrace_regex_lock);
1244 * ftrace_set_filter - set a function to filter on in ftrace
1245 * @buf - the string that holds the function filter text.
1246 * @len - the length of the string.
1247 * @reset - non zero to reset all filters before applying this filter.
1249 * Filters denote which functions should be enabled when tracing is enabled.
1250 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
1252 void ftrace_set_filter(unsigned char *buf, int len, int reset)
1254 ftrace_set_regex(buf, len, reset, 1);
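/*
 * Example (sketch): a tracer that only cares about scheduler functions
 * could call
 *
 *	ftrace_set_filter("sched_*", strlen("sched_*"), 1);
 *
 * before registering its ftrace_ops; the reset argument first clears any
 * previously set filter.
 */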
1258 * ftrace_set_notrace - set a function to not trace in ftrace
1259 * @buf - the string that holds the function notrace text.
1260 * @len - the length of the string.
1261 * @reset - non zero to reset all filters before applying this filter.
1263 * Notrace Filters denote which functions should not be enabled when tracing
1264 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
1267 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
1269 ftrace_set_regex(buf, len, reset, 0);
1273 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
1275 struct seq_file *m = (struct seq_file *)file->private_data;
1276 struct ftrace_iterator *iter;
1278 mutex_lock(&ftrace_regex_lock);
1279 if (file->f_mode & FMODE_READ) {
1282 seq_release(inode, file);
1284 iter = file->private_data;
1286 if (iter->buffer_idx) {
1288 iter->buffer[iter->buffer_idx] = 0;
1289 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1292 mutex_lock(&ftrace_sysctl_lock);
1293 mutex_lock(&ftrace_start_lock);
1294 if (ftrace_start_up && ftrace_enabled)
1295 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1296 mutex_unlock(&ftrace_start_lock);
1297 mutex_unlock(&ftrace_sysctl_lock);
1300 mutex_unlock(&ftrace_regex_lock);
1305 ftrace_filter_release(struct inode *inode, struct file *file)
1307 return ftrace_regex_release(inode, file, 1);
1311 ftrace_notrace_release(struct inode *inode, struct file *file)
1313 return ftrace_regex_release(inode, file, 0);
1316 static struct file_operations ftrace_avail_fops = {
1317 .open = ftrace_avail_open,
1319 .llseek = seq_lseek,
1320 .release = ftrace_avail_release,
1323 static struct file_operations ftrace_failures_fops = {
1324 .open = ftrace_failures_open,
1326 .llseek = seq_lseek,
1327 .release = ftrace_avail_release,
1330 static struct file_operations ftrace_filter_fops = {
1331 .open = ftrace_filter_open,
1332 .read = ftrace_regex_read,
1333 .write = ftrace_filter_write,
1334 .llseek = ftrace_regex_lseek,
1335 .release = ftrace_filter_release,
1338 static struct file_operations ftrace_notrace_fops = {
1339 .open = ftrace_notrace_open,
1340 .read = ftrace_regex_read,
1341 .write = ftrace_notrace_write,
1342 .llseek = ftrace_regex_lseek,
1343 .release = ftrace_notrace_release,
1346 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1348 static DEFINE_MUTEX(graph_lock);
1350 int ftrace_graph_count;
1351 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
1354 g_next(struct seq_file *m, void *v, loff_t *pos)
1356 unsigned long *array = m->private;
1361 if (index >= ftrace_graph_count)
1364 return &array[index];
1367 static void *g_start(struct seq_file *m, loff_t *pos)
1371 mutex_lock(&graph_lock);
1373 p = g_next(m, p, pos);
1378 static void g_stop(struct seq_file *m, void *p)
1380 mutex_unlock(&graph_lock);
1383 static int g_show(struct seq_file *m, void *v)
1385 unsigned long *ptr = v;
1386 char str[KSYM_SYMBOL_LEN];
1391 kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
1393 seq_printf(m, "%s\n", str);
1398 static struct seq_operations ftrace_graph_seq_ops = {
1406 ftrace_graph_open(struct inode *inode, struct file *file)
1410 if (unlikely(ftrace_disabled))
1413 mutex_lock(&graph_lock);
1414 if ((file->f_mode & FMODE_WRITE) &&
1415 !(file->f_flags & O_APPEND)) {
1416 ftrace_graph_count = 0;
1417 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
1420 if (file->f_mode & FMODE_READ) {
1421 ret = seq_open(file, &ftrace_graph_seq_ops);
1423 struct seq_file *m = file->private_data;
1424 m->private = ftrace_graph_funcs;
1427 file->private_data = ftrace_graph_funcs;
1428 mutex_unlock(&graph_lock);
1434 ftrace_graph_read(struct file *file, char __user *ubuf,
1435 size_t cnt, loff_t *ppos)
1437 if (file->f_mode & FMODE_READ)
1438 return seq_read(file, ubuf, cnt, ppos);
1444 ftrace_set_func(unsigned long *array, int idx, char *buffer)
1446 char str[KSYM_SYMBOL_LEN];
1447 struct dyn_ftrace *rec;
1448 struct ftrace_page *pg;
1452 if (ftrace_disabled)
1455 /* should not be called from interrupt context */
1456 spin_lock(&ftrace_lock);
1458 do_for_each_ftrace_rec(pg, rec) {
1460 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
1463 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1464 if (strcmp(str, buffer) == 0) {
1465 /* Return 1 if we add it to the array */
1467 for (j = 0; j < idx; j++)
1468 if (array[j] == rec->ip) {
1473 array[idx] = rec->ip;
1476 } while_for_each_ftrace_rec();
1478 spin_unlock(&ftrace_lock);
1480 return found ? 0 : -EINVAL;
1484 ftrace_graph_write(struct file *file, const char __user *ubuf,
1485 size_t cnt, loff_t *ppos)
1487 unsigned char buffer[FTRACE_BUFF_MAX+1];
1488 unsigned long *array;
1494 if (!cnt || cnt < 0)
1497 mutex_lock(&graph_lock);
1499 if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
1504 if (file->f_mode & FMODE_READ) {
1505 struct seq_file *m = file->private_data;
1508 array = file->private_data;
1510 ret = get_user(ch, ubuf++);
1516 /* skip white space */
1517 while (cnt && isspace(ch)) {
1518 ret = get_user(ch, ubuf++);
1531 while (cnt && !isspace(ch)) {
1532 if (index < FTRACE_BUFF_MAX)
1533 buffer[index++] = ch;
1538 ret = get_user(ch, ubuf++);
1546 /* we allow only one at a time */
1547 ret = ftrace_set_func(array, ftrace_graph_count, buffer);
1551 ftrace_graph_count++;
1553 file->f_pos += read;
1557 mutex_unlock(&graph_lock);
1562 static const struct file_operations ftrace_graph_fops = {
1563 .open = ftrace_graph_open,
1564 .read = ftrace_graph_read,
1565 .write = ftrace_graph_write,
1567 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1569 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
1571 struct dentry *entry;
1573 entry = debugfs_create_file("available_filter_functions", 0444,
1574 d_tracer, NULL, &ftrace_avail_fops);
1576 pr_warning("Could not create debugfs "
1577 "'available_filter_functions' entry\n");
1579 entry = debugfs_create_file("failures", 0444,
1580 d_tracer, NULL, &ftrace_failures_fops);
1582 pr_warning("Could not create debugfs 'failures' entry\n");
1584 entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
1585 NULL, &ftrace_filter_fops);
1587 pr_warning("Could not create debugfs "
1588 "'set_ftrace_filter' entry\n");
1590 entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
1591 NULL, &ftrace_notrace_fops);
1593 pr_warning("Could not create debugfs "
1594 "'set_ftrace_notrace' entry\n");
1596 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1597 entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
1599 &ftrace_graph_fops);
1601 pr_warning("Could not create debugfs "
1602 "'set_graph_function' entry\n");
1603 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1608 static int ftrace_convert_nops(struct module *mod,
1609 unsigned long *start,
1614 unsigned long flags;
1616 mutex_lock(&ftrace_start_lock);
1619 addr = ftrace_call_adjust(*p++);
1621 * Some architecture linkers will pad between
1622 * the different mcount_loc sections of different
1623 * object files to satisfy alignments.
1624 * Skip any NULL pointers.
1628 ftrace_record_ip(addr);
1631 /* disable interrupts to prevent kstop machine */
1632 local_irq_save(flags);
1633 ftrace_update_code(mod);
1634 local_irq_restore(flags);
1635 mutex_unlock(&ftrace_start_lock);
1640 void ftrace_init_module(struct module *mod,
1641 unsigned long *start, unsigned long *end)
1643 if (ftrace_disabled || start == end)
1645 ftrace_convert_nops(mod, start, end);
1648 extern unsigned long __start_mcount_loc[];
1649 extern unsigned long __stop_mcount_loc[];
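/*
 * __start_mcount_loc/__stop_mcount_loc delimit the __mcount_loc section
 * that the build (CONFIG_FTRACE_MCOUNT_RECORD) fills with the address of
 * every mcount call site in the kernel proper; ftrace_init() walks that
 * table to convert each site to a nop before tracing is enabled.
 */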
1651 void __init ftrace_init(void)
1653 unsigned long count, addr, flags;
1656 /* Keep the ftrace pointer to the stub */
1657 addr = (unsigned long)ftrace_stub;
1659 local_irq_save(flags);
1660 ftrace_dyn_arch_init(&addr);
1661 local_irq_restore(flags);
1663 /* ftrace_dyn_arch_init places the return code in addr */
1667 count = __stop_mcount_loc - __start_mcount_loc;
1669 ret = ftrace_dyn_table_alloc(count);
1673 last_ftrace_enabled = ftrace_enabled = 1;
1675 ret = ftrace_convert_nops(NULL,
1681 ftrace_disabled = 1;
1686 static int __init ftrace_nodyn_init(void)
1691 device_initcall(ftrace_nodyn_init);
1693 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
1694 static inline void ftrace_startup_enable(int command) { }
1695 /* Keep as macros so we do not need to define the commands */
1696 # define ftrace_startup(command) do { } while (0)
1697 # define ftrace_shutdown(command) do { } while (0)
1698 # define ftrace_startup_sysctl() do { } while (0)
1699 # define ftrace_shutdown_sysctl() do { } while (0)
1700 #endif /* CONFIG_DYNAMIC_FTRACE */
1703 ftrace_pid_read(struct file *file, char __user *ubuf,
1704 size_t cnt, loff_t *ppos)
1709 if (ftrace_pid_trace == ftrace_swapper_pid)
1710 r = sprintf(buf, "swapper tasks\n");
1711 else if (ftrace_pid_trace)
1712 r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
1714 r = sprintf(buf, "no pid\n");
1716 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1719 static void clear_ftrace_swapper(void)
1721 struct task_struct *p;
1725 for_each_online_cpu(cpu) {
1727 clear_tsk_trace_trace(p);
1732 static void set_ftrace_swapper(void)
1734 struct task_struct *p;
1738 for_each_online_cpu(cpu) {
1740 set_tsk_trace_trace(p);
1745 static void clear_ftrace_pid(struct pid *pid)
1747 struct task_struct *p;
1750 do_each_pid_task(pid, PIDTYPE_PID, p) {
1751 clear_tsk_trace_trace(p);
1752 } while_each_pid_task(pid, PIDTYPE_PID, p);
1758 static void set_ftrace_pid(struct pid *pid)
1760 struct task_struct *p;
1763 do_each_pid_task(pid, PIDTYPE_PID, p) {
1764 set_tsk_trace_trace(p);
1765 } while_each_pid_task(pid, PIDTYPE_PID, p);
1769 static void clear_ftrace_pid_task(struct pid **pid)
1771 if (*pid == ftrace_swapper_pid)
1772 clear_ftrace_swapper();
1774 clear_ftrace_pid(*pid);
1779 static void set_ftrace_pid_task(struct pid *pid)
1781 if (pid == ftrace_swapper_pid)
1782 set_ftrace_swapper();
1784 set_ftrace_pid(pid);
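/*
 * set_ftrace_pid semantics (as implemented below): writing a positive pid
 * limits function tracing to that task, writing 0 selects the per-cpu
 * swapper/idle tasks, and writing a negative value turns pid filtering
 * back off.
 */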
1788 ftrace_pid_write(struct file *filp, const char __user *ubuf,
1789 size_t cnt, loff_t *ppos)
1796 if (cnt >= sizeof(buf))
1799 if (copy_from_user(&buf, ubuf, cnt))
1804 ret = strict_strtol(buf, 10, &val);
1808 mutex_lock(&ftrace_start_lock);
1810 /* disable pid tracing */
1811 if (!ftrace_pid_trace)
1814 clear_ftrace_pid_task(&ftrace_pid_trace);
1817 /* swapper task is special */
1819 pid = ftrace_swapper_pid;
1820 if (pid == ftrace_pid_trace)
1823 pid = find_get_pid(val);
1825 if (pid == ftrace_pid_trace) {
1831 if (ftrace_pid_trace)
1832 clear_ftrace_pid_task(&ftrace_pid_trace);
1837 ftrace_pid_trace = pid;
1839 set_ftrace_pid_task(ftrace_pid_trace);
1842 /* update the function call */
1843 ftrace_update_pid_func();
1844 ftrace_startup_enable(0);
1847 mutex_unlock(&ftrace_start_lock);
1852 static struct file_operations ftrace_pid_fops = {
1853 .read = ftrace_pid_read,
1854 .write = ftrace_pid_write,
1857 static __init int ftrace_init_debugfs(void)
1859 struct dentry *d_tracer;
1860 struct dentry *entry;
1862 d_tracer = tracing_init_dentry();
1866 ftrace_init_dyn_debugfs(d_tracer);
1868 entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
1869 NULL, &ftrace_pid_fops);
1871 pr_warning("Could not create debugfs "
1872 "'set_ftrace_pid' entry\n");
1876 fs_initcall(ftrace_init_debugfs);
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: it is for contexts where a clean
 * shutdown via unregister_ftrace_function() is not an option.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a recursive loop.
 */
1903 int register_ftrace_function(struct ftrace_ops *ops)
1907 if (unlikely(ftrace_disabled))
1910 mutex_lock(&ftrace_sysctl_lock);
1912 ret = __register_ftrace_function(ops);
1915 mutex_unlock(&ftrace_sysctl_lock);
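/*
 * Minimal usage sketch (my_trace_func/my_ops/my_hits are hypothetical, not
 * part of this file). The callback must be notrace, as the note above warns:
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		atomic_inc(&my_hits);
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */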
1920 * unregister_ftrace_function - unregister a function for profiling.
1921 * @ops - ops structure that holds the function to unregister
1923 * Unregister a function that was added to be called by ftrace profiling.
1925 int unregister_ftrace_function(struct ftrace_ops *ops)
1929 mutex_lock(&ftrace_sysctl_lock);
1930 ret = __unregister_ftrace_function(ops);
1932 mutex_unlock(&ftrace_sysctl_lock);
1938 ftrace_enable_sysctl(struct ctl_table *table, int write,
1939 struct file *file, void __user *buffer, size_t *lenp,
1944 if (unlikely(ftrace_disabled))
1947 mutex_lock(&ftrace_sysctl_lock);
1949 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
1951 if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
1954 last_ftrace_enabled = ftrace_enabled;
1956 if (ftrace_enabled) {
1958 ftrace_startup_sysctl();
1960 /* we are starting ftrace again */
1961 if (ftrace_list != &ftrace_list_end) {
1962 if (ftrace_list->next == &ftrace_list_end)
1963 ftrace_trace_function = ftrace_list->func;
1965 ftrace_trace_function = ftrace_list_func;
1969 /* stopping ftrace calls (just send to ftrace_stub) */
1970 ftrace_trace_function = ftrace_stub;
1972 ftrace_shutdown_sysctl();
1976 mutex_unlock(&ftrace_sysctl_lock);
1980 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1982 static atomic_t ftrace_graph_active;
1983 static struct notifier_block ftrace_suspend_notifier;
1985 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
1990 /* The callbacks that hook a function */
1991 trace_func_graph_ret_t ftrace_graph_return =
1992 (trace_func_graph_ret_t)ftrace_stub;
1993 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
/* Try to assign a return stack to each of FTRACE_RETSTACK_ALLOC_SIZE tasks at a time. */
1996 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
2000 unsigned long flags;
2001 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
2002 struct task_struct *g, *t;
2004 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
2005 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
2006 * sizeof(struct ftrace_ret_stack),
2008 if (!ret_stack_list[i]) {
2016 read_lock_irqsave(&tasklist_lock, flags);
2017 do_each_thread(g, t) {
2023 if (t->ret_stack == NULL) {
2024 t->curr_ret_stack = -1;
2025 /* Make sure IRQs see the -1 first: */
2027 t->ret_stack = ret_stack_list[start++];
2028 atomic_set(&t->tracing_graph_pause, 0);
2029 atomic_set(&t->trace_overrun, 0);
2031 } while_each_thread(g, t);
2034 read_unlock_irqrestore(&tasklist_lock, flags);
2036 for (i = start; i < end; i++)
2037 kfree(ret_stack_list[i]);
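/*
 * alloc_retstack_tasklist() above only prepares FTRACE_RETSTACK_ALLOC_SIZE
 * return stacks per call and hands them out under tasklist_lock; if more
 * tasks still need one it returns -EAGAIN, and start_graph_tracing() below
 * simply calls it again until every task has been covered.
 */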
2041 /* Allocate a return stack for each task */
2042 static int start_graph_tracing(void)
2044 struct ftrace_ret_stack **ret_stack_list;
2047 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
2048 sizeof(struct ftrace_ret_stack *),
2051 if (!ret_stack_list)
2055 ret = alloc_retstack_tasklist(ret_stack_list);
2056 } while (ret == -EAGAIN);
2058 kfree(ret_stack_list);
/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
2068 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
2072 case PM_HIBERNATION_PREPARE:
2073 pause_graph_tracing();
2076 case PM_POST_HIBERNATION:
2077 unpause_graph_tracing();
2083 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
2084 trace_func_graph_ent_t entryfunc)
2088 mutex_lock(&ftrace_sysctl_lock);
2090 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
2091 register_pm_notifier(&ftrace_suspend_notifier);
2093 atomic_inc(&ftrace_graph_active);
2094 ret = start_graph_tracing();
2096 atomic_dec(&ftrace_graph_active);
2100 ftrace_graph_return = retfunc;
2101 ftrace_graph_entry = entryfunc;
2103 ftrace_startup(FTRACE_START_FUNC_RET);
2106 mutex_unlock(&ftrace_sysctl_lock);
2110 void unregister_ftrace_graph(void)
2112 mutex_lock(&ftrace_sysctl_lock);
2114 atomic_dec(&ftrace_graph_active);
2115 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
2116 ftrace_graph_entry = ftrace_graph_entry_stub;
2117 ftrace_shutdown(FTRACE_STOP_FUNC_RET);
2118 unregister_pm_notifier(&ftrace_suspend_notifier);
2120 mutex_unlock(&ftrace_sysctl_lock);
2123 /* Allocate a return stack for newly created task */
2124 void ftrace_graph_init_task(struct task_struct *t)
2126 if (atomic_read(&ftrace_graph_active)) {
2127 t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
2128 * sizeof(struct ftrace_ret_stack),
2132 t->curr_ret_stack = -1;
2133 atomic_set(&t->tracing_graph_pause, 0);
2134 atomic_set(&t->trace_overrun, 0);
2136 t->ret_stack = NULL;
2139 void ftrace_graph_exit_task(struct task_struct *t)
2141 struct ftrace_ret_stack *ret_stack = t->ret_stack;
2143 t->ret_stack = NULL;
2144 /* NULL must become visible to IRQs before we free it: */
2150 void ftrace_graph_stop(void)