ftrace: add do_for_each_ftrace_rec and while_for_each_ftrace_rec
kernel/trace/ftrace.c (linux-2.6-block.git)
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

#define FTRACE_WARN_ON(cond)                    \
        do {                                    \
                if (WARN_ON(cond))              \
                        ftrace_kill();          \
        } while (0)

#define FTRACE_WARN_ON_ONCE(cond)               \
        do {                                    \
                if (WARN_ON_ONCE(cond))         \
                        ftrace_kill();          \
        } while (0)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static DEFINE_MUTEX(ftrace_start_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_list;

        /* in case someone actually ports this to alpha! */
        read_barrier_depends();

        while (op != &ftrace_list_end) {
                /* silly alpha */
                read_barrier_depends();
                op->func(ip, parent_ip);
                op = op->next;
        }
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
        if (!test_tsk_trace_trace(current))
                return;

        ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
        /* do not set ftrace_pid_function to itself! */
        if (func != ftrace_pid_func)
                ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before in-flight callers actually stop.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
        __ftrace_trace_function = ftrace_stub;
        ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
        if (function_trace_stop)
                return;

        __ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        smp_wmb();
        ftrace_list = ops;

        if (ftrace_enabled) {
                ftrace_func_t func;

                if (ops->next == &ftrace_list_end)
                        func = ops->func;
                else
                        func = ftrace_list_func;

                if (ftrace_pid_trace) {
                        set_ftrace_pid_function(func);
                        func = ftrace_pid_func;
                }

                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
                ftrace_trace_function = func;
#else
                __ftrace_trace_function = func;
                ftrace_trace_function = ftrace_test_stop_func;
#endif
        }

        spin_unlock(&ftrace_lock);

        return 0;
}
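
/*
 * Illustrative sketch only (not part of this file): a minimal caller
 * of the registration path above. "my_ops", "my_func" and "my_hits"
 * are made-up names; register_ftrace_function() is the public wrapper
 * that ends up in __register_ftrace_function().
 *
 *	static atomic_t my_hits = ATOMIC_INIT(0);
 *
 *	static void my_func(unsigned long ip, unsigned long parent_ip)
 *	{
 *		atomic_inc(&my_hits);
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */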

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;
        int ret = 0;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                goto out;
        }

        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops) {
                ret = -1;
                goto out;
        }

        *p = (*p)->next;

        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
                if (ftrace_list->next == &ftrace_list_end) {
                        ftrace_func_t func = ftrace_list->func;

                        if (ftrace_pid_trace) {
                                set_ftrace_pid_function(func);
                                func = ftrace_pid_func;
                        }
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
                        ftrace_trace_function = func;
#else
                        __ftrace_trace_function = func;
#endif
                }
        }

 out:
        spin_unlock(&ftrace_lock);

        return ret;
}

static void ftrace_update_pid_func(void)
{
        ftrace_func_t func;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        if (ftrace_trace_function == ftrace_stub)
                goto out;

        func = ftrace_trace_function;

        if (ftrace_pid_trace) {
                set_ftrace_pid_function(func);
                func = ftrace_pid_func;
        } else {
                if (func == ftrace_pid_func)
                        func = ftrace_pid_function;
        }

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
        ftrace_trace_function = func;
#else
        __ftrace_trace_function = func;
#endif

 out:
        spin_unlock(&ftrace_lock);
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_ENABLE_MCOUNT            = (1 << 3),
        FTRACE_DISABLE_MCOUNT           = (1 << 4),
        FTRACE_START_FUNC_RET           = (1 << 5),
        FTRACE_STOP_FUNC_RET            = (1 << 6),
};

static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
        struct ftrace_page      *next;
        int                     index;
        struct dyn_ftrace       records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page       *ftrace_pages_start;
static struct ftrace_page       *ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)                                 \
        for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
                int _____i;                                             \
                for (_____i = 0; _____i < pg->index; _____i++) {        \
                        rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()             \
                }                               \
        }
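
/*
 * Example sketch of the iteration macros above. Because they expand to
 * two nested for loops, a plain 'break' only leaves the inner loop;
 * use a goto for early exit. "some_ip" is a made-up placeholder:
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == some_ip)
 *			goto found;
 *	} while_for_each_ftrace_rec();
 *	return NULL;
 * found:
 *	return rec;
 */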

#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
        if (!(rec->flags & FTRACE_FL_FROZEN)) {
                rec->flags |= FTRACE_FL_FROZEN;
                frozen_record_count++;
        }
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
        if (rec->flags & FTRACE_FL_FROZEN) {
                rec->flags &= ~FTRACE_FL_FROZEN;
                frozen_record_count--;
        }
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
        return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)                     ({ 0; })
# define unfreeze_record(rec)                   ({ 0; })
# define record_frozen(rec)                     ({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
        rec->ip = (unsigned long)ftrace_free_records;
        ftrace_free_records = rec;
        rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
        unsigned long s = (unsigned long)start;
        unsigned long e = s + size;

        if (ftrace_disabled || !start)
                return;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        do_for_each_ftrace_rec(pg, rec) {
                if ((rec->ip >= s) && (rec->ip < e))
                        ftrace_free_rec(rec);
        } while_for_each_ftrace_rec();

        spin_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        struct dyn_ftrace *rec;

        /* First check for freed records */
        if (ftrace_free_records) {
                rec = ftrace_free_records;

                if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
                        FTRACE_WARN_ON_ONCE(1);
                        ftrace_free_records = NULL;
                        return NULL;
                }

                ftrace_free_records = (void *)rec->ip;
                memset(rec, 0, sizeof(*rec));
                return rec;
        }

        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next) {
                        /* allocate another page */
                        ftrace_pages->next =
                                (void *)get_zeroed_page(GFP_KERNEL);
                        if (!ftrace_pages->next)
                                return NULL;
                }
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *rec;

        if (ftrace_disabled)
                return NULL;

        rec = ftrace_alloc_dyn_node(ip);
        if (!rec)
                return NULL;

        rec->ip = ip;

        list_add(&rec->list, &ftrace_new_addrs);

        return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
        int i;

        printk(KERN_CONT "%s", fmt);

        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
        switch (failed) {
        case -EFAULT:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on modifying ");
                print_ip_sym(ip);
                break;
        case -EINVAL:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace failed to modify ");
                print_ip_sym(ip);
                print_ip_ins(" actual: ", (unsigned char *)ip);
                printk(KERN_CONT "\n");
                break;
        case -EPERM:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on writing ");
                print_ip_sym(ip);
                break;
        default:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on unknown error ");
                print_ip_sym(ip);
        }
}


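/*
 * Summary, derived from the code below, of the "filtering is on"
 * decision in __ftrace_replace_code() (shown for reference):
 *
 *	FILTER	ENABLED	action
 *	  set	  set	do nothing (already enabled)
 *	 clear	 clear	do nothing (stays disabled)
 *	 clear	  set	disable the record
 *	  set	 clear	enable the record
 */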
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
        unsigned long ip, fl;
        unsigned long ftrace_addr;

        ftrace_addr = (unsigned long)FTRACE_ADDR;

        ip = rec->ip;

        /*
         * If this record is not to be traced and
         * it is not enabled then do nothing.
         *
         * If this record is not to be traced and
         * it is enabled then disable it.
         *
         */
        if (rec->flags & FTRACE_FL_NOTRACE) {
                if (rec->flags & FTRACE_FL_ENABLED)
                        rec->flags &= ~FTRACE_FL_ENABLED;
                else
                        return 0;

        } else if (ftrace_filtered && enable) {
                /*
                 * Filtering is on:
                 */

                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

                /* Record is filtered and enabled, do nothing */
                if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
                        return 0;

                /* Record is neither filtered nor enabled, do nothing */
                if (!fl)
                        return 0;

                /* Record is not filtered but enabled, disable it */
                if (fl == FTRACE_FL_ENABLED)
                        rec->flags &= ~FTRACE_FL_ENABLED;
                else
                /* Otherwise record is filtered but not enabled, enable it */
                        rec->flags |= FTRACE_FL_ENABLED;
        } else {
                /* Disable or not filtered */

                if (enable) {
                        /* if record is enabled, do nothing */
                        if (rec->flags & FTRACE_FL_ENABLED)
                                return 0;

                        rec->flags |= FTRACE_FL_ENABLED;

                } else {

                        /* if record is not enabled, do nothing */
                        if (!(rec->flags & FTRACE_FL_ENABLED))
                                return 0;

                        rec->flags &= ~FTRACE_FL_ENABLED;
                }
        }

        if (rec->flags & FTRACE_FL_ENABLED)
                return ftrace_make_call(rec, ftrace_addr);
        else
                return ftrace_make_nop(NULL, rec, ftrace_addr);
}

static void ftrace_replace_code(int enable)
{
        int failed;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;

        do_for_each_ftrace_rec(pg, rec) {
                /*
                 * Skip over free records and records that have
                 * failed.
                 */
                if (rec->flags & FTRACE_FL_FREE ||
                    rec->flags & FTRACE_FL_FAILED)
                        continue;

                /* ignore updates to this record's mcount site */
                if (get_kprobe((void *)rec->ip)) {
                        freeze_record(rec);
                        continue;
                } else {
                        unfreeze_record(rec);
                }

                failed = __ftrace_replace_code(rec, enable);
                if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
                        rec->flags |= FTRACE_FL_FAILED;
                        if ((system_state == SYSTEM_BOOTING) ||
                            !core_kernel_text(rec->ip)) {
                                ftrace_free_rec(rec);
                        } else
                                ftrace_bug(failed, rec->ip);
                }
        } while_for_each_ftrace_rec();
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
        unsigned long ip;
        int ret;

        ip = rec->ip;

        ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
        if (ret) {
                ftrace_bug(ret, ip);
                rec->flags |= FTRACE_FL_FAILED;
                return 0;
        }
        return 1;
}

static int __ftrace_modify_code(void *data)
{
        int *command = data;

        if (*command & FTRACE_ENABLE_CALLS)
                ftrace_replace_code(1);
        else if (*command & FTRACE_DISABLE_CALLS)
                ftrace_replace_code(0);

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        if (*command & FTRACE_START_FUNC_RET)
                ftrace_enable_ftrace_graph_caller();
        else if (*command & FTRACE_STOP_FUNC_RET)
                ftrace_disable_ftrace_graph_caller();

        return 0;
}

static void ftrace_run_update_code(int command)
{
        stop_machine(__ftrace_modify_code, &command, NULL);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                return;

        ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_start_lock);
        ftrace_start_up++;
        command |= FTRACE_ENABLE_CALLS;

        ftrace_startup_enable(command);

        mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(int command)
{
        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_start_lock);
        ftrace_start_up--;
        if (!ftrace_start_up)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftrace_start_lock);
}

static void ftrace_startup_sysctl(void)
{
        int command = FTRACE_ENABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_start_lock);
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftrace_start_up is true if we want ftrace running */
        if (ftrace_start_up)
                command |= FTRACE_ENABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
        int command = FTRACE_DISABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_start_lock);
        /* ftrace_start_up is true if ftrace is running */
        if (ftrace_start_up)
                command |= FTRACE_DISABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftrace_start_lock);
}

static cycle_t          ftrace_update_time;
static unsigned long    ftrace_update_cnt;
unsigned long           ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
        struct dyn_ftrace *p, *t;
        cycle_t start, stop;

        start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

                /* If something went wrong, bail without enabling anything */
                if (unlikely(ftrace_disabled))
                        return -1;

                list_del_init(&p->list);

                /* convert record (i.e, patch mcount-call with NOP) */
                if (ftrace_code_disable(mod, p)) {
                        p->flags |= FTRACE_FL_CONVERTED;
                        ftrace_update_cnt++;
                } else
                        ftrace_free_rec(p);
        }

        stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;

        return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
        struct ftrace_page *pg;
        int cnt;
        int i;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -1;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         *   final linking to find all calls to ftrace.
         *   Then we can:
         *    a) know how many pages to allocate.
         *     and/or
         *    b) set up the table then.
         *
         *  The dynamic code is still necessary for
         *  modules.
         */

        pg = ftrace_pages = ftrace_pages_start;

        cnt = num_to_init / ENTRIES_PER_PAGE;
        pr_info("ftrace: allocating %ld entries in %d pages\n",
                num_to_init, cnt + 1);

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);

                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;

                pg = pg->next;
        }

        return 0;
}

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_CONT        = (1 << 1),
        FTRACE_ITER_NOTRACE     = (1 << 2),
        FTRACE_ITER_FAILURES    = (1 << 3),
        FTRACE_ITER_PRINTALL    = (1 << 4),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
        struct ftrace_page      *pg;
        int                     idx;
        unsigned                flags;
        unsigned char           buffer[FTRACE_BUFF_MAX+1];
        unsigned                buffer_idx;
        unsigned                filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;

        (*pos)++;

        if (iter->flags & FTRACE_ITER_PRINTALL)
                return NULL;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
 retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
                } else {
                        iter->idx = -1;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
                if ((rec->flags & FTRACE_FL_FREE) ||

                    (!(iter->flags & FTRACE_ITER_FAILURES) &&
                     (rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FAILURES) &&
                     !(rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FILTER) &&
                     !(rec->flags & FTRACE_FL_FILTER)) ||

                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
                     !(rec->flags & FTRACE_FL_NOTRACE))) {
                        rec = NULL;
                        goto retry;
                }
        }
        spin_unlock(&ftrace_lock);

        return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;

        /*
         * For set_ftrace_filter reading, if we have the filter
         * off, we can short cut and just print out that all
         * functions are enabled.
         */
        if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
                if (*pos > 0)
                        return NULL;
                iter->flags |= FTRACE_ITER_PRINTALL;
                (*pos)++;
                return iter;
        }

        if (*pos > 0) {
                if (iter->idx < 0)
                        return p;
                (*pos)--;
                iter->idx--;
        }

        p = t_next(m, p, pos);

        return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];

        if (iter->flags & FTRACE_ITER_PRINTALL) {
                seq_printf(m, "#### all functions enabled ####\n");
                return 0;
        }

        if (!rec)
                return 0;

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

        seq_printf(m, "%s\n", str);

        return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        iter->pg = ftrace_pages_start;

        ret = seq_open(file, &show_ftrace_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;

                m->private = iter;
        } else {
                kfree(iter);
        }

        return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter = m->private;

        seq_release(inode, file);
        kfree(iter);

        return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
        int ret;
        struct seq_file *m;
        struct ftrace_iterator *iter;

        ret = ftrace_avail_open(inode, file);
        if (!ret) {
                m = (struct seq_file *)file->private_data;
                iter = (struct ftrace_iterator *)m->private;
                iter->flags = FTRACE_ITER_FAILURES;
        }

        return ret;
}


static void ftrace_filter_reset(int enable)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
        if (enable)
                ftrace_filtered = 0;
        do_for_each_ftrace_rec(pg, rec) {
                if (rec->flags & FTRACE_FL_FAILED)
                        continue;
                rec->flags &= ~type;
        } while_for_each_ftrace_rec();

        spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
        struct ftrace_iterator *iter;
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        mutex_lock(&ftrace_regex_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_filter_reset(enable);

        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
                iter->flags = enable ? FTRACE_ITER_FILTER :
                        FTRACE_ITER_NOTRACE;

                ret = seq_open(file, &show_ftrace_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = iter;
                } else
                        kfree(iter);
        } else
                file->private_data = iter;
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}
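
/*
 * Usage sketch, following from the O_APPEND handling above:
 *
 *	echo sys_open  > set_ftrace_filter    # reset the filter, add sys_open
 *	echo sys_read >> set_ftrace_filter    # append without resetting
 */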

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
{
        if (file->f_mode & FMODE_READ)
                return seq_read(file, ubuf, cnt, ppos);
        else
                return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, origin);
        else
                file->f_pos = ret = 1;

        return ret;
}

enum {
        MATCH_FULL,
        MATCH_FRONT_ONLY,
        MATCH_MIDDLE_ONLY,
        MATCH_END_ONLY,
};
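
/*
 * Pattern forms accepted by ftrace_match() below (summary of the
 * parsing code, shown for reference):
 *
 *	"foo"	-> MATCH_FULL        (exact match)
 *	"foo*"	-> MATCH_FRONT_ONLY  (prefix match)
 *	"*foo*"	-> MATCH_MIDDLE_ONLY (substring match)
 *	"*foo"	-> MATCH_END_ONLY    (suffix match)
 *
 * A leading '!' negates the pattern: matching records have the
 * flag cleared instead of set.
 */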

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
        char str[KSYM_SYMBOL_LEN];
        char *search = NULL;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type = MATCH_FULL;
        unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i, match = 0, search_len = 0;
        int not = 0;

        if (buff[0] == '!') {
                not = 1;
                buff++;
                len--;
        }

        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
                        if (!i) {
                                search = buff + i + 1;
                                type = MATCH_END_ONLY;
                                search_len = len - (i + 1);
                        } else {
                                if (type == MATCH_END_ONLY) {
                                        type = MATCH_MIDDLE_ONLY;
                                } else {
                                        match = i;
                                        type = MATCH_FRONT_ONLY;
                                }
                                buff[i] = 0;
                                break;
                        }
                }
        }

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
        if (enable)
                ftrace_filtered = 1;
        do_for_each_ftrace_rec(pg, rec) {
                int matched = 0;
                char *ptr;

                if (rec->flags & FTRACE_FL_FAILED)
                        continue;
                kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
                switch (type) {
                case MATCH_FULL:
                        if (strcmp(str, buff) == 0)
                                matched = 1;
                        break;
                case MATCH_FRONT_ONLY:
                        if (memcmp(str, buff, match) == 0)
                                matched = 1;
                        break;
                case MATCH_MIDDLE_ONLY:
                        if (strstr(str, search))
                                matched = 1;
                        break;
                case MATCH_END_ONLY:
                        ptr = strstr(str, search);
                        if (ptr && (ptr[search_len] == 0))
                                matched = 1;
                        break;
                }
                if (matched) {
                        if (not)
                                rec->flags &= ~flag;
                        else
                                rec->flags |= flag;
                }
        } while_for_each_ftrace_rec();
        spin_unlock(&ftrace_lock);
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos, int enable)
{
        struct ftrace_iterator *iter;
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!cnt)
                return 0;

        mutex_lock(&ftrace_regex_lock);

        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;

        if (!*ppos) {
                iter->flags &= ~FTRACE_ITER_CONT;
                iter->buffer_idx = 0;
        }

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;
        read++;
        cnt--;

        if (!(iter->flags & ~FTRACE_ITER_CONT)) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                if (isspace(ch)) {
                        file->f_pos += read;
                        ret = read;
                        goto out;
                }

                iter->buffer_idx = 0;
        }

        while (cnt && !isspace(ch)) {
                if (iter->buffer_idx < FTRACE_BUFF_MAX)
                        iter->buffer[iter->buffer_idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        if (isspace(ch)) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
                iter->buffer_idx = 0;
        } else
                iter->flags |= FTRACE_ITER_CONT;


        file->f_pos += read;

        ret = read;
 out:
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_regex_lock);
        if (reset)
                ftrace_filter_reset(enable);
        if (buf)
                ftrace_match(buf, len, enable);
        mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 1);
}
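
/*
 * Example (sketch): a tracer limiting tracing to a single function
 * before enabling it:
 *
 *	ftrace_set_filter("schedule", strlen("schedule"), 1);
 */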

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf: the string that holds the function notrace text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter;

        mutex_lock(&ftrace_regex_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;

                seq_release(inode, file);
        } else
                iter = file->private_data;

        if (iter->buffer_idx) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
        }

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftrace_start_lock);
        if (ftrace_start_up && ftrace_enabled)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
        mutex_unlock(&ftrace_start_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        kfree(iter);
        mutex_unlock(&ftrace_regex_lock);
        return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 0);
}

static struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
        .open = ftrace_failures_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = ftrace_regex_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
        .open = ftrace_notrace_open,
        .read = ftrace_regex_read,
        .write = ftrace_notrace_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_notrace_release,
};

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
        unsigned long *array = m->private;
        int index = *pos;

        (*pos)++;

        if (index >= ftrace_graph_count)
                return NULL;

        return &array[index];
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
        void *p = NULL;

        mutex_lock(&graph_lock);

        p = g_next(m, p, pos);

        return p;
}

static void g_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
        unsigned long *ptr = v;
        char str[KSYM_SYMBOL_LEN];

        if (!ptr)
                return 0;

        kallsyms_lookup(*ptr, NULL, NULL, NULL, str);

        seq_printf(m, "%s\n", str);

        return 0;
}

static struct seq_operations ftrace_graph_seq_ops = {
        .start = g_start,
        .next = g_next,
        .stop = g_stop,
        .show = g_show,
};

static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&graph_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND)) {
                ftrace_graph_count = 0;
                memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
        }

        if (file->f_mode & FMODE_READ) {
                ret = seq_open(file, &ftrace_graph_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = ftrace_graph_funcs;
                }
        } else
                file->private_data = ftrace_graph_funcs;
        mutex_unlock(&graph_lock);

        return ret;
}

static ssize_t
ftrace_graph_read(struct file *file, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
{
        if (file->f_mode & FMODE_READ)
                return seq_read(file, ubuf, cnt, ppos);
        else
                return -EPERM;
}

static int
ftrace_set_func(unsigned long *array, int idx, char *buffer)
{
        char str[KSYM_SYMBOL_LEN];
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
        int found = 0;
        int j;

        if (ftrace_disabled)
                return -ENODEV;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        do_for_each_ftrace_rec(pg, rec) {

                if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
                        continue;

                kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
                if (strcmp(str, buffer) == 0) {
                        /* Return 1 if we add it to the array */
                        found = 1;
                        for (j = 0; j < idx; j++)
                                if (array[j] == rec->ip) {
                                        found = 0;
                                        break;
                                }
                        if (found)
                                array[idx] = rec->ip;
                        goto out;
                }
        } while_for_each_ftrace_rec();
 out:
        spin_unlock(&ftrace_lock);

        return found ? 0 : -EINVAL;
}

static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
        unsigned char buffer[FTRACE_BUFF_MAX+1];
        unsigned long *array;
        size_t read = 0;
        ssize_t ret;
        int index = 0;
        char ch;

        if (!cnt)
                return 0;

        mutex_lock(&graph_lock);

        if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
                ret = -EBUSY;
                goto out;
        }

        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                array = m->private;
        } else
                array = file->private_data;

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;
        read++;
        cnt--;

        /* skip white space */
        while (cnt && isspace(ch)) {
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        if (isspace(ch)) {
                *ppos += read;
                ret = read;
                goto out;
        }

        while (cnt && !isspace(ch)) {
                if (index < FTRACE_BUFF_MAX)
                        buffer[index++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }
        buffer[index] = 0;

        /* we allow only one at a time */
        ret = ftrace_set_func(array, ftrace_graph_count, buffer);
        if (ret)
                goto out;

        ftrace_graph_count++;

        file->f_pos += read;

        ret = read;
 out:
        mutex_unlock(&graph_lock);

        return ret;
}

static const struct file_operations ftrace_graph_fops = {
        .open = ftrace_graph_open,
        .read = ftrace_graph_read,
        .write = ftrace_graph_write,
};
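
/*
 * Usage sketch: restrict the function graph tracer to named functions,
 * one function per write, appended up to FTRACE_GRAPH_MAX_FUNCS entries,
 * for example:
 *
 *	echo do_sys_open > set_graph_function
 */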
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
        struct dentry *entry;

        entry = debugfs_create_file("available_filter_functions", 0444,
                                    d_tracer, NULL, &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_filter_functions' entry\n");

        entry = debugfs_create_file("failures", 0444,
                                    d_tracer, NULL, &ftrace_failures_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'failures' entry\n");

        entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
                                    NULL, &ftrace_filter_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_filter' entry\n");

        entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
                                    NULL, &ftrace_notrace_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_notrace' entry\n");

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        entry = debugfs_create_file("set_graph_function", 0644, d_tracer,
1598                                     NULL,
1599                                     &ftrace_graph_fops);
1600         if (!entry)
1601                 pr_warning("Could not create debugfs "
1602                            "'set_graph_function' entry\n");
1603 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1604
1605         return 0;
1606 }
1607
1608 static int ftrace_convert_nops(struct module *mod,
1609                                unsigned long *start,
1610                                unsigned long *end)
1611 {
1612         unsigned long *p;
1613         unsigned long addr;
1614         unsigned long flags;
1615
1616         mutex_lock(&ftrace_start_lock);
1617         p = start;
1618         while (p < end) {
1619                 addr = ftrace_call_adjust(*p++);
1620                 /*
1621                  * Some architecture linkers will pad between
1622                  * the different mcount_loc sections of different
1623                  * object files to satisfy alignments.
1624                  * Skip any NULL pointers.
1625                  */
1626                 if (!addr)
1627                         continue;
1628                 ftrace_record_ip(addr);
1629         }
1630
1631         /* disable interrupts to prevent kstop machine */
1632         local_irq_save(flags);
1633         ftrace_update_code(mod);
1634         local_irq_restore(flags);
1635         mutex_unlock(&ftrace_start_lock);
1636
1637         return 0;
1638 }
1639
1640 void ftrace_init_module(struct module *mod,
1641                         unsigned long *start, unsigned long *end)
1642 {
1643         if (ftrace_disabled || start == end)
1644                 return;
1645         ftrace_convert_nops(mod, start, end);
1646 }
1647
1648 extern unsigned long __start_mcount_loc[];
1649 extern unsigned long __stop_mcount_loc[];
1650
1651 void __init ftrace_init(void)
1652 {
1653         unsigned long count, addr, flags;
1654         int ret;
1655
1656         /* Keep the ftrace pointer to the stub */
1657         addr = (unsigned long)ftrace_stub;
1658
1659         local_irq_save(flags);
1660         ftrace_dyn_arch_init(&addr);
1661         local_irq_restore(flags);
1662
1663         /* ftrace_dyn_arch_init places the return code in addr */
1664         if (addr)
1665                 goto failed;
1666
1667         count = __stop_mcount_loc - __start_mcount_loc;
1668
1669         ret = ftrace_dyn_table_alloc(count);
1670         if (ret)
1671                 goto failed;
1672
1673         last_ftrace_enabled = ftrace_enabled = 1;
1674
1675         ret = ftrace_convert_nops(NULL,
1676                                   __start_mcount_loc,
1677                                   __stop_mcount_loc);
1678
1679         return;
1680  failed:
1681         ftrace_disabled = 1;
1682 }
1683
1684 #else
1685
1686 static int __init ftrace_nodyn_init(void)
1687 {
1688         ftrace_enabled = 1;
1689         return 0;
1690 }
1691 device_initcall(ftrace_nodyn_init);
1692
1693 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
1694 static inline void ftrace_startup_enable(int command) { }
1695 /* Keep as macros so we do not need to define the commands */
1696 # define ftrace_startup(command)        do { } while (0)
1697 # define ftrace_shutdown(command)       do { } while (0)
1698 # define ftrace_startup_sysctl()        do { } while (0)
1699 # define ftrace_shutdown_sysctl()       do { } while (0)
1700 #endif /* CONFIG_DYNAMIC_FTRACE */
1701
1702 static ssize_t
1703 ftrace_pid_read(struct file *file, char __user *ubuf,
1704                        size_t cnt, loff_t *ppos)
1705 {
1706         char buf[64];
1707         int r;
1708
1709         if (ftrace_pid_trace == ftrace_swapper_pid)
1710                 r = sprintf(buf, "swapper tasks\n");
1711         else if (ftrace_pid_trace)
1712                 r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
1713         else
1714                 r = sprintf(buf, "no pid\n");
1715
1716         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1717 }
1718
static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}

static void clear_ftrace_pid_task(struct pid **pid)
{
	if (*pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(*pid);

	*pid = NULL;
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}

static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_start_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			pid = find_get_pid(val);

			if (pid == ftrace_pid_trace) {
				put_pid(pid);
				goto out;
			}
		}

		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_start_lock);

	return cnt;
}

static struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};

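/*
 * By the time this fs_initcall() runs, debugfs itself has already been
 * registered, so creating the set_ftrace_pid file here is safe.
 */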
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. Since it may be called from atomic
 * context, it takes no locks and frees no resources; it simply
 * disables ftrace and points the trace function back at the stub.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = __register_ftrace_function(ops);
	ftrace_startup(0);

	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
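
/*
 * Illustrative sketch (not from this file; the callback and ops names
 * are hypothetical): a user of this interface supplies a notrace
 * callback wrapped in an ftrace_ops:
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		(record ip/parent_ip; everything called here must
 *		 itself be notrace)
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */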

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
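
/*
 * This handler backs the kernel.ftrace_enabled sysctl, so function
 * tracing can be toggled at run time, e.g. (assuming procfs is mounted
 * in the usual place):
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled
 *	echo 1 > /proc/sys/kernel/ftrace_enabled
 */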

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;
static struct notifier_block ftrace_suspend_notifier;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;

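/*
 * ftrace_stub has the wrong type for the return hook, but it is a bare
 * return that ignores its arguments, so the cast above is safe in
 * practice and saves defining a dedicated return stub.
 */
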
/* Try to assign a return stack to each of FTRACE_RETSTACK_ALLOC_SIZE tasks */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			/* Make sure IRQs see the -1 first: */
			barrier();
			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

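	/*
	 * -EAGAIN means a whole batch of FTRACE_RETSTACK_ALLOC_SIZE
	 * stacks was handed out but some threads still lack one (e.g.
	 * tasks created while we walked the list), so go around again
	 * with a fresh batch.
	 */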
	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	kfree(ret_stack_list);
	return ret;
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk, so protect against that by pausing
 * graph tracing across hibernation.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}

int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_sysctl_lock);

	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
	register_pm_notifier(&ftrace_suspend_notifier);

	atomic_inc(&ftrace_graph_active);
	ret = start_graph_tracing();
	if (ret) {
		atomic_dec(&ftrace_graph_active);
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
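
/*
 * Illustrative sketch (not from this file; the names are hypothetical):
 * a pair of callbacks shaped like the ones the function graph tracer
 * registers.  The entry hook returns nonzero to trace the current
 * function and 0 to skip it; the return hook sees the filled-in
 * ftrace_graph_ret.
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *		(use trace->func, trace->calltime, trace->rettime, ...)
 *	}
 *
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 */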

void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_sysctl_lock);

	atomic_dec(&ftrace_graph_active);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);

	mutex_unlock(&ftrace_sysctl_lock);
}

/* Allocate a return stack for a newly created task */
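/*
 * This runs for every task created while the graph tracer is active.
 * If the allocation fails we return early and the task simply runs
 * without a return stack (it will not be graph traced); fork itself
 * is never failed on behalf of the tracer.
 */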
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

void ftrace_graph_stop(void)
{
	ftrace_stop();
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
