ftrace: Remove unnecessary disabling of irqs
[linux-2.6-block.git] / kernel / trace / ftrace.c
16444a8a
ACM
1/*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code in the latency_tracer, that is:
11 *
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
14 */
15
3d083395
SR
16#include <linux/stop_machine.h>
17#include <linux/clocksource.h>
18#include <linux/kallsyms.h>
5072c59f 19#include <linux/seq_file.h>
4a2b8dda 20#include <linux/suspend.h>
5072c59f 21#include <linux/debugfs.h>
3d083395 22#include <linux/hardirq.h>
2d8b820b 23#include <linux/kthread.h>
5072c59f 24#include <linux/uaccess.h>
2d8b820b 25#include <linux/ftrace.h>
b0fc494f 26#include <linux/sysctl.h>
5a0e3ad6 27#include <linux/slab.h>
5072c59f 28#include <linux/ctype.h>
3d083395 29#include <linux/list.h>
59df055f 30#include <linux/hash.h>
3f379b03 31#include <linux/rcupdate.h>
3d083395 32
ad8d75ff 33#include <trace/events/sched.h>
8aef2d28 34
395a59d0 35#include <asm/ftrace.h>
2af15d6a 36#include <asm/setup.h>
395a59d0 37
0706f1c4 38#include "trace_output.h"
bac429f0 39#include "trace_stat.h"
16444a8a 40
6912896e 41#define FTRACE_WARN_ON(cond) \
0778d9ad
SR
42 ({ \
43 int ___r = cond; \
44 if (WARN_ON(___r)) \
6912896e 45 ftrace_kill(); \
0778d9ad
SR
46 ___r; \
47 })
6912896e
SR
48
49#define FTRACE_WARN_ON_ONCE(cond) \
0778d9ad
SR
50 ({ \
51 int ___r = cond; \
52 if (WARN_ON_ONCE(___r)) \
6912896e 53 ftrace_kill(); \
0778d9ad
SR
54 ___r; \
55 })
6912896e 56
8fc0c701
SR
57/* hash bits for specific function selection */
58#define FTRACE_HASH_BITS 7
59#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
60
4eebcc81
SR
61/* ftrace_enabled is a method to turn ftrace on or off */
62int ftrace_enabled __read_mostly;
d61f82d0 63static int last_ftrace_enabled;
b0fc494f 64
60a7ecf4
SR
65/* Quick disabling of function tracer. */
66int function_trace_stop;
67
756d17ee 68/* List for set_ftrace_pid's pids. */
69LIST_HEAD(ftrace_pids);
70struct ftrace_pid {
71 struct list_head list;
72 struct pid *pid;
73};
74
4eebcc81
SR
75/*
76 * ftrace_disabled is set when an anomaly is discovered.
77 * ftrace_disabled is much stronger than ftrace_enabled.
78 */
79static int ftrace_disabled __read_mostly;
80
52baf119 81static DEFINE_MUTEX(ftrace_lock);
b0fc494f 82
16444a8a
ACM
83static struct ftrace_ops ftrace_list_end __read_mostly =
84{
fb9fb015 85 .func = ftrace_stub,
16444a8a
ACM
86};
87
88static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
89ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
60a7ecf4 90ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
df4fc315 91ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
16444a8a 92
3f379b03
PM
93/*
94 * Traverse the ftrace_list, invoking all entries. The reason that we
95 * can use rcu_dereference_raw() is that elements removed from this list
96 * are simply leaked, so there is no need to interact with a grace-period
97 * mechanism. The rcu_dereference_raw() calls are needed to handle
98 * concurrent insertions into the ftrace_list.
99 *
100 * Silly Alpha and silly pointer-speculation compiler optimizations!
101 */
f2252935 102static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
16444a8a 103{
3f379b03 104 struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/
16444a8a
ACM
105
106 while (op != &ftrace_list_end) {
16444a8a 107 op->func(ip, parent_ip);
3f379b03 108 op = rcu_dereference_raw(op->next); /*see above*/
16444a8a
ACM
109 };
110}
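/*
 * Illustrative sketch (not part of this file; the callback name is made
 * up): a tracer hooks into the list above by filling in an ftrace_ops
 * and calling register_ftrace_function().  With a single registered ops
 * its func is called directly; with more than one, ftrace_list_func()
 * walks the list as shown above.
 *
 *	static void my_trace_func(unsigned long ip, unsigned long parent_ip)
 *	{
 *		// ip is the traced function, parent_ip its call site
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	// register_ftrace_function(&my_ops);	-- start tracing
 *	// unregister_ftrace_function(&my_ops);	-- stop tracing
 */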
111
df4fc315
SR
112static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
113{
0ef8cde5 114 if (!test_tsk_trace_trace(current))
df4fc315
SR
115 return;
116
117 ftrace_pid_function(ip, parent_ip);
118}
119
120static void set_ftrace_pid_function(ftrace_func_t func)
121{
122 /* do not set ftrace_pid_function to itself! */
123 if (func != ftrace_pid_func)
124 ftrace_pid_function = func;
125}
126
16444a8a 127/**
3d083395 128 * clear_ftrace_function - reset the ftrace function
16444a8a 129 *
3d083395
SR
130 * This NULLs the ftrace function and in essence stops
 131 * tracing. There may be a lag before the change fully takes effect.
16444a8a 132 */
3d083395 133void clear_ftrace_function(void)
16444a8a 134{
3d083395 135 ftrace_trace_function = ftrace_stub;
60a7ecf4 136 __ftrace_trace_function = ftrace_stub;
df4fc315 137 ftrace_pid_function = ftrace_stub;
3d083395
SR
138}
139
60a7ecf4
SR
140#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
141/*
 142 * For those archs that do not test function_trace_stop in their
143 * mcount call site, we need to do it from C.
144 */
145static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
146{
147 if (function_trace_stop)
148 return;
149
150 __ftrace_trace_function(ip, parent_ip);
151}
152#endif
153
e309b41d 154static int __register_ftrace_function(struct ftrace_ops *ops)
3d083395 155{
16444a8a
ACM
156 ops->next = ftrace_list;
157 /*
158 * We are entering ops into the ftrace_list but another
159 * CPU might be walking that list. We need to make sure
160 * the ops->next pointer is valid before another CPU sees
161 * the ops pointer included into the ftrace_list.
162 */
3f379b03 163 rcu_assign_pointer(ftrace_list, ops);
3d083395 164
b0fc494f 165 if (ftrace_enabled) {
df4fc315
SR
166 ftrace_func_t func;
167
168 if (ops->next == &ftrace_list_end)
169 func = ops->func;
170 else
171 func = ftrace_list_func;
172
756d17ee 173 if (!list_empty(&ftrace_pids)) {
df4fc315
SR
174 set_ftrace_pid_function(func);
175 func = ftrace_pid_func;
176 }
177
b0fc494f
SR
178 /*
179 * For one func, simply call it directly.
180 * For more than one func, call the chain.
181 */
60a7ecf4 182#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
df4fc315 183 ftrace_trace_function = func;
60a7ecf4 184#else
df4fc315 185 __ftrace_trace_function = func;
60a7ecf4
SR
186 ftrace_trace_function = ftrace_test_stop_func;
187#endif
b0fc494f 188 }
3d083395 189
16444a8a
ACM
190 return 0;
191}
192
e309b41d 193static int __unregister_ftrace_function(struct ftrace_ops *ops)
16444a8a 194{
16444a8a 195 struct ftrace_ops **p;
16444a8a
ACM
196
197 /*
3d083395
SR
198 * If we are removing the last function, then simply point
199 * to the ftrace_stub.
16444a8a
ACM
200 */
201 if (ftrace_list == ops && ops->next == &ftrace_list_end) {
202 ftrace_trace_function = ftrace_stub;
203 ftrace_list = &ftrace_list_end;
e6ea44e9 204 return 0;
16444a8a
ACM
205 }
206
207 for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
208 if (*p == ops)
209 break;
210
e6ea44e9
SR
211 if (*p != ops)
212 return -1;
16444a8a
ACM
213
214 *p = (*p)->next;
215
b0fc494f
SR
216 if (ftrace_enabled) {
217 /* If we only have one func left, then call that directly */
df4fc315
SR
218 if (ftrace_list->next == &ftrace_list_end) {
219 ftrace_func_t func = ftrace_list->func;
220
756d17ee 221 if (!list_empty(&ftrace_pids)) {
df4fc315
SR
222 set_ftrace_pid_function(func);
223 func = ftrace_pid_func;
224 }
225#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
226 ftrace_trace_function = func;
227#else
228 __ftrace_trace_function = func;
229#endif
230 }
b0fc494f 231 }
16444a8a 232
e6ea44e9 233 return 0;
3d083395
SR
234}
235
df4fc315
SR
236static void ftrace_update_pid_func(void)
237{
238 ftrace_func_t func;
239
df4fc315 240 if (ftrace_trace_function == ftrace_stub)
10dd3ebe 241 return;
df4fc315 242
33974093 243#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
df4fc315 244 func = ftrace_trace_function;
33974093
MF
245#else
246 func = __ftrace_trace_function;
247#endif
df4fc315 248
756d17ee 249 if (!list_empty(&ftrace_pids)) {
df4fc315
SR
250 set_ftrace_pid_function(func);
251 func = ftrace_pid_func;
252 } else {
66eafebc
LW
253 if (func == ftrace_pid_func)
254 func = ftrace_pid_function;
df4fc315
SR
255 }
256
257#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
258 ftrace_trace_function = func;
259#else
260 __ftrace_trace_function = func;
261#endif
df4fc315
SR
262}
263
493762fc
SR
264#ifdef CONFIG_FUNCTION_PROFILER
265struct ftrace_profile {
266 struct hlist_node node;
267 unsigned long ip;
268 unsigned long counter;
0706f1c4
SR
269#ifdef CONFIG_FUNCTION_GRAPH_TRACER
270 unsigned long long time;
e330b3bc 271 unsigned long long time_squared;
0706f1c4 272#endif
8fc0c701
SR
273};
274
493762fc
SR
275struct ftrace_profile_page {
276 struct ftrace_profile_page *next;
277 unsigned long index;
278 struct ftrace_profile records[];
d61f82d0
SR
279};
280
cafb168a
SR
281struct ftrace_profile_stat {
282 atomic_t disabled;
283 struct hlist_head *hash;
284 struct ftrace_profile_page *pages;
285 struct ftrace_profile_page *start;
286 struct tracer_stat stat;
287};
288
493762fc
SR
289#define PROFILE_RECORDS_SIZE \
290 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
5072c59f 291
493762fc
SR
292#define PROFILES_PER_PAGE \
293 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
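/*
 * Rough sizing sketch (illustrative numbers, not authoritative): on a
 * 64-bit kernel with 4K pages the 'next' and 'index' members of struct
 * ftrace_profile_page take 16 bytes, leaving about 4080 bytes for
 * records.  With CONFIG_FUNCTION_GRAPH_TRACER a record is roughly 48
 * bytes (hlist_node + ip + counter + time + time_squared), so
 * PROFILES_PER_PAGE comes out around 85; without the graph fields it is
 * closer to 127.  Exact values depend on the architecture and config.
 */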
3d083395 294
fb9fb015
SR
295static int ftrace_profile_bits __read_mostly;
296static int ftrace_profile_enabled __read_mostly;
297
298/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
bac429f0
SR
299static DEFINE_MUTEX(ftrace_profile_lock);
300
cafb168a 301static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
493762fc
SR
302
303#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
304
bac429f0
SR
305static void *
306function_stat_next(void *v, int idx)
307{
493762fc
SR
308 struct ftrace_profile *rec = v;
309 struct ftrace_profile_page *pg;
bac429f0 310
493762fc 311 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
bac429f0
SR
312
313 again:
0296e425
LZ
314 if (idx != 0)
315 rec++;
316
bac429f0
SR
317 if ((void *)rec >= (void *)&pg->records[pg->index]) {
318 pg = pg->next;
319 if (!pg)
320 return NULL;
321 rec = &pg->records[0];
493762fc
SR
322 if (!rec->counter)
323 goto again;
bac429f0
SR
324 }
325
bac429f0
SR
326 return rec;
327}
328
329static void *function_stat_start(struct tracer_stat *trace)
330{
cafb168a
SR
331 struct ftrace_profile_stat *stat =
332 container_of(trace, struct ftrace_profile_stat, stat);
333
334 if (!stat || !stat->start)
335 return NULL;
336
337 return function_stat_next(&stat->start->records[0], 0);
bac429f0
SR
338}
339
0706f1c4
SR
340#ifdef CONFIG_FUNCTION_GRAPH_TRACER
341/* function graph compares on total time */
342static int function_stat_cmp(void *p1, void *p2)
343{
344 struct ftrace_profile *a = p1;
345 struct ftrace_profile *b = p2;
346
347 if (a->time < b->time)
348 return -1;
349 if (a->time > b->time)
350 return 1;
351 else
352 return 0;
353}
354#else
 355/* when function graph is not configured, compare against hits */
bac429f0
SR
356static int function_stat_cmp(void *p1, void *p2)
357{
493762fc
SR
358 struct ftrace_profile *a = p1;
359 struct ftrace_profile *b = p2;
bac429f0
SR
360
361 if (a->counter < b->counter)
362 return -1;
363 if (a->counter > b->counter)
364 return 1;
365 else
366 return 0;
367}
0706f1c4 368#endif
bac429f0
SR
369
370static int function_stat_headers(struct seq_file *m)
371{
0706f1c4 372#ifdef CONFIG_FUNCTION_GRAPH_TRACER
34886c8b 373 seq_printf(m, " Function "
e330b3bc 374 "Hit Time Avg s^2\n"
34886c8b 375 " -------- "
e330b3bc 376 "--- ---- --- ---\n");
0706f1c4 377#else
bac429f0
SR
378 seq_printf(m, " Function Hit\n"
379 " -------- ---\n");
0706f1c4 380#endif
bac429f0
SR
381 return 0;
382}
383
384static int function_stat_show(struct seq_file *m, void *v)
385{
493762fc 386 struct ftrace_profile *rec = v;
bac429f0 387 char str[KSYM_SYMBOL_LEN];
3aaba20f 388 int ret = 0;
0706f1c4 389#ifdef CONFIG_FUNCTION_GRAPH_TRACER
34886c8b
SR
390 static struct trace_seq s;
391 unsigned long long avg;
e330b3bc 392 unsigned long long stddev;
0706f1c4 393#endif
3aaba20f
LZ
394 mutex_lock(&ftrace_profile_lock);
395
396 /* we raced with function_profile_reset() */
397 if (unlikely(rec->counter == 0)) {
398 ret = -EBUSY;
399 goto out;
400 }
bac429f0
SR
401
402 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
0706f1c4
SR
403 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
404
405#ifdef CONFIG_FUNCTION_GRAPH_TRACER
406 seq_printf(m, " ");
34886c8b
SR
407 avg = rec->time;
408 do_div(avg, rec->counter);
409
e330b3bc
CD
410 /* Sample standard deviation (s^2) */
411 if (rec->counter <= 1)
412 stddev = 0;
413 else {
414 stddev = rec->time_squared - rec->counter * avg * avg;
415 /*
 416 * Divide by only 1000 for the ns^2 -> us^2 conversion;
 417 * trace_print_graph_duration() will divide by 1000 again.
418 */
419 do_div(stddev, (rec->counter - 1) * 1000);
420 }
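	/*
	 * Reading aid for the computation above (standard sample-variance
	 * identity, with n = rec->counter and mean = avg):
	 *
	 *	s^2 = (sum(x_i^2) - n * mean^2) / (n - 1)
	 *
	 * which is exactly (time_squared - counter * avg * avg) divided by
	 * (counter - 1); the extra factor of 1000 is only a unit change.
	 */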
421
34886c8b
SR
422 trace_seq_init(&s);
423 trace_print_graph_duration(rec->time, &s);
424 trace_seq_puts(&s, " ");
425 trace_print_graph_duration(avg, &s);
e330b3bc
CD
426 trace_seq_puts(&s, " ");
427 trace_print_graph_duration(stddev, &s);
0706f1c4 428 trace_print_seq(m, &s);
0706f1c4
SR
429#endif
430 seq_putc(m, '\n');
3aaba20f
LZ
431out:
432 mutex_unlock(&ftrace_profile_lock);
bac429f0 433
3aaba20f 434 return ret;
bac429f0
SR
435}
436
cafb168a 437static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
bac429f0 438{
493762fc 439 struct ftrace_profile_page *pg;
bac429f0 440
cafb168a 441 pg = stat->pages = stat->start;
bac429f0 442
493762fc
SR
443 while (pg) {
444 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
445 pg->index = 0;
446 pg = pg->next;
bac429f0
SR
447 }
448
cafb168a 449 memset(stat->hash, 0,
493762fc
SR
450 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
451}
bac429f0 452
cafb168a 453int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
493762fc
SR
454{
455 struct ftrace_profile_page *pg;
318e0a73
SR
456 int functions;
457 int pages;
493762fc 458 int i;
bac429f0 459
493762fc 460 /* If we already allocated, do nothing */
cafb168a 461 if (stat->pages)
493762fc 462 return 0;
bac429f0 463
cafb168a
SR
464 stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
465 if (!stat->pages)
493762fc 466 return -ENOMEM;
bac429f0 467
318e0a73
SR
468#ifdef CONFIG_DYNAMIC_FTRACE
469 functions = ftrace_update_tot_cnt;
470#else
471 /*
472 * We do not know the number of functions that exist because
473 * dynamic tracing is what counts them. With past experience
474 * we have around 20K functions. That should be more than enough.
475 * It is highly unlikely we will execute every function in
476 * the kernel.
477 */
478 functions = 20000;
479#endif
480
cafb168a 481 pg = stat->start = stat->pages;
bac429f0 482
318e0a73
SR
483 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
484
485 for (i = 0; i < pages; i++) {
493762fc 486 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
493762fc 487 if (!pg->next)
318e0a73 488 goto out_free;
493762fc
SR
489 pg = pg->next;
490 }
491
492 return 0;
318e0a73
SR
493
494 out_free:
495 pg = stat->start;
496 while (pg) {
497 unsigned long tmp = (unsigned long)pg;
498
499 pg = pg->next;
500 free_page(tmp);
501 }
502
503 free_page((unsigned long)stat->pages);
504 stat->pages = NULL;
505 stat->start = NULL;
506
507 return -ENOMEM;
bac429f0
SR
508}
509
cafb168a 510static int ftrace_profile_init_cpu(int cpu)
bac429f0 511{
cafb168a 512 struct ftrace_profile_stat *stat;
493762fc 513 int size;
bac429f0 514
cafb168a
SR
515 stat = &per_cpu(ftrace_profile_stats, cpu);
516
517 if (stat->hash) {
493762fc 518 /* If the profile is already created, simply reset it */
cafb168a 519 ftrace_profile_reset(stat);
493762fc
SR
520 return 0;
521 }
bac429f0 522
493762fc
SR
523 /*
524 * We are profiling all functions, but usually only a few thousand
525 * functions are hit. We'll make a hash of 1024 items.
526 */
527 size = FTRACE_PROFILE_HASH_SIZE;
bac429f0 528
cafb168a 529 stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
493762fc 530
cafb168a 531 if (!stat->hash)
493762fc
SR
532 return -ENOMEM;
533
cafb168a
SR
534 if (!ftrace_profile_bits) {
535 size--;
493762fc 536
cafb168a
SR
537 for (; size; size >>= 1)
538 ftrace_profile_bits++;
539 }
493762fc 540
318e0a73 541 /* Preallocate the function profiling pages */
cafb168a
SR
542 if (ftrace_profile_pages_init(stat) < 0) {
543 kfree(stat->hash);
544 stat->hash = NULL;
493762fc
SR
545 return -ENOMEM;
546 }
547
548 return 0;
bac429f0
SR
549}
550
cafb168a
SR
551static int ftrace_profile_init(void)
552{
553 int cpu;
554 int ret = 0;
555
556 for_each_online_cpu(cpu) {
557 ret = ftrace_profile_init_cpu(cpu);
558 if (ret)
559 break;
560 }
561
562 return ret;
563}
564
493762fc 565/* interrupts must be disabled */
cafb168a
SR
566static struct ftrace_profile *
567ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
bac429f0 568{
493762fc 569 struct ftrace_profile *rec;
bac429f0
SR
570 struct hlist_head *hhd;
571 struct hlist_node *n;
bac429f0
SR
572 unsigned long key;
573
bac429f0 574 key = hash_long(ip, ftrace_profile_bits);
cafb168a 575 hhd = &stat->hash[key];
bac429f0
SR
576
577 if (hlist_empty(hhd))
578 return NULL;
579
bac429f0
SR
580 hlist_for_each_entry_rcu(rec, n, hhd, node) {
581 if (rec->ip == ip)
493762fc
SR
582 return rec;
583 }
584
585 return NULL;
586}
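/*
 * Caller sketch (taken from how this file itself uses the function): the
 * "interrupts must be disabled" requirement is met by wrapping the
 * lookup the way function_profile_call() below does:
 *
 *	local_irq_save(flags);
 *	stat = &__get_cpu_var(ftrace_profile_stats);
 *	rec = ftrace_find_profiled_func(stat, ip);
 *	...
 *	local_irq_restore(flags);
 *
 * so the per-cpu stat and the hash walk cannot be preempted or migrated;
 * NMI recursion is handled separately via stat->disabled in
 * ftrace_profile_alloc().
 */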
587
cafb168a
SR
588static void ftrace_add_profile(struct ftrace_profile_stat *stat,
589 struct ftrace_profile *rec)
493762fc
SR
590{
591 unsigned long key;
592
593 key = hash_long(rec->ip, ftrace_profile_bits);
cafb168a 594 hlist_add_head_rcu(&rec->node, &stat->hash[key]);
493762fc
SR
595}
596
318e0a73
SR
597/*
598 * The memory is already allocated, this simply finds a new record to use.
599 */
493762fc 600static struct ftrace_profile *
318e0a73 601ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
493762fc
SR
602{
603 struct ftrace_profile *rec = NULL;
604
318e0a73 605 /* prevent recursion (from NMIs) */
cafb168a 606 if (atomic_inc_return(&stat->disabled) != 1)
493762fc
SR
607 goto out;
608
493762fc 609 /*
318e0a73
SR
610 * Try to find the function again since an NMI
611 * could have added it
493762fc 612 */
cafb168a 613 rec = ftrace_find_profiled_func(stat, ip);
493762fc 614 if (rec)
cafb168a 615 goto out;
493762fc 616
cafb168a
SR
617 if (stat->pages->index == PROFILES_PER_PAGE) {
618 if (!stat->pages->next)
619 goto out;
620 stat->pages = stat->pages->next;
bac429f0 621 }
493762fc 622
cafb168a 623 rec = &stat->pages->records[stat->pages->index++];
493762fc 624 rec->ip = ip;
cafb168a 625 ftrace_add_profile(stat, rec);
493762fc 626
bac429f0 627 out:
cafb168a 628 atomic_dec(&stat->disabled);
bac429f0
SR
629
630 return rec;
631}
632
633static void
634function_profile_call(unsigned long ip, unsigned long parent_ip)
635{
cafb168a 636 struct ftrace_profile_stat *stat;
493762fc 637 struct ftrace_profile *rec;
bac429f0
SR
638 unsigned long flags;
639
640 if (!ftrace_profile_enabled)
641 return;
642
643 local_irq_save(flags);
cafb168a
SR
644
645 stat = &__get_cpu_var(ftrace_profile_stats);
0f6ce3de 646 if (!stat->hash || !ftrace_profile_enabled)
cafb168a
SR
647 goto out;
648
649 rec = ftrace_find_profiled_func(stat, ip);
493762fc 650 if (!rec) {
318e0a73 651 rec = ftrace_profile_alloc(stat, ip);
493762fc
SR
652 if (!rec)
653 goto out;
654 }
bac429f0
SR
655
656 rec->counter++;
657 out:
658 local_irq_restore(flags);
659}
660
0706f1c4
SR
661#ifdef CONFIG_FUNCTION_GRAPH_TRACER
662static int profile_graph_entry(struct ftrace_graph_ent *trace)
663{
664 function_profile_call(trace->func, 0);
665 return 1;
666}
667
668static void profile_graph_return(struct ftrace_graph_ret *trace)
669{
cafb168a 670 struct ftrace_profile_stat *stat;
a2a16d6a 671 unsigned long long calltime;
0706f1c4 672 struct ftrace_profile *rec;
cafb168a 673 unsigned long flags;
0706f1c4
SR
674
675 local_irq_save(flags);
cafb168a 676 stat = &__get_cpu_var(ftrace_profile_stats);
0f6ce3de 677 if (!stat->hash || !ftrace_profile_enabled)
cafb168a
SR
678 goto out;
679
37e44bc5
SR
680 /* If the calltime was zero'd ignore it */
681 if (!trace->calltime)
682 goto out;
683
a2a16d6a
SR
684 calltime = trace->rettime - trace->calltime;
685
686 if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
687 int index;
688
689 index = trace->depth;
690
691 /* Append this call time to the parent time to subtract */
692 if (index)
693 current->ret_stack[index - 1].subtime += calltime;
694
695 if (current->ret_stack[index].subtime < calltime)
696 calltime -= current->ret_stack[index].subtime;
697 else
698 calltime = 0;
699 }
700
cafb168a 701 rec = ftrace_find_profiled_func(stat, trace->func);
e330b3bc 702 if (rec) {
a2a16d6a 703 rec->time += calltime;
e330b3bc
CD
704 rec->time_squared += calltime * calltime;
705 }
a2a16d6a 706
cafb168a 707 out:
0706f1c4
SR
708 local_irq_restore(flags);
709}
710
711static int register_ftrace_profiler(void)
712{
713 return register_ftrace_graph(&profile_graph_return,
714 &profile_graph_entry);
715}
716
717static void unregister_ftrace_profiler(void)
718{
719 unregister_ftrace_graph();
720}
721#else
bac429f0
SR
722static struct ftrace_ops ftrace_profile_ops __read_mostly =
723{
fb9fb015 724 .func = function_profile_call,
bac429f0
SR
725};
726
0706f1c4
SR
727static int register_ftrace_profiler(void)
728{
729 return register_ftrace_function(&ftrace_profile_ops);
730}
731
732static void unregister_ftrace_profiler(void)
733{
734 unregister_ftrace_function(&ftrace_profile_ops);
735}
736#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
737
bac429f0
SR
738static ssize_t
739ftrace_profile_write(struct file *filp, const char __user *ubuf,
740 size_t cnt, loff_t *ppos)
741{
742 unsigned long val;
fb9fb015 743 char buf[64]; /* big enough to hold a number */
bac429f0
SR
744 int ret;
745
bac429f0
SR
746 if (cnt >= sizeof(buf))
747 return -EINVAL;
748
749 if (copy_from_user(&buf, ubuf, cnt))
750 return -EFAULT;
751
752 buf[cnt] = 0;
753
754 ret = strict_strtoul(buf, 10, &val);
755 if (ret < 0)
756 return ret;
757
758 val = !!val;
759
760 mutex_lock(&ftrace_profile_lock);
761 if (ftrace_profile_enabled ^ val) {
762 if (val) {
493762fc
SR
763 ret = ftrace_profile_init();
764 if (ret < 0) {
765 cnt = ret;
766 goto out;
767 }
768
0706f1c4
SR
769 ret = register_ftrace_profiler();
770 if (ret < 0) {
771 cnt = ret;
772 goto out;
773 }
bac429f0
SR
774 ftrace_profile_enabled = 1;
775 } else {
776 ftrace_profile_enabled = 0;
0f6ce3de
SR
777 /*
778 * unregister_ftrace_profiler calls stop_machine
 779 * so this acts like a synchronize_sched().
780 */
0706f1c4 781 unregister_ftrace_profiler();
bac429f0
SR
782 }
783 }
493762fc 784 out:
bac429f0
SR
785 mutex_unlock(&ftrace_profile_lock);
786
cf8517cf 787 *ppos += cnt;
bac429f0
SR
788
789 return cnt;
790}
791
493762fc
SR
792static ssize_t
793ftrace_profile_read(struct file *filp, char __user *ubuf,
794 size_t cnt, loff_t *ppos)
795{
fb9fb015 796 char buf[64]; /* big enough to hold a number */
493762fc
SR
797 int r;
798
799 r = sprintf(buf, "%u\n", ftrace_profile_enabled);
800 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
801}
802
bac429f0
SR
803static const struct file_operations ftrace_profile_fops = {
804 .open = tracing_open_generic,
805 .read = ftrace_profile_read,
806 .write = ftrace_profile_write,
6038f373 807 .llseek = default_llseek,
bac429f0
SR
808};
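/*
 * Usage sketch (paths assume debugfs is mounted at /sys/kernel/debug):
 *
 *	# echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *	# cat /sys/kernel/debug/tracing/trace_stat/function0
 *	# echo 0 > /sys/kernel/debug/tracing/function_profile_enabled
 *
 * Writing 1 allocates the per-cpu pages and registers the profiler,
 * writing 0 unregisters it.  The per-cpu "functionN" stat files are the
 * ones registered in ftrace_profile_debugfs() below.
 */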
809
cafb168a
SR
810/* used to initialize the real stat files */
811static struct tracer_stat function_stats __initdata = {
fb9fb015
SR
812 .name = "functions",
813 .stat_start = function_stat_start,
814 .stat_next = function_stat_next,
815 .stat_cmp = function_stat_cmp,
816 .stat_headers = function_stat_headers,
817 .stat_show = function_stat_show
cafb168a
SR
818};
819
6ab5d668 820static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
bac429f0 821{
cafb168a 822 struct ftrace_profile_stat *stat;
bac429f0 823 struct dentry *entry;
cafb168a 824 char *name;
bac429f0 825 int ret;
cafb168a
SR
826 int cpu;
827
828 for_each_possible_cpu(cpu) {
829 stat = &per_cpu(ftrace_profile_stats, cpu);
830
831 /* allocate enough for function name + cpu number */
832 name = kmalloc(32, GFP_KERNEL);
833 if (!name) {
834 /*
 835 * The files created are permanent; if something goes wrong
 836 * later we still do not free this memory.
837 */
cafb168a
SR
838 WARN(1,
839 "Could not allocate stat file for cpu %d\n",
840 cpu);
841 return;
842 }
843 stat->stat = function_stats;
844 snprintf(name, 32, "function%d", cpu);
845 stat->stat.name = name;
846 ret = register_stat_tracer(&stat->stat);
847 if (ret) {
848 WARN(1,
849 "Could not register function stat for cpu %d\n",
850 cpu);
851 kfree(name);
852 return;
853 }
bac429f0
SR
854 }
855
856 entry = debugfs_create_file("function_profile_enabled", 0644,
857 d_tracer, NULL, &ftrace_profile_fops);
858 if (!entry)
859 pr_warning("Could not create debugfs "
860 "'function_profile_enabled' entry\n");
861}
862
bac429f0 863#else /* CONFIG_FUNCTION_PROFILER */
6ab5d668 864static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
bac429f0
SR
865{
866}
bac429f0
SR
867#endif /* CONFIG_FUNCTION_PROFILER */
868
493762fc
SR
869static struct pid * const ftrace_swapper_pid = &init_struct_pid;
870
871#ifdef CONFIG_DYNAMIC_FTRACE
872
873#ifndef CONFIG_FTRACE_MCOUNT_RECORD
874# error Dynamic ftrace depends on MCOUNT_RECORD
875#endif
876
877static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
878
879struct ftrace_func_probe {
880 struct hlist_node node;
881 struct ftrace_probe_ops *ops;
882 unsigned long flags;
883 unsigned long ip;
884 void *data;
885 struct rcu_head rcu;
886};
887
888enum {
889 FTRACE_ENABLE_CALLS = (1 << 0),
890 FTRACE_DISABLE_CALLS = (1 << 1),
891 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
79e406d7
SR
892 FTRACE_START_FUNC_RET = (1 << 3),
893 FTRACE_STOP_FUNC_RET = (1 << 4),
493762fc
SR
894};
895
896static int ftrace_filtered;
897
898static struct dyn_ftrace *ftrace_new_addrs;
899
900static DEFINE_MUTEX(ftrace_regex_lock);
901
902struct ftrace_page {
903 struct ftrace_page *next;
904 int index;
905 struct dyn_ftrace records[];
906};
907
908#define ENTRIES_PER_PAGE \
909 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
910
911/* estimate from running different kernels */
912#define NR_TO_INIT 10000
913
914static struct ftrace_page *ftrace_pages_start;
915static struct ftrace_page *ftrace_pages;
916
917static struct dyn_ftrace *ftrace_free_records;
918
919/*
 920 * This is a double for-loop. Do not use 'break' to break out of the loop;
921 * you must use a goto.
922 */
923#define do_for_each_ftrace_rec(pg, rec) \
924 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
925 int _____i; \
926 for (_____i = 0; _____i < pg->index; _____i++) { \
927 rec = &pg->records[_____i];
928
929#define while_for_each_ftrace_rec() \
930 } \
931 }
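/*
 * Usage sketch for the iterator above (hypothetical 'target_ip'; in-tree
 * users such as ftrace_text_reserved() below follow the same pattern):
 *
 *	struct ftrace_page *pg;
 *	struct dyn_ftrace *rec;
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->flags & FTRACE_FL_FREE)
 *			continue;		// fine, moves on to the next rec
 *		if (rec->ip == target_ip)
 *			goto found;		// must be a goto, never 'break'
 *	} while_for_each_ftrace_rec();
 *	return;
 * found:
 *	...
 *
 * A 'break' would only leave the inner of the two nested for loops,
 * which is why the comment above insists on a goto.
 */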
932
e309b41d 933static void ftrace_free_rec(struct dyn_ftrace *rec)
37ad5084 934{
ee000b7f 935 rec->freelist = ftrace_free_records;
37ad5084
SR
936 ftrace_free_records = rec;
937 rec->flags |= FTRACE_FL_FREE;
938}
939
e309b41d 940static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
3c1720f0 941{
37ad5084
SR
942 struct dyn_ftrace *rec;
943
944 /* First check for freed records */
945 if (ftrace_free_records) {
946 rec = ftrace_free_records;
947
37ad5084 948 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
6912896e 949 FTRACE_WARN_ON_ONCE(1);
37ad5084
SR
950 ftrace_free_records = NULL;
951 return NULL;
952 }
953
ee000b7f 954 ftrace_free_records = rec->freelist;
37ad5084
SR
955 memset(rec, 0, sizeof(*rec));
956 return rec;
957 }
958
3c1720f0 959 if (ftrace_pages->index == ENTRIES_PER_PAGE) {
08f5ac90
SR
960 if (!ftrace_pages->next) {
961 /* allocate another page */
962 ftrace_pages->next =
963 (void *)get_zeroed_page(GFP_KERNEL);
964 if (!ftrace_pages->next)
965 return NULL;
966 }
3c1720f0
SR
967 ftrace_pages = ftrace_pages->next;
968 }
969
970 return &ftrace_pages->records[ftrace_pages->index++];
971}
972
08f5ac90 973static struct dyn_ftrace *
d61f82d0 974ftrace_record_ip(unsigned long ip)
3d083395 975{
08f5ac90 976 struct dyn_ftrace *rec;
3d083395 977
f3c7ac40 978 if (ftrace_disabled)
08f5ac90 979 return NULL;
3d083395 980
08f5ac90
SR
981 rec = ftrace_alloc_dyn_node(ip);
982 if (!rec)
983 return NULL;
3d083395 984
08f5ac90 985 rec->ip = ip;
ee000b7f 986 rec->newlist = ftrace_new_addrs;
e94142a6 987 ftrace_new_addrs = rec;
3d083395 988
08f5ac90 989 return rec;
3d083395
SR
990}
991
b17e8a37
SR
992static void print_ip_ins(const char *fmt, unsigned char *p)
993{
994 int i;
995
996 printk(KERN_CONT "%s", fmt);
997
998 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
999 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1000}
1001
31e88909 1002static void ftrace_bug(int failed, unsigned long ip)
b17e8a37
SR
1003{
1004 switch (failed) {
1005 case -EFAULT:
1006 FTRACE_WARN_ON_ONCE(1);
1007 pr_info("ftrace faulted on modifying ");
1008 print_ip_sym(ip);
1009 break;
1010 case -EINVAL:
1011 FTRACE_WARN_ON_ONCE(1);
1012 pr_info("ftrace failed to modify ");
1013 print_ip_sym(ip);
b17e8a37 1014 print_ip_ins(" actual: ", (unsigned char *)ip);
b17e8a37
SR
1015 printk(KERN_CONT "\n");
1016 break;
1017 case -EPERM:
1018 FTRACE_WARN_ON_ONCE(1);
1019 pr_info("ftrace faulted on writing ");
1020 print_ip_sym(ip);
1021 break;
1022 default:
1023 FTRACE_WARN_ON_ONCE(1);
1024 pr_info("ftrace faulted on unknown error ");
1025 print_ip_sym(ip);
1026 }
1027}
1028
3c1720f0 1029
2cfa1978
MH
1030/* Return 1 if the address range is reserved for ftrace */
1031int ftrace_text_reserved(void *start, void *end)
1032{
1033 struct dyn_ftrace *rec;
1034 struct ftrace_page *pg;
1035
1036 do_for_each_ftrace_rec(pg, rec) {
1037 if (rec->ip <= (unsigned long)end &&
1038 rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
1039 return 1;
1040 } while_for_each_ftrace_rec();
1041 return 0;
1042}
1043
1044
0eb96701 1045static int
31e88909 1046__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
5072c59f 1047{
e7d3737e 1048 unsigned long ftrace_addr;
64fbcd16 1049 unsigned long flag = 0UL;
e7d3737e 1050
f0001207 1051 ftrace_addr = (unsigned long)FTRACE_ADDR;
5072c59f 1052
982c350b 1053 /*
64fbcd16
XG
1054 * If this record is not to be traced or we want to disable it,
1055 * then disable it.
982c350b 1056 *
64fbcd16 1057 * If we want to enable it and filtering is off, then enable it.
982c350b 1058 *
64fbcd16
XG
1059 * If we want to enable it and filtering is on, enable it only if
1060 * it's filtered
982c350b 1061 */
64fbcd16
XG
1062 if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) {
1063 if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
1064 flag = FTRACE_FL_ENABLED;
1065 }
982c350b 1066
64fbcd16
XG
1067 /* If the state of this record hasn't changed, then do nothing */
1068 if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1069 return 0;
982c350b 1070
64fbcd16
XG
1071 if (flag) {
1072 rec->flags |= FTRACE_FL_ENABLED;
1073 return ftrace_make_call(rec, ftrace_addr);
5072c59f
SR
1074 }
1075
64fbcd16
XG
1076 rec->flags &= ~FTRACE_FL_ENABLED;
1077 return ftrace_make_nop(NULL, rec, ftrace_addr);
5072c59f
SR
1078}
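/*
 * Reading aid: the decision above collapses to the following table
 * (derived from the code, shown only as a summary):
 *
 *	enable  NOTRACE  ftrace_filtered  FILTER  ->  desired state
 *	   0       -           -             -        nop
 *	   1       1           -             -        nop
 *	   1       0           0             -        call
 *	   1       0           1             0        nop
 *	   1       0           1             1        call
 *
 * and no patching happens at all when the desired state already matches
 * the record's FTRACE_FL_ENABLED bit.
 */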
1079
e309b41d 1080static void ftrace_replace_code(int enable)
3c1720f0 1081{
3c1720f0
SR
1082 struct dyn_ftrace *rec;
1083 struct ftrace_page *pg;
6a24a244 1084 int failed;
3c1720f0 1085
265c831c
SR
1086 do_for_each_ftrace_rec(pg, rec) {
1087 /*
fa9d13cf
Z
1088 * Skip over free records, records that have
1089 * failed and not converted.
265c831c
SR
1090 */
1091 if (rec->flags & FTRACE_FL_FREE ||
fa9d13cf 1092 rec->flags & FTRACE_FL_FAILED ||
03303549 1093 !(rec->flags & FTRACE_FL_CONVERTED))
265c831c
SR
1094 continue;
1095
265c831c 1096 failed = __ftrace_replace_code(rec, enable);
fa9d13cf 1097 if (failed) {
265c831c 1098 rec->flags |= FTRACE_FL_FAILED;
3279ba37
SR
1099 ftrace_bug(failed, rec->ip);
1100 /* Stop processing */
1101 return;
3c1720f0 1102 }
265c831c 1103 } while_for_each_ftrace_rec();
3c1720f0
SR
1104}
1105
492a7ea5 1106static int
31e88909 1107ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
3c1720f0
SR
1108{
1109 unsigned long ip;
593eb8a2 1110 int ret;
3c1720f0
SR
1111
1112 ip = rec->ip;
1113
25aac9dc 1114 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
593eb8a2 1115 if (ret) {
31e88909 1116 ftrace_bug(ret, ip);
3c1720f0 1117 rec->flags |= FTRACE_FL_FAILED;
492a7ea5 1118 return 0;
37ad5084 1119 }
492a7ea5 1120 return 1;
3c1720f0
SR
1121}
1122
000ab691
SR
1123/*
1124 * archs can override this function if they must do something
1125 * before the modifying code is performed.
1126 */
1127int __weak ftrace_arch_code_modify_prepare(void)
1128{
1129 return 0;
1130}
1131
1132/*
1133 * archs can override this function if they must do something
1134 * after the modifying code is performed.
1135 */
1136int __weak ftrace_arch_code_modify_post_process(void)
1137{
1138 return 0;
1139}
1140
e309b41d 1141static int __ftrace_modify_code(void *data)
3d083395 1142{
d61f82d0
SR
1143 int *command = data;
1144
a3583244 1145 if (*command & FTRACE_ENABLE_CALLS)
d61f82d0 1146 ftrace_replace_code(1);
a3583244 1147 else if (*command & FTRACE_DISABLE_CALLS)
d61f82d0
SR
1148 ftrace_replace_code(0);
1149
1150 if (*command & FTRACE_UPDATE_TRACE_FUNC)
1151 ftrace_update_ftrace_func(ftrace_trace_function);
1152
5a45cfe1
SR
1153 if (*command & FTRACE_START_FUNC_RET)
1154 ftrace_enable_ftrace_graph_caller();
1155 else if (*command & FTRACE_STOP_FUNC_RET)
1156 ftrace_disable_ftrace_graph_caller();
1157
d61f82d0 1158 return 0;
3d083395
SR
1159}
1160
e309b41d 1161static void ftrace_run_update_code(int command)
3d083395 1162{
000ab691
SR
1163 int ret;
1164
1165 ret = ftrace_arch_code_modify_prepare();
1166 FTRACE_WARN_ON(ret);
1167 if (ret)
1168 return;
1169
784e2d76 1170 stop_machine(__ftrace_modify_code, &command, NULL);
000ab691
SR
1171
1172 ret = ftrace_arch_code_modify_post_process();
1173 FTRACE_WARN_ON(ret);
3d083395
SR
1174}
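/*
 * Note on the sequence above: the actual text rewrite runs under
 * stop_machine() so no other CPU executes the instructions while they
 * are being patched.  The prepare/post_process hooks let an architecture
 * bracket that window; a minimal (illustrative, assumed) override would
 * look like:
 *
 *	int ftrace_arch_code_modify_prepare(void)
 *	{
 *		// e.g. make kernel text writable
 *		return 0;
 *	}
 *
 *	int ftrace_arch_code_modify_post_process(void)
 *	{
 *		// e.g. make kernel text read-only again
 *		return 0;
 *	}
 */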
1175
d61f82d0 1176static ftrace_func_t saved_ftrace_func;
60a7ecf4 1177static int ftrace_start_up;
df4fc315
SR
1178
1179static void ftrace_startup_enable(int command)
1180{
1181 if (saved_ftrace_func != ftrace_trace_function) {
1182 saved_ftrace_func = ftrace_trace_function;
1183 command |= FTRACE_UPDATE_TRACE_FUNC;
1184 }
1185
1186 if (!command || !ftrace_enabled)
1187 return;
1188
1189 ftrace_run_update_code(command);
1190}
d61f82d0 1191
5a45cfe1 1192static void ftrace_startup(int command)
3d083395 1193{
4eebcc81
SR
1194 if (unlikely(ftrace_disabled))
1195 return;
1196
60a7ecf4 1197 ftrace_start_up++;
982c350b 1198 command |= FTRACE_ENABLE_CALLS;
d61f82d0 1199
df4fc315 1200 ftrace_startup_enable(command);
3d083395
SR
1201}
1202
5a45cfe1 1203static void ftrace_shutdown(int command)
3d083395 1204{
4eebcc81
SR
1205 if (unlikely(ftrace_disabled))
1206 return;
1207
60a7ecf4 1208 ftrace_start_up--;
9ea1a153
FW
1209 /*
 1210 * Just warn in case of imbalance; no need to kill ftrace, it's not
 1211 * critical, but the ftrace_call callers may never be nopped again
 1212 * after further ftrace uses.
1213 */
1214 WARN_ON_ONCE(ftrace_start_up < 0);
1215
60a7ecf4 1216 if (!ftrace_start_up)
d61f82d0 1217 command |= FTRACE_DISABLE_CALLS;
3d083395 1218
d61f82d0
SR
1219 if (saved_ftrace_func != ftrace_trace_function) {
1220 saved_ftrace_func = ftrace_trace_function;
1221 command |= FTRACE_UPDATE_TRACE_FUNC;
1222 }
3d083395 1223
d61f82d0 1224 if (!command || !ftrace_enabled)
e6ea44e9 1225 return;
d61f82d0
SR
1226
1227 ftrace_run_update_code(command);
3d083395
SR
1228}
1229
e309b41d 1230static void ftrace_startup_sysctl(void)
b0fc494f 1231{
4eebcc81
SR
1232 if (unlikely(ftrace_disabled))
1233 return;
1234
d61f82d0
SR
1235 /* Force update next time */
1236 saved_ftrace_func = NULL;
60a7ecf4
SR
1237 /* ftrace_start_up is true if we want ftrace running */
1238 if (ftrace_start_up)
79e406d7 1239 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
b0fc494f
SR
1240}
1241
e309b41d 1242static void ftrace_shutdown_sysctl(void)
b0fc494f 1243{
4eebcc81
SR
1244 if (unlikely(ftrace_disabled))
1245 return;
1246
60a7ecf4
SR
1247 /* ftrace_start_up is true if ftrace is running */
1248 if (ftrace_start_up)
79e406d7 1249 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
b0fc494f
SR
1250}
1251
3d083395
SR
1252static cycle_t ftrace_update_time;
1253static unsigned long ftrace_update_cnt;
1254unsigned long ftrace_update_tot_cnt;
1255
31e88909 1256static int ftrace_update_code(struct module *mod)
3d083395 1257{
e94142a6 1258 struct dyn_ftrace *p;
f22f9a89 1259 cycle_t start, stop;
3d083395 1260
750ed1a4 1261 start = ftrace_now(raw_smp_processor_id());
3d083395
SR
1262 ftrace_update_cnt = 0;
1263
e94142a6 1264 while (ftrace_new_addrs) {
3d083395 1265
08f5ac90
SR
1266 /* If something went wrong, bail without enabling anything */
1267 if (unlikely(ftrace_disabled))
1268 return -1;
f22f9a89 1269
e94142a6 1270 p = ftrace_new_addrs;
ee000b7f 1271 ftrace_new_addrs = p->newlist;
e94142a6 1272 p->flags = 0L;
f22f9a89 1273
5cb084bb 1274 /*
25985edc 1275 * Do the initial record conversion from mcount jump
5cb084bb
JO
1276 * to the NOP instructions.
1277 */
1278 if (!ftrace_code_disable(mod, p)) {
08f5ac90 1279 ftrace_free_rec(p);
5cb084bb
JO
1280 continue;
1281 }
1282
1283 p->flags |= FTRACE_FL_CONVERTED;
1284 ftrace_update_cnt++;
1285
1286 /*
1287 * If the tracing is enabled, go ahead and enable the record.
1288 *
 1289 * The reason not to enable the record immediately is the
 1290 * inherent check of ftrace_make_nop/ftrace_make_call for
 1291 * correct previous instructions. Doing the NOP conversion
 1292 * first puts the module into the correct state, thus
1293 * passing the ftrace_make_call check.
1294 */
1295 if (ftrace_start_up) {
1296 int failed = __ftrace_replace_code(p, 1);
1297 if (failed) {
1298 ftrace_bug(failed, p->ip);
1299 ftrace_free_rec(p);
1300 }
1301 }
3d083395
SR
1302 }
1303
750ed1a4 1304 stop = ftrace_now(raw_smp_processor_id());
3d083395
SR
1305 ftrace_update_time = stop - start;
1306 ftrace_update_tot_cnt += ftrace_update_cnt;
1307
16444a8a
ACM
1308 return 0;
1309}
1310
68bf21aa 1311static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
3c1720f0
SR
1312{
1313 struct ftrace_page *pg;
1314 int cnt;
1315 int i;
3c1720f0
SR
1316
1317 /* allocate a few pages */
1318 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1319 if (!ftrace_pages_start)
1320 return -1;
1321
1322 /*
1323 * Allocate a few more pages.
1324 *
1325 * TODO: have some parser search vmlinux before
1326 * final linking to find all calls to ftrace.
1327 * Then we can:
1328 * a) know how many pages to allocate.
1329 * and/or
1330 * b) set up the table then.
1331 *
1332 * The dynamic code is still necessary for
1333 * modules.
1334 */
1335
1336 pg = ftrace_pages = ftrace_pages_start;
1337
68bf21aa 1338 cnt = num_to_init / ENTRIES_PER_PAGE;
08f5ac90 1339 pr_info("ftrace: allocating %ld entries in %d pages\n",
5821e1b7 1340 num_to_init, cnt + 1);
3c1720f0
SR
1341
1342 for (i = 0; i < cnt; i++) {
1343 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1344
1345 /* If we fail, we'll try later anyway */
1346 if (!pg->next)
1347 break;
1348
1349 pg = pg->next;
1350 }
1351
1352 return 0;
1353}
1354
5072c59f
SR
1355enum {
1356 FTRACE_ITER_FILTER = (1 << 0),
689fd8b6 1357 FTRACE_ITER_NOTRACE = (1 << 1),
1358 FTRACE_ITER_FAILURES = (1 << 2),
1359 FTRACE_ITER_PRINTALL = (1 << 3),
1360 FTRACE_ITER_HASH = (1 << 4),
5072c59f
SR
1361};
1362
1363#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1364
1365struct ftrace_iterator {
98c4fd04 1366 loff_t pos;
4aeb6967
SR
1367 loff_t func_pos;
1368 struct ftrace_page *pg;
1369 struct dyn_ftrace *func;
1370 struct ftrace_func_probe *probe;
1371 struct trace_parser parser;
1372 int hidx;
1373 int idx;
1374 unsigned flags;
5072c59f
SR
1375};
1376
8fc0c701 1377static void *
4aeb6967 1378t_hash_next(struct seq_file *m, loff_t *pos)
8fc0c701
SR
1379{
1380 struct ftrace_iterator *iter = m->private;
4aeb6967 1381 struct hlist_node *hnd = NULL;
8fc0c701
SR
1382 struct hlist_head *hhd;
1383
8fc0c701 1384 (*pos)++;
98c4fd04 1385 iter->pos = *pos;
8fc0c701 1386
4aeb6967
SR
1387 if (iter->probe)
1388 hnd = &iter->probe->node;
8fc0c701
SR
1389 retry:
1390 if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
1391 return NULL;
1392
1393 hhd = &ftrace_func_hash[iter->hidx];
1394
1395 if (hlist_empty(hhd)) {
1396 iter->hidx++;
1397 hnd = NULL;
1398 goto retry;
1399 }
1400
1401 if (!hnd)
1402 hnd = hhd->first;
1403 else {
1404 hnd = hnd->next;
1405 if (!hnd) {
1406 iter->hidx++;
1407 goto retry;
1408 }
1409 }
1410
4aeb6967
SR
1411 if (WARN_ON_ONCE(!hnd))
1412 return NULL;
1413
1414 iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
1415
1416 return iter;
8fc0c701
SR
1417}
1418
1419static void *t_hash_start(struct seq_file *m, loff_t *pos)
1420{
1421 struct ftrace_iterator *iter = m->private;
1422 void *p = NULL;
d82d6244
LZ
1423 loff_t l;
1424
2bccfffd
SR
1425 if (iter->func_pos > *pos)
1426 return NULL;
8fc0c701 1427
d82d6244 1428 iter->hidx = 0;
2bccfffd 1429 for (l = 0; l <= (*pos - iter->func_pos); ) {
4aeb6967 1430 p = t_hash_next(m, &l);
d82d6244
LZ
1431 if (!p)
1432 break;
1433 }
4aeb6967
SR
1434 if (!p)
1435 return NULL;
1436
98c4fd04
SR
1437 /* Only set this if we have an item */
1438 iter->flags |= FTRACE_ITER_HASH;
1439
4aeb6967 1440 return iter;
8fc0c701
SR
1441}
1442
4aeb6967
SR
1443static int
1444t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
8fc0c701 1445{
b6887d79 1446 struct ftrace_func_probe *rec;
8fc0c701 1447
4aeb6967
SR
1448 rec = iter->probe;
1449 if (WARN_ON_ONCE(!rec))
1450 return -EIO;
8fc0c701 1451
809dcf29
SR
1452 if (rec->ops->print)
1453 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
1454
b375a11a 1455 seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
8fc0c701
SR
1456
1457 if (rec->data)
1458 seq_printf(m, ":%p", rec->data);
1459 seq_putc(m, '\n');
1460
1461 return 0;
1462}
1463
e309b41d 1464static void *
5072c59f
SR
1465t_next(struct seq_file *m, void *v, loff_t *pos)
1466{
1467 struct ftrace_iterator *iter = m->private;
1468 struct dyn_ftrace *rec = NULL;
1469
8fc0c701 1470 if (iter->flags & FTRACE_ITER_HASH)
4aeb6967 1471 return t_hash_next(m, pos);
8fc0c701 1472
5072c59f 1473 (*pos)++;
1106b699 1474 iter->pos = iter->func_pos = *pos;
5072c59f 1475
0c75a3ed 1476 if (iter->flags & FTRACE_ITER_PRINTALL)
57c072c7 1477 return t_hash_start(m, pos);
0c75a3ed 1478
5072c59f
SR
1479 retry:
1480 if (iter->idx >= iter->pg->index) {
1481 if (iter->pg->next) {
1482 iter->pg = iter->pg->next;
1483 iter->idx = 0;
1484 goto retry;
1485 }
1486 } else {
1487 rec = &iter->pg->records[iter->idx++];
a9fdda33
SR
1488 if ((rec->flags & FTRACE_FL_FREE) ||
1489
1490 (!(iter->flags & FTRACE_ITER_FAILURES) &&
eb9a7bf0
AS
1491 (rec->flags & FTRACE_FL_FAILED)) ||
1492
1493 ((iter->flags & FTRACE_ITER_FAILURES) &&
a9fdda33 1494 !(rec->flags & FTRACE_FL_FAILED)) ||
eb9a7bf0 1495
0183fb1c
SR
1496 ((iter->flags & FTRACE_ITER_FILTER) &&
1497 !(rec->flags & FTRACE_FL_FILTER)) ||
1498
41c52c0d
SR
1499 ((iter->flags & FTRACE_ITER_NOTRACE) &&
1500 !(rec->flags & FTRACE_FL_NOTRACE))) {
5072c59f
SR
1501 rec = NULL;
1502 goto retry;
1503 }
1504 }
1505
4aeb6967 1506 if (!rec)
57c072c7 1507 return t_hash_start(m, pos);
4aeb6967
SR
1508
1509 iter->func = rec;
1510
1511 return iter;
5072c59f
SR
1512}
1513
98c4fd04
SR
1514static void reset_iter_read(struct ftrace_iterator *iter)
1515{
1516 iter->pos = 0;
1517 iter->func_pos = 0;
1518 iter->flags &= ~(FTRACE_ITER_PRINTALL & FTRACE_ITER_HASH);
5072c59f
SR
1519}
1520
1521static void *t_start(struct seq_file *m, loff_t *pos)
1522{
1523 struct ftrace_iterator *iter = m->private;
1524 void *p = NULL;
694ce0a5 1525 loff_t l;
5072c59f 1526
8fc0c701 1527 mutex_lock(&ftrace_lock);
98c4fd04
SR
1528 /*
1529 * If an lseek was done, then reset and start from beginning.
1530 */
1531 if (*pos < iter->pos)
1532 reset_iter_read(iter);
1533
0c75a3ed
SR
1534 /*
1535 * For set_ftrace_filter reading, if we have the filter
1536 * off, we can short cut and just print out that all
1537 * functions are enabled.
1538 */
1539 if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
1540 if (*pos > 0)
8fc0c701 1541 return t_hash_start(m, pos);
0c75a3ed 1542 iter->flags |= FTRACE_ITER_PRINTALL;
df091625
CW
1543 /* reset in case of seek/pread */
1544 iter->flags &= ~FTRACE_ITER_HASH;
0c75a3ed
SR
1545 return iter;
1546 }
1547
8fc0c701
SR
1548 if (iter->flags & FTRACE_ITER_HASH)
1549 return t_hash_start(m, pos);
1550
98c4fd04
SR
1551 /*
1552 * Unfortunately, we need to restart at ftrace_pages_start
1553 * every time we let go of the ftrace_mutex. This is because
1554 * those pointers can change without the lock.
1555 */
694ce0a5
LZ
1556 iter->pg = ftrace_pages_start;
1557 iter->idx = 0;
1558 for (l = 0; l <= *pos; ) {
1559 p = t_next(m, p, &l);
1560 if (!p)
1561 break;
50cdaf08 1562 }
5821e1b7 1563
4aeb6967
SR
1564 if (!p) {
1565 if (iter->flags & FTRACE_ITER_FILTER)
1566 return t_hash_start(m, pos);
8fc0c701 1567
4aeb6967
SR
1568 return NULL;
1569 }
1570
1571 return iter;
5072c59f
SR
1572}
1573
1574static void t_stop(struct seq_file *m, void *p)
1575{
8fc0c701 1576 mutex_unlock(&ftrace_lock);
5072c59f
SR
1577}
1578
1579static int t_show(struct seq_file *m, void *v)
1580{
0c75a3ed 1581 struct ftrace_iterator *iter = m->private;
4aeb6967 1582 struct dyn_ftrace *rec;
5072c59f 1583
8fc0c701 1584 if (iter->flags & FTRACE_ITER_HASH)
4aeb6967 1585 return t_hash_show(m, iter);
8fc0c701 1586
0c75a3ed
SR
1587 if (iter->flags & FTRACE_ITER_PRINTALL) {
1588 seq_printf(m, "#### all functions enabled ####\n");
1589 return 0;
1590 }
1591
4aeb6967
SR
1592 rec = iter->func;
1593
5072c59f
SR
1594 if (!rec)
1595 return 0;
1596
b375a11a 1597 seq_printf(m, "%ps\n", (void *)rec->ip);
5072c59f
SR
1598
1599 return 0;
1600}
1601
88e9d34c 1602static const struct seq_operations show_ftrace_seq_ops = {
5072c59f
SR
1603 .start = t_start,
1604 .next = t_next,
1605 .stop = t_stop,
1606 .show = t_show,
1607};
1608
e309b41d 1609static int
5072c59f
SR
1610ftrace_avail_open(struct inode *inode, struct file *file)
1611{
1612 struct ftrace_iterator *iter;
1613 int ret;
1614
4eebcc81
SR
1615 if (unlikely(ftrace_disabled))
1616 return -ENODEV;
1617
5072c59f
SR
1618 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1619 if (!iter)
1620 return -ENOMEM;
1621
1622 iter->pg = ftrace_pages_start;
5072c59f
SR
1623
1624 ret = seq_open(file, &show_ftrace_seq_ops);
1625 if (!ret) {
1626 struct seq_file *m = file->private_data;
4bf39a94 1627
5072c59f 1628 m->private = iter;
4bf39a94 1629 } else {
5072c59f 1630 kfree(iter);
4bf39a94 1631 }
5072c59f
SR
1632
1633 return ret;
1634}
1635
eb9a7bf0
AS
1636static int
1637ftrace_failures_open(struct inode *inode, struct file *file)
1638{
1639 int ret;
1640 struct seq_file *m;
1641 struct ftrace_iterator *iter;
1642
1643 ret = ftrace_avail_open(inode, file);
1644 if (!ret) {
907f2784 1645 m = file->private_data;
1646 iter = m->private;
eb9a7bf0
AS
1647 iter->flags = FTRACE_ITER_FAILURES;
1648 }
1649
1650 return ret;
1651}
1652
1653
41c52c0d 1654static void ftrace_filter_reset(int enable)
5072c59f
SR
1655{
1656 struct ftrace_page *pg;
1657 struct dyn_ftrace *rec;
41c52c0d 1658 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
5072c59f 1659
52baf119 1660 mutex_lock(&ftrace_lock);
41c52c0d
SR
1661 if (enable)
1662 ftrace_filtered = 0;
265c831c
SR
1663 do_for_each_ftrace_rec(pg, rec) {
1664 if (rec->flags & FTRACE_FL_FAILED)
1665 continue;
1666 rec->flags &= ~type;
1667 } while_for_each_ftrace_rec();
52baf119 1668 mutex_unlock(&ftrace_lock);
5072c59f
SR
1669}
1670
e309b41d 1671static int
41c52c0d 1672ftrace_regex_open(struct inode *inode, struct file *file, int enable)
5072c59f
SR
1673{
1674 struct ftrace_iterator *iter;
1675 int ret = 0;
1676
4eebcc81
SR
1677 if (unlikely(ftrace_disabled))
1678 return -ENODEV;
1679
5072c59f
SR
1680 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1681 if (!iter)
1682 return -ENOMEM;
1683
689fd8b6 1684 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
1685 kfree(iter);
1686 return -ENOMEM;
1687 }
1688
41c52c0d 1689 mutex_lock(&ftrace_regex_lock);
5072c59f 1690 if ((file->f_mode & FMODE_WRITE) &&
8650ae32 1691 (file->f_flags & O_TRUNC))
41c52c0d 1692 ftrace_filter_reset(enable);
5072c59f
SR
1693
1694 if (file->f_mode & FMODE_READ) {
1695 iter->pg = ftrace_pages_start;
41c52c0d
SR
1696 iter->flags = enable ? FTRACE_ITER_FILTER :
1697 FTRACE_ITER_NOTRACE;
5072c59f
SR
1698
1699 ret = seq_open(file, &show_ftrace_seq_ops);
1700 if (!ret) {
1701 struct seq_file *m = file->private_data;
1702 m->private = iter;
79fe249c
LZ
1703 } else {
1704 trace_parser_put(&iter->parser);
5072c59f 1705 kfree(iter);
79fe249c 1706 }
5072c59f
SR
1707 } else
1708 file->private_data = iter;
41c52c0d 1709 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
1710
1711 return ret;
1712}
1713
41c52c0d
SR
1714static int
1715ftrace_filter_open(struct inode *inode, struct file *file)
1716{
1717 return ftrace_regex_open(inode, file, 1);
1718}
1719
1720static int
1721ftrace_notrace_open(struct inode *inode, struct file *file)
1722{
1723 return ftrace_regex_open(inode, file, 0);
1724}
1725
e309b41d 1726static loff_t
41c52c0d 1727ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
5072c59f
SR
1728{
1729 loff_t ret;
1730
1731 if (file->f_mode & FMODE_READ)
1732 ret = seq_lseek(file, offset, origin);
1733 else
1734 file->f_pos = ret = 1;
1735
1736 return ret;
1737}
1738
64e7c440 1739static int ftrace_match(char *str, char *regex, int len, int type)
9f4801e3 1740{
9f4801e3 1741 int matched = 0;
751e9983 1742 int slen;
9f4801e3 1743
9f4801e3
SR
1744 switch (type) {
1745 case MATCH_FULL:
1746 if (strcmp(str, regex) == 0)
1747 matched = 1;
1748 break;
1749 case MATCH_FRONT_ONLY:
1750 if (strncmp(str, regex, len) == 0)
1751 matched = 1;
1752 break;
1753 case MATCH_MIDDLE_ONLY:
1754 if (strstr(str, regex))
1755 matched = 1;
1756 break;
1757 case MATCH_END_ONLY:
751e9983
LZ
1758 slen = strlen(str);
1759 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
9f4801e3
SR
1760 matched = 1;
1761 break;
1762 }
1763
1764 return matched;
1765}
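/*
 * Illustrative mapping of glob forms to the match types handled above
 * (the parsing itself is done by filter_parse_regex(); examples only):
 *
 *	"schedule"   ->  MATCH_FULL         exact name
 *	"sched_*"    ->  MATCH_FRONT_ONLY   prefix match
 *	"*_lock"     ->  MATCH_END_ONLY     suffix match
 *	"*spin*"     ->  MATCH_MIDDLE_ONLY  substring match
 *	"!sched_*"   ->  as above, with the 'not' flag set by the parser
 */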
1766
64e7c440
SR
1767static int
1768ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
1769{
1770 char str[KSYM_SYMBOL_LEN];
1771
1772 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1773 return ftrace_match(str, regex, len, type);
1774}
1775
311d16da 1776static int ftrace_match_records(char *buff, int len, int enable)
9f4801e3 1777{
6a24a244 1778 unsigned int search_len;
9f4801e3
SR
1779 struct ftrace_page *pg;
1780 struct dyn_ftrace *rec;
6a24a244
SR
1781 unsigned long flag;
1782 char *search;
9f4801e3 1783 int type;
9f4801e3 1784 int not;
311d16da 1785 int found = 0;
9f4801e3 1786
6a24a244 1787 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
3f6fe06d 1788 type = filter_parse_regex(buff, len, &search, &not);
9f4801e3
SR
1789
1790 search_len = strlen(search);
1791
52baf119 1792 mutex_lock(&ftrace_lock);
265c831c 1793 do_for_each_ftrace_rec(pg, rec) {
265c831c
SR
1794
1795 if (rec->flags & FTRACE_FL_FAILED)
1796 continue;
9f4801e3
SR
1797
1798 if (ftrace_match_record(rec, search, search_len, type)) {
265c831c
SR
1799 if (not)
1800 rec->flags &= ~flag;
1801 else
1802 rec->flags |= flag;
311d16da 1803 found = 1;
265c831c 1804 }
e68746a2
SR
1805 /*
1806 * Only enable filtering if we have a function that
1807 * is filtered on.
1808 */
1809 if (enable && (rec->flags & FTRACE_FL_FILTER))
1810 ftrace_filtered = 1;
265c831c 1811 } while_for_each_ftrace_rec();
52baf119 1812 mutex_unlock(&ftrace_lock);
311d16da
LZ
1813
1814 return found;
5072c59f
SR
1815}
1816
64e7c440
SR
1817static int
1818ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
1819 char *regex, int len, int type)
1820{
1821 char str[KSYM_SYMBOL_LEN];
1822 char *modname;
1823
1824 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
1825
1826 if (!modname || strcmp(modname, mod))
1827 return 0;
1828
1829 /* blank search means to match all funcs in the mod */
1830 if (len)
1831 return ftrace_match(str, regex, len, type);
1832 else
1833 return 1;
1834}
1835
311d16da 1836static int ftrace_match_module_records(char *buff, char *mod, int enable)
64e7c440 1837{
6a24a244 1838 unsigned search_len = 0;
64e7c440
SR
1839 struct ftrace_page *pg;
1840 struct dyn_ftrace *rec;
1841 int type = MATCH_FULL;
6a24a244
SR
1842 char *search = buff;
1843 unsigned long flag;
64e7c440 1844 int not = 0;
311d16da 1845 int found = 0;
64e7c440 1846
6a24a244
SR
1847 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1848
64e7c440
SR
1849 /* blank or '*' mean the same */
1850 if (strcmp(buff, "*") == 0)
1851 buff[0] = 0;
1852
1853 /* handle the case of 'dont filter this module' */
1854 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
1855 buff[0] = 0;
1856 not = 1;
1857 }
1858
1859 if (strlen(buff)) {
3f6fe06d 1860 type = filter_parse_regex(buff, strlen(buff), &search, &not);
64e7c440
SR
1861 search_len = strlen(search);
1862 }
1863
52baf119 1864 mutex_lock(&ftrace_lock);
64e7c440
SR
1865 do_for_each_ftrace_rec(pg, rec) {
1866
1867 if (rec->flags & FTRACE_FL_FAILED)
1868 continue;
1869
1870 if (ftrace_match_module_record(rec, mod,
1871 search, search_len, type)) {
1872 if (not)
1873 rec->flags &= ~flag;
1874 else
1875 rec->flags |= flag;
311d16da 1876 found = 1;
64e7c440 1877 }
e68746a2
SR
1878 if (enable && (rec->flags & FTRACE_FL_FILTER))
1879 ftrace_filtered = 1;
64e7c440
SR
1880
1881 } while_for_each_ftrace_rec();
52baf119 1882 mutex_unlock(&ftrace_lock);
311d16da
LZ
1883
1884 return found;
64e7c440
SR
1885}
1886
f6180773
SR
1887/*
1888 * We register the module command as a template to show others how
 1889 * to register a command as well.
1890 */
1891
1892static int
1893ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
1894{
1895 char *mod;
1896
1897 /*
1898 * cmd == 'mod' because we only registered this func
1899 * for the 'mod' ftrace_func_command.
1900 * But if you register one func with multiple commands,
1901 * you can tell which command was used by the cmd
1902 * parameter.
1903 */
1904
1905 /* we must have a module name */
1906 if (!param)
1907 return -EINVAL;
1908
1909 mod = strsep(&param, ":");
1910 if (!strlen(mod))
1911 return -EINVAL;
1912
311d16da
LZ
1913 if (ftrace_match_module_records(func, mod, enable))
1914 return 0;
1915 return -EINVAL;
f6180773
SR
1916}
1917
1918static struct ftrace_func_command ftrace_mod_cmd = {
1919 .name = "mod",
1920 .func = ftrace_mod_callback,
1921};
1922
1923static int __init ftrace_mod_cmd_init(void)
1924{
1925 return register_ftrace_command(&ftrace_mod_cmd);
1926}
1927device_initcall(ftrace_mod_cmd_init);
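/*
 * Usage sketch for the "mod" command registered above (paths assume
 * debugfs mounted at /sys/kernel/debug):
 *
 *	# filter to functions matching 'ext3_*' that live in module ext3
 *	echo 'ext3_*:mod:ext3' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 *	# filter to every function of a module
 *	echo '*:mod:ipv6' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * The text before the first ':' reaches ftrace_mod_callback() as 'func',
 * "mod" selects this command and the remainder is 'param' (the module
 * name).
 */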
1928
59df055f 1929static void
b6887d79 1930function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
59df055f 1931{
b6887d79 1932 struct ftrace_func_probe *entry;
59df055f
SR
1933 struct hlist_head *hhd;
1934 struct hlist_node *n;
1935 unsigned long key;
59df055f
SR
1936
1937 key = hash_long(ip, FTRACE_HASH_BITS);
1938
1939 hhd = &ftrace_func_hash[key];
1940
1941 if (hlist_empty(hhd))
1942 return;
1943
1944 /*
1945 * Disable preemption for these calls to prevent a RCU grace
1946 * period. This syncs the hash iteration and freeing of items
1947 * on the hash. rcu_read_lock is too dangerous here.
1948 */
5168ae50 1949 preempt_disable_notrace();
59df055f
SR
1950 hlist_for_each_entry_rcu(entry, n, hhd, node) {
1951 if (entry->ip == ip)
1952 entry->ops->func(ip, parent_ip, &entry->data);
1953 }
5168ae50 1954 preempt_enable_notrace();
59df055f
SR
1955}
1956
b6887d79 1957static struct ftrace_ops trace_probe_ops __read_mostly =
59df055f 1958{
fb9fb015 1959 .func = function_trace_probe_call,
59df055f
SR
1960};
1961
b6887d79 1962static int ftrace_probe_registered;
59df055f 1963
b6887d79 1964static void __enable_ftrace_function_probe(void)
59df055f
SR
1965{
1966 int i;
1967
b6887d79 1968 if (ftrace_probe_registered)
59df055f
SR
1969 return;
1970
1971 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1972 struct hlist_head *hhd = &ftrace_func_hash[i];
1973 if (hhd->first)
1974 break;
1975 }
1976 /* Nothing registered? */
1977 if (i == FTRACE_FUNC_HASHSIZE)
1978 return;
1979
b6887d79 1980 __register_ftrace_function(&trace_probe_ops);
59df055f 1981 ftrace_startup(0);
b6887d79 1982 ftrace_probe_registered = 1;
59df055f
SR
1983}
1984
b6887d79 1985static void __disable_ftrace_function_probe(void)
59df055f
SR
1986{
1987 int i;
1988
b6887d79 1989 if (!ftrace_probe_registered)
59df055f
SR
1990 return;
1991
1992 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1993 struct hlist_head *hhd = &ftrace_func_hash[i];
1994 if (hhd->first)
1995 return;
1996 }
1997
1998 /* no more funcs left */
b6887d79 1999 __unregister_ftrace_function(&trace_probe_ops);
59df055f 2000 ftrace_shutdown(0);
b6887d79 2001 ftrace_probe_registered = 0;
59df055f
SR
2002}
2003
2004
2005static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2006{
b6887d79
SR
2007 struct ftrace_func_probe *entry =
2008 container_of(rhp, struct ftrace_func_probe, rcu);
59df055f
SR
2009
2010 if (entry->ops->free)
2011 entry->ops->free(&entry->data);
2012 kfree(entry);
2013}
2014
2015
2016int
b6887d79 2017register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
59df055f
SR
2018 void *data)
2019{
b6887d79 2020 struct ftrace_func_probe *entry;
59df055f
SR
2021 struct ftrace_page *pg;
2022 struct dyn_ftrace *rec;
59df055f 2023 int type, len, not;
6a24a244 2024 unsigned long key;
59df055f
SR
2025 int count = 0;
2026 char *search;
2027
3f6fe06d 2028 type = filter_parse_regex(glob, strlen(glob), &search, &not);
59df055f
SR
2029 len = strlen(search);
2030
b6887d79 2031 /* we do not support '!' for function probes */
59df055f
SR
2032 if (WARN_ON(not))
2033 return -EINVAL;
2034
2035 mutex_lock(&ftrace_lock);
2036 do_for_each_ftrace_rec(pg, rec) {
2037
2038 if (rec->flags & FTRACE_FL_FAILED)
2039 continue;
2040
2041 if (!ftrace_match_record(rec, search, len, type))
2042 continue;
2043
2044 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2045 if (!entry) {
b6887d79 2046 /* If we did not process any, then return error */
59df055f
SR
2047 if (!count)
2048 count = -ENOMEM;
2049 goto out_unlock;
2050 }
2051
2052 count++;
2053
2054 entry->data = data;
2055
2056 /*
2057 * The caller might want to do something special
2058 * for each function we find. We call the callback
2059 * to give the caller an opportunity to do so.
2060 */
2061 if (ops->callback) {
2062 if (ops->callback(rec->ip, &entry->data) < 0) {
2063 /* caller does not like this func */
2064 kfree(entry);
2065 continue;
2066 }
2067 }
2068
2069 entry->ops = ops;
2070 entry->ip = rec->ip;
2071
2072 key = hash_long(entry->ip, FTRACE_HASH_BITS);
2073 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
2074
2075 } while_for_each_ftrace_rec();
b6887d79 2076 __enable_ftrace_function_probe();
59df055f
SR
2077
2078 out_unlock:
2079 mutex_unlock(&ftrace_lock);
2080
2081 return count;
2082}
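/*
 * Editor's illustrative sketch (not part of the original source); the
 * my_* names are made up. A caller hooks every function matching a glob
 * and later detaches with one of the unregister variants below. The
 * return value is the number of functions hooked, or a negative error:
 *
 *	static void my_probe(unsigned long ip, unsigned long parent_ip,
 *			     void **data)
 *	{
 *		// runs from the function tracer each time a matched
 *		// function is hit, so keep it short and notrace-safe
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func	= my_probe,
 *	};
 *
 *	ret = register_ftrace_function_probe("schedule*", &my_probe_ops, NULL);
 *	...
 *	unregister_ftrace_function_probe_func("schedule*", &my_probe_ops);
 */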
2083
2084enum {
b6887d79
SR
2085 PROBE_TEST_FUNC = 1,
2086 PROBE_TEST_DATA = 2
59df055f
SR
2087};
2088
2089static void
b6887d79 2090__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
59df055f
SR
2091 void *data, int flags)
2092{
b6887d79 2093 struct ftrace_func_probe *entry;
59df055f
SR
2094 struct hlist_node *n, *tmp;
2095 char str[KSYM_SYMBOL_LEN];
2096 int type = MATCH_FULL;
2097 int i, len = 0;
2098 char *search;
2099
b36461da 2100 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
59df055f 2101 glob = NULL;
b36461da 2102 else if (glob) {
59df055f
SR
2103 int not;
2104
3f6fe06d 2105 type = filter_parse_regex(glob, strlen(glob), &search, &not);
59df055f
SR
2106 len = strlen(search);
2107
b6887d79 2108 /* we do not support '!' for function probes */
59df055f
SR
2109 if (WARN_ON(not))
2110 return;
2111 }
2112
2113 mutex_lock(&ftrace_lock);
2114 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2115 struct hlist_head *hhd = &ftrace_func_hash[i];
2116
2117 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2118
2119 /* break up if statements for readability */
b6887d79 2120 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
59df055f
SR
2121 continue;
2122
b6887d79 2123 if ((flags & PROBE_TEST_DATA) && entry->data != data)
59df055f
SR
2124 continue;
2125
2126 /* do this last, since it is the most expensive */
2127 if (glob) {
2128 kallsyms_lookup(entry->ip, NULL, NULL,
2129 NULL, str);
2130 if (!ftrace_match(str, glob, len, type))
2131 continue;
2132 }
2133
2134 hlist_del(&entry->node);
2135 call_rcu(&entry->rcu, ftrace_free_entry_rcu);
2136 }
2137 }
b6887d79 2138 __disable_ftrace_function_probe();
59df055f
SR
2139 mutex_unlock(&ftrace_lock);
2140}
2141
2142void
b6887d79 2143unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
59df055f
SR
2144 void *data)
2145{
b6887d79
SR
2146 __unregister_ftrace_function_probe(glob, ops, data,
2147 PROBE_TEST_FUNC | PROBE_TEST_DATA);
59df055f
SR
2148}
2149
2150void
b6887d79 2151unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
59df055f 2152{
b6887d79 2153 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
59df055f
SR
2154}
2155
b6887d79 2156void unregister_ftrace_function_probe_all(char *glob)
59df055f 2157{
b6887d79 2158 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
59df055f
SR
2159}
2160
f6180773
SR
2161static LIST_HEAD(ftrace_commands);
2162static DEFINE_MUTEX(ftrace_cmd_mutex);
2163
2164int register_ftrace_command(struct ftrace_func_command *cmd)
2165{
2166 struct ftrace_func_command *p;
2167 int ret = 0;
2168
2169 mutex_lock(&ftrace_cmd_mutex);
2170 list_for_each_entry(p, &ftrace_commands, list) {
2171 if (strcmp(cmd->name, p->name) == 0) {
2172 ret = -EBUSY;
2173 goto out_unlock;
2174 }
2175 }
2176 list_add(&cmd->list, &ftrace_commands);
2177 out_unlock:
2178 mutex_unlock(&ftrace_cmd_mutex);
2179
2180 return ret;
2181}
2182
2183int unregister_ftrace_command(struct ftrace_func_command *cmd)
2184{
2185 struct ftrace_func_command *p, *n;
2186 int ret = -ENODEV;
2187
2188 mutex_lock(&ftrace_cmd_mutex);
2189 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2190 if (strcmp(cmd->name, p->name) == 0) {
2191 ret = 0;
2192 list_del_init(&p->list);
2193 goto out_unlock;
2194 }
2195 }
2196 out_unlock:
2197 mutex_unlock(&ftrace_cmd_mutex);
2198
2199 return ret;
2200}
2201
64e7c440
SR
2202static int ftrace_process_regex(char *buff, int len, int enable)
2203{
f6180773 2204 char *func, *command, *next = buff;
6a24a244 2205 struct ftrace_func_command *p;
f6180773 2206 int ret = -EINVAL;
64e7c440
SR
2207
2208 func = strsep(&next, ":");
2209
2210 if (!next) {
311d16da
LZ
2211 if (ftrace_match_records(func, len, enable))
2212 return 0;
2213 return ret;
64e7c440
SR
2214 }
2215
f6180773 2216 /* command found */
64e7c440
SR
2217
2218 command = strsep(&next, ":");
2219
f6180773
SR
2220 mutex_lock(&ftrace_cmd_mutex);
2221 list_for_each_entry(p, &ftrace_commands, list) {
2222 if (strcmp(p->name, command) == 0) {
2223 ret = p->func(func, command, next, enable);
2224 goto out_unlock;
2225 }
64e7c440 2226 }
f6180773
SR
2227 out_unlock:
2228 mutex_unlock(&ftrace_cmd_mutex);
64e7c440 2229
f6180773 2230 return ret;
64e7c440
SR
2231}
2232
e309b41d 2233static ssize_t
41c52c0d
SR
2234ftrace_regex_write(struct file *file, const char __user *ubuf,
2235 size_t cnt, loff_t *ppos, int enable)
5072c59f
SR
2236{
2237 struct ftrace_iterator *iter;
689fd8b6 2238 struct trace_parser *parser;
2239 ssize_t ret, read;
5072c59f 2240
4ba7978e 2241 if (!cnt)
5072c59f
SR
2242 return 0;
2243
41c52c0d 2244 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
2245
2246 if (file->f_mode & FMODE_READ) {
2247 struct seq_file *m = file->private_data;
2248 iter = m->private;
2249 } else
2250 iter = file->private_data;
2251
689fd8b6 2252 parser = &iter->parser;
2253 read = trace_get_user(parser, ubuf, cnt, ppos);
5072c59f 2254
4ba7978e 2255 if (read >= 0 && trace_parser_loaded(parser) &&
689fd8b6 2256 !trace_parser_cont(parser)) {
2257 ret = ftrace_process_regex(parser->buffer,
2258 parser->idx, enable);
313254a9 2259 trace_parser_clear(parser);
5072c59f 2260 if (ret)
ed146b25 2261 goto out_unlock;
eda1e328 2262 }
5072c59f 2263
5072c59f 2264 ret = read;
ed146b25 2265out_unlock:
689fd8b6 2266 mutex_unlock(&ftrace_regex_lock);
ed146b25 2267
5072c59f
SR
2268 return ret;
2269}
2270
41c52c0d
SR
2271static ssize_t
2272ftrace_filter_write(struct file *file, const char __user *ubuf,
2273 size_t cnt, loff_t *ppos)
2274{
2275 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2276}
2277
2278static ssize_t
2279ftrace_notrace_write(struct file *file, const char __user *ubuf,
2280 size_t cnt, loff_t *ppos)
2281{
2282 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2283}
2284
2285static void
2286ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
2287{
2288 if (unlikely(ftrace_disabled))
2289 return;
2290
2291 mutex_lock(&ftrace_regex_lock);
2292 if (reset)
2293 ftrace_filter_reset(enable);
2294 if (buf)
7f24b31b 2295 ftrace_match_records(buf, len, enable);
41c52c0d
SR
2296 mutex_unlock(&ftrace_regex_lock);
2297}
2298
77a2b37d
SR
2299/**
2300 * ftrace_set_filter - set a function to filter on in ftrace
2301 * @buf - the string that holds the function filter text.
2302 * @len - the length of the string.
 2303 * @reset - non-zero to reset all filters before applying this filter.
2304 *
2305 * Filters denote which functions should be enabled when tracing is enabled.
2306 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2307 */
e309b41d 2308void ftrace_set_filter(unsigned char *buf, int len, int reset)
77a2b37d 2309{
41c52c0d
SR
2310 ftrace_set_regex(buf, len, reset, 1);
2311}
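/*
 * Editor's illustrative example (not part of the original source): a
 * tracer could restrict tracing to the scheduler before starting, e.g.
 *
 *	ftrace_set_filter("sched*", strlen("sched*"), 1);
 *
 * Passing reset == 1 clears any previously installed filter first.
 */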
4eebcc81 2312
41c52c0d
SR
2313/**
2314 * ftrace_set_notrace - set a function to not trace in ftrace
2315 * @buf - the string that holds the function notrace text.
2316 * @len - the length of the string.
 2317 * @reset - non-zero to reset all filters before applying this filter.
 2318 *
 2319 * Notrace filters denote which functions should not be enabled when tracing
2320 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2321 * for tracing.
2322 */
2323void ftrace_set_notrace(unsigned char *buf, int len, int reset)
2324{
2325 ftrace_set_regex(buf, len, reset, 0);
77a2b37d
SR
2326}
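/*
 * Editor's illustrative example (not part of the original source): the
 * notrace list is the complement of the filter above, e.g. excluding
 * the lock helpers from an otherwise unrestricted trace:
 *
 *	ftrace_set_notrace("*spin_lock*", strlen("*spin_lock*"), 1);
 */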
2327
2af15d6a
SR
2328/*
2329 * command line interface to allow users to set filters on boot up.
2330 */
2331#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
2332static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
2333static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
2334
2335static int __init set_ftrace_notrace(char *str)
2336{
2337 strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
2338 return 1;
2339}
2340__setup("ftrace_notrace=", set_ftrace_notrace);
2341
2342static int __init set_ftrace_filter(char *str)
2343{
2344 strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
2345 return 1;
2346}
2347__setup("ftrace_filter=", set_ftrace_filter);
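/*
 * Editor's illustrative example (not part of the original source): the
 * two __setup() hooks above accept comma-separated expressions on the
 * kernel command line, applied from ftrace_init() via
 * set_ftrace_early_filters() below, e.g.
 *
 *	ftrace_filter=kmem_cache_alloc,kmem_cache_free ftrace_notrace=*lock*
 */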
2348
369bc18f 2349#ifdef CONFIG_FUNCTION_GRAPH_TRACER
f6060f46 2350static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
801c29fd
SR
2351static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
2352
369bc18f
SA
2353static int __init set_graph_function(char *str)
2354{
06f43d66 2355 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
369bc18f
SA
2356 return 1;
2357}
2358__setup("ftrace_graph_filter=", set_graph_function);
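/*
 * Editor's illustrative example (not part of the original source): the
 * graph filter can be seeded from the command line in the same way,
 * with each comma-separated name resolved by set_ftrace_early_graph()
 * below, e.g.
 *
 *	ftrace_graph_filter=sys_open,sys_read
 */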
2359
2360static void __init set_ftrace_early_graph(char *buf)
2361{
2362 int ret;
2363 char *func;
2364
2365 while (buf) {
2366 func = strsep(&buf, ",");
2367 /* we allow only one expression at a time */
2368 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
2369 func);
2370 if (ret)
2371 printk(KERN_DEBUG "ftrace: function %s not "
2372 "traceable\n", func);
2373 }
2374}
2375#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2376
2af15d6a
SR
2377static void __init set_ftrace_early_filter(char *buf, int enable)
2378{
2379 char *func;
2380
2381 while (buf) {
2382 func = strsep(&buf, ",");
2383 ftrace_set_regex(func, strlen(func), 0, enable);
2384 }
2385}
2386
2387static void __init set_ftrace_early_filters(void)
2388{
2389 if (ftrace_filter_buf[0])
2390 set_ftrace_early_filter(ftrace_filter_buf, 1);
2391 if (ftrace_notrace_buf[0])
2392 set_ftrace_early_filter(ftrace_notrace_buf, 0);
369bc18f
SA
2393#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2394 if (ftrace_graph_buf[0])
2395 set_ftrace_early_graph(ftrace_graph_buf);
2396#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2af15d6a
SR
2397}
2398
e309b41d 2399static int
41c52c0d 2400ftrace_regex_release(struct inode *inode, struct file *file, int enable)
5072c59f
SR
2401{
2402 struct seq_file *m = (struct seq_file *)file->private_data;
2403 struct ftrace_iterator *iter;
689fd8b6 2404 struct trace_parser *parser;
5072c59f 2405
41c52c0d 2406 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
2407 if (file->f_mode & FMODE_READ) {
2408 iter = m->private;
2409
2410 seq_release(inode, file);
2411 } else
2412 iter = file->private_data;
2413
689fd8b6 2414 parser = &iter->parser;
2415 if (trace_parser_loaded(parser)) {
2416 parser->buffer[parser->idx] = 0;
2417 ftrace_match_records(parser->buffer, parser->idx, enable);
5072c59f
SR
2418 }
2419
e6ea44e9 2420 mutex_lock(&ftrace_lock);
ee02a2e5 2421 if (ftrace_start_up && ftrace_enabled)
5072c59f 2422 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
e6ea44e9 2423 mutex_unlock(&ftrace_lock);
5072c59f 2424
689fd8b6 2425 trace_parser_put(parser);
5072c59f 2426 kfree(iter);
689fd8b6 2427
41c52c0d 2428 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
2429 return 0;
2430}
2431
41c52c0d
SR
2432static int
2433ftrace_filter_release(struct inode *inode, struct file *file)
2434{
2435 return ftrace_regex_release(inode, file, 1);
2436}
2437
2438static int
2439ftrace_notrace_release(struct inode *inode, struct file *file)
2440{
2441 return ftrace_regex_release(inode, file, 0);
2442}
2443
5e2336a0 2444static const struct file_operations ftrace_avail_fops = {
5072c59f
SR
2445 .open = ftrace_avail_open,
2446 .read = seq_read,
2447 .llseek = seq_lseek,
3be04b47 2448 .release = seq_release_private,
5072c59f
SR
2449};
2450
5e2336a0 2451static const struct file_operations ftrace_failures_fops = {
eb9a7bf0
AS
2452 .open = ftrace_failures_open,
2453 .read = seq_read,
2454 .llseek = seq_lseek,
3be04b47 2455 .release = seq_release_private,
eb9a7bf0
AS
2456};
2457
5e2336a0 2458static const struct file_operations ftrace_filter_fops = {
5072c59f 2459 .open = ftrace_filter_open,
850a80cf 2460 .read = seq_read,
5072c59f 2461 .write = ftrace_filter_write,
98c4fd04 2462 .llseek = ftrace_regex_lseek,
5072c59f
SR
2463 .release = ftrace_filter_release,
2464};
2465
5e2336a0 2466static const struct file_operations ftrace_notrace_fops = {
41c52c0d 2467 .open = ftrace_notrace_open,
850a80cf 2468 .read = seq_read,
41c52c0d
SR
2469 .write = ftrace_notrace_write,
2470 .llseek = ftrace_regex_lseek,
2471 .release = ftrace_notrace_release,
2472};
2473
ea4e2bc4
SR
2474#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2475
2476static DEFINE_MUTEX(graph_lock);
2477
2478int ftrace_graph_count;
c7c6b1fe 2479int ftrace_graph_filter_enabled;
ea4e2bc4
SR
2480unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
2481
2482static void *
85951842 2483__g_next(struct seq_file *m, loff_t *pos)
ea4e2bc4 2484{
85951842 2485 if (*pos >= ftrace_graph_count)
ea4e2bc4 2486 return NULL;
a4ec5e0c 2487 return &ftrace_graph_funcs[*pos];
85951842 2488}
ea4e2bc4 2489
85951842
LZ
2490static void *
2491g_next(struct seq_file *m, void *v, loff_t *pos)
2492{
2493 (*pos)++;
2494 return __g_next(m, pos);
ea4e2bc4
SR
2495}
2496
2497static void *g_start(struct seq_file *m, loff_t *pos)
2498{
ea4e2bc4
SR
2499 mutex_lock(&graph_lock);
2500
f9349a8f 2501 /* Nothing set; tell g_show to print that all functions are enabled */
c7c6b1fe 2502 if (!ftrace_graph_filter_enabled && !*pos)
f9349a8f
FW
2503 return (void *)1;
2504
85951842 2505 return __g_next(m, pos);
ea4e2bc4
SR
2506}
2507
2508static void g_stop(struct seq_file *m, void *p)
2509{
2510 mutex_unlock(&graph_lock);
2511}
2512
2513static int g_show(struct seq_file *m, void *v)
2514{
2515 unsigned long *ptr = v;
ea4e2bc4
SR
2516
2517 if (!ptr)
2518 return 0;
2519
f9349a8f
FW
2520 if (ptr == (unsigned long *)1) {
2521 seq_printf(m, "#### all functions enabled ####\n");
2522 return 0;
2523 }
2524
b375a11a 2525 seq_printf(m, "%ps\n", (void *)*ptr);
ea4e2bc4
SR
2526
2527 return 0;
2528}
2529
88e9d34c 2530static const struct seq_operations ftrace_graph_seq_ops = {
ea4e2bc4
SR
2531 .start = g_start,
2532 .next = g_next,
2533 .stop = g_stop,
2534 .show = g_show,
2535};
2536
2537static int
2538ftrace_graph_open(struct inode *inode, struct file *file)
2539{
2540 int ret = 0;
2541
2542 if (unlikely(ftrace_disabled))
2543 return -ENODEV;
2544
2545 mutex_lock(&graph_lock);
2546 if ((file->f_mode & FMODE_WRITE) &&
8650ae32 2547 (file->f_flags & O_TRUNC)) {
c7c6b1fe 2548 ftrace_graph_filter_enabled = 0;
ea4e2bc4
SR
2549 ftrace_graph_count = 0;
2550 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
2551 }
a4ec5e0c 2552 mutex_unlock(&graph_lock);
ea4e2bc4 2553
a4ec5e0c 2554 if (file->f_mode & FMODE_READ)
ea4e2bc4 2555 ret = seq_open(file, &ftrace_graph_seq_ops);
ea4e2bc4
SR
2556
2557 return ret;
2558}
2559
87827111
LZ
2560static int
2561ftrace_graph_release(struct inode *inode, struct file *file)
2562{
2563 if (file->f_mode & FMODE_READ)
2564 seq_release(inode, file);
2565 return 0;
2566}
2567
ea4e2bc4 2568static int
f9349a8f 2569ftrace_set_func(unsigned long *array, int *idx, char *buffer)
ea4e2bc4 2570{
ea4e2bc4
SR
2571 struct dyn_ftrace *rec;
2572 struct ftrace_page *pg;
f9349a8f 2573 int search_len;
c7c6b1fe 2574 int fail = 1;
f9349a8f
FW
2575 int type, not;
2576 char *search;
2577 bool exists;
2578 int i;
ea4e2bc4
SR
2579
2580 if (ftrace_disabled)
2581 return -ENODEV;
2582
f9349a8f 2583 /* decode regex */
3f6fe06d 2584 type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
c7c6b1fe
LZ
2585 if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
2586 return -EBUSY;
f9349a8f
FW
2587
2588 search_len = strlen(search);
2589
52baf119 2590 mutex_lock(&ftrace_lock);
265c831c
SR
2591 do_for_each_ftrace_rec(pg, rec) {
2592
2593 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
2594 continue;
2595
f9349a8f 2596 if (ftrace_match_record(rec, search, search_len, type)) {
c7c6b1fe 2597 /* if it is in the array */
f9349a8f 2598 exists = false;
c7c6b1fe 2599 for (i = 0; i < *idx; i++) {
f9349a8f
FW
2600 if (array[i] == rec->ip) {
2601 exists = true;
265c831c
SR
2602 break;
2603 }
c7c6b1fe
LZ
2604 }
2605
2606 if (!not) {
2607 fail = 0;
2608 if (!exists) {
2609 array[(*idx)++] = rec->ip;
2610 if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
2611 goto out;
2612 }
2613 } else {
2614 if (exists) {
2615 array[i] = array[--(*idx)];
2616 array[*idx] = 0;
2617 fail = 0;
2618 }
2619 }
ea4e2bc4 2620 }
265c831c 2621 } while_for_each_ftrace_rec();
c7c6b1fe 2622out:
52baf119 2623 mutex_unlock(&ftrace_lock);
ea4e2bc4 2624
c7c6b1fe
LZ
2625 if (fail)
2626 return -EINVAL;
2627
2628 ftrace_graph_filter_enabled = 1;
2629 return 0;
ea4e2bc4
SR
2630}
2631
2632static ssize_t
2633ftrace_graph_write(struct file *file, const char __user *ubuf,
2634 size_t cnt, loff_t *ppos)
2635{
689fd8b6 2636 struct trace_parser parser;
4ba7978e 2637 ssize_t read, ret;
ea4e2bc4 2638
c7c6b1fe 2639 if (!cnt)
ea4e2bc4
SR
2640 return 0;
2641
2642 mutex_lock(&graph_lock);
2643
689fd8b6 2644 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
2645 ret = -ENOMEM;
1eb90f13 2646 goto out_unlock;
ea4e2bc4
SR
2647 }
2648
689fd8b6 2649 read = trace_get_user(&parser, ubuf, cnt, ppos);
ea4e2bc4 2650
4ba7978e 2651 if (read >= 0 && trace_parser_loaded((&parser))) {
689fd8b6 2652 parser.buffer[parser.idx] = 0;
2653
2654 /* we allow only one expression at a time */
a4ec5e0c 2655 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
689fd8b6 2656 parser.buffer);
ea4e2bc4 2657 if (ret)
1eb90f13 2658 goto out_free;
ea4e2bc4 2659 }
ea4e2bc4
SR
2660
2661 ret = read;
1eb90f13
LZ
2662
2663out_free:
689fd8b6 2664 trace_parser_put(&parser);
1eb90f13 2665out_unlock:
ea4e2bc4
SR
2666 mutex_unlock(&graph_lock);
2667
2668 return ret;
2669}
2670
2671static const struct file_operations ftrace_graph_fops = {
87827111
LZ
2672 .open = ftrace_graph_open,
2673 .read = seq_read,
2674 .write = ftrace_graph_write,
2675 .release = ftrace_graph_release,
6038f373 2676 .llseek = seq_lseek,
ea4e2bc4
SR
2677};
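/*
 * Editor's illustrative example (not part of the original source):
 * typical use of the set_graph_function file backed by these operations
 * (debugfs assumed at /sys/kernel/debug):
 *
 *	echo sys_open > /sys/kernel/debug/tracing/set_graph_function
 *	echo sys_read >> /sys/kernel/debug/tracing/set_graph_function
 *	echo '!sys_open' >> /sys/kernel/debug/tracing/set_graph_function
 *
 * The '!' form removes a previously added function, as handled by the
 * "not" path in ftrace_set_func() above.
 */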
2678#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2679
df4fc315 2680static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
5072c59f 2681{
5072c59f 2682
5452af66
FW
2683 trace_create_file("available_filter_functions", 0444,
2684 d_tracer, NULL, &ftrace_avail_fops);
5072c59f 2685
5452af66
FW
2686 trace_create_file("failures", 0444,
2687 d_tracer, NULL, &ftrace_failures_fops);
eb9a7bf0 2688
5452af66
FW
2689 trace_create_file("set_ftrace_filter", 0644, d_tracer,
2690 NULL, &ftrace_filter_fops);
41c52c0d 2691
5452af66 2692 trace_create_file("set_ftrace_notrace", 0644, d_tracer,
41c52c0d 2693 NULL, &ftrace_notrace_fops);
ad90c0e3 2694
ea4e2bc4 2695#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5452af66 2696 trace_create_file("set_graph_function", 0444, d_tracer,
ea4e2bc4
SR
2697 NULL,
2698 &ftrace_graph_fops);
ea4e2bc4
SR
2699#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2700
5072c59f
SR
2701 return 0;
2702}
2703
5cb084bb 2704static int ftrace_process_locs(struct module *mod,
31e88909 2705 unsigned long *start,
68bf21aa
SR
2706 unsigned long *end)
2707{
2708 unsigned long *p;
2709 unsigned long addr;
68bf21aa 2710
e6ea44e9 2711 mutex_lock(&ftrace_lock);
68bf21aa
SR
2712 p = start;
2713 while (p < end) {
2714 addr = ftrace_call_adjust(*p++);
20e5227e
SR
2715 /*
2716 * Some architecture linkers will pad between
2717 * the different mcount_loc sections of different
2718 * object files to satisfy alignments.
2719 * Skip any NULL pointers.
2720 */
2721 if (!addr)
2722 continue;
68bf21aa 2723 ftrace_record_ip(addr);
68bf21aa
SR
2724 }
2725
31e88909 2726 ftrace_update_code(mod);
e6ea44e9 2727 mutex_unlock(&ftrace_lock);
68bf21aa
SR
2728
2729 return 0;
2730}
2731
93eb677d 2732#ifdef CONFIG_MODULES
e7247a15 2733void ftrace_release_mod(struct module *mod)
93eb677d
SR
2734{
2735 struct dyn_ftrace *rec;
2736 struct ftrace_page *pg;
93eb677d 2737
e7247a15 2738 if (ftrace_disabled)
93eb677d
SR
2739 return;
2740
2741 mutex_lock(&ftrace_lock);
2742 do_for_each_ftrace_rec(pg, rec) {
e7247a15 2743 if (within_module_core(rec->ip, mod)) {
93eb677d
SR
2744 /*
2745 * rec->ip is changed in ftrace_free_rec()
 2746 * It should no longer be within the module's range if the record was freed.
2747 */
2748 FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
2749 ftrace_free_rec(rec);
2750 }
2751 } while_for_each_ftrace_rec();
2752 mutex_unlock(&ftrace_lock);
2753}
2754
2755static void ftrace_init_module(struct module *mod,
2756 unsigned long *start, unsigned long *end)
90d595fe 2757{
00fd61ae 2758 if (ftrace_disabled || start == end)
fed1939c 2759 return;
5cb084bb 2760 ftrace_process_locs(mod, start, end);
90d595fe
SR
2761}
2762
93eb677d
SR
2763static int ftrace_module_notify(struct notifier_block *self,
2764 unsigned long val, void *data)
2765{
2766 struct module *mod = data;
2767
2768 switch (val) {
2769 case MODULE_STATE_COMING:
2770 ftrace_init_module(mod, mod->ftrace_callsites,
2771 mod->ftrace_callsites +
2772 mod->num_ftrace_callsites);
2773 break;
2774 case MODULE_STATE_GOING:
e7247a15 2775 ftrace_release_mod(mod);
93eb677d
SR
2776 break;
2777 }
2778
2779 return 0;
2780}
2781#else
2782static int ftrace_module_notify(struct notifier_block *self,
2783 unsigned long val, void *data)
2784{
2785 return 0;
2786}
2787#endif /* CONFIG_MODULES */
2788
2789struct notifier_block ftrace_module_nb = {
2790 .notifier_call = ftrace_module_notify,
2791 .priority = 0,
2792};
2793
68bf21aa
SR
2794extern unsigned long __start_mcount_loc[];
2795extern unsigned long __stop_mcount_loc[];
2796
2797void __init ftrace_init(void)
2798{
2799 unsigned long count, addr, flags;
2800 int ret;
2801
2802 /* Keep the ftrace pointer to the stub */
2803 addr = (unsigned long)ftrace_stub;
2804
2805 local_irq_save(flags);
2806 ftrace_dyn_arch_init(&addr);
2807 local_irq_restore(flags);
2808
2809 /* ftrace_dyn_arch_init places the return code in addr */
2810 if (addr)
2811 goto failed;
2812
2813 count = __stop_mcount_loc - __start_mcount_loc;
2814
2815 ret = ftrace_dyn_table_alloc(count);
2816 if (ret)
2817 goto failed;
2818
2819 last_ftrace_enabled = ftrace_enabled = 1;
2820
5cb084bb 2821 ret = ftrace_process_locs(NULL,
31e88909 2822 __start_mcount_loc,
68bf21aa
SR
2823 __stop_mcount_loc);
2824
93eb677d 2825 ret = register_module_notifier(&ftrace_module_nb);
24ed0c4b 2826 if (ret)
93eb677d
SR
2827 pr_warning("Failed to register trace ftrace module notifier\n");
2828
2af15d6a
SR
2829 set_ftrace_early_filters();
2830
68bf21aa
SR
2831 return;
2832 failed:
2833 ftrace_disabled = 1;
2834}
68bf21aa 2835
3d083395 2836#else
0b6e4d56
FW
2837
2838static int __init ftrace_nodyn_init(void)
2839{
2840 ftrace_enabled = 1;
2841 return 0;
2842}
2843device_initcall(ftrace_nodyn_init);
2844
df4fc315
SR
2845static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
2846static inline void ftrace_startup_enable(int command) { }
5a45cfe1
SR
2847/* Keep as macros so we do not need to define the commands */
2848# define ftrace_startup(command) do { } while (0)
2849# define ftrace_shutdown(command) do { } while (0)
c7aafc54
IM
2850# define ftrace_startup_sysctl() do { } while (0)
2851# define ftrace_shutdown_sysctl() do { } while (0)
3d083395
SR
2852#endif /* CONFIG_DYNAMIC_FTRACE */
2853
e32d8956 2854static void clear_ftrace_swapper(void)
978f3a45
SR
2855{
2856 struct task_struct *p;
e32d8956 2857 int cpu;
978f3a45 2858
e32d8956
SR
2859 get_online_cpus();
2860 for_each_online_cpu(cpu) {
2861 p = idle_task(cpu);
978f3a45 2862 clear_tsk_trace_trace(p);
e32d8956
SR
2863 }
2864 put_online_cpus();
2865}
978f3a45 2866
e32d8956
SR
2867static void set_ftrace_swapper(void)
2868{
2869 struct task_struct *p;
2870 int cpu;
2871
2872 get_online_cpus();
2873 for_each_online_cpu(cpu) {
2874 p = idle_task(cpu);
2875 set_tsk_trace_trace(p);
2876 }
2877 put_online_cpus();
978f3a45
SR
2878}
2879
e32d8956
SR
2880static void clear_ftrace_pid(struct pid *pid)
2881{
2882 struct task_struct *p;
2883
229c4ef8 2884 rcu_read_lock();
e32d8956
SR
2885 do_each_pid_task(pid, PIDTYPE_PID, p) {
2886 clear_tsk_trace_trace(p);
2887 } while_each_pid_task(pid, PIDTYPE_PID, p);
229c4ef8
ON
2888 rcu_read_unlock();
2889
e32d8956
SR
2890 put_pid(pid);
2891}
2892
2893static void set_ftrace_pid(struct pid *pid)
978f3a45
SR
2894{
2895 struct task_struct *p;
2896
229c4ef8 2897 rcu_read_lock();
978f3a45
SR
2898 do_each_pid_task(pid, PIDTYPE_PID, p) {
2899 set_tsk_trace_trace(p);
2900 } while_each_pid_task(pid, PIDTYPE_PID, p);
229c4ef8 2901 rcu_read_unlock();
978f3a45
SR
2902}
2903
756d17ee 2904static void clear_ftrace_pid_task(struct pid *pid)
e32d8956 2905{
756d17ee 2906 if (pid == ftrace_swapper_pid)
e32d8956
SR
2907 clear_ftrace_swapper();
2908 else
756d17ee 2909 clear_ftrace_pid(pid);
e32d8956
SR
2910}
2911
2912static void set_ftrace_pid_task(struct pid *pid)
2913{
2914 if (pid == ftrace_swapper_pid)
2915 set_ftrace_swapper();
2916 else
2917 set_ftrace_pid(pid);
2918}
2919
756d17ee 2920static int ftrace_pid_add(int p)
df4fc315 2921{
978f3a45 2922 struct pid *pid;
756d17ee 2923 struct ftrace_pid *fpid;
2924 int ret = -EINVAL;
df4fc315 2925
756d17ee 2926 mutex_lock(&ftrace_lock);
df4fc315 2927
756d17ee 2928 if (!p)
2929 pid = ftrace_swapper_pid;
2930 else
2931 pid = find_get_pid(p);
df4fc315 2932
756d17ee 2933 if (!pid)
2934 goto out;
df4fc315 2935
756d17ee 2936 ret = 0;
df4fc315 2937
756d17ee 2938 list_for_each_entry(fpid, &ftrace_pids, list)
2939 if (fpid->pid == pid)
2940 goto out_put;
978f3a45 2941
756d17ee 2942 ret = -ENOMEM;
df4fc315 2943
756d17ee 2944 fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
2945 if (!fpid)
2946 goto out_put;
df4fc315 2947
756d17ee 2948 list_add(&fpid->list, &ftrace_pids);
2949 fpid->pid = pid;
0ef8cde5 2950
756d17ee 2951 set_ftrace_pid_task(pid);
978f3a45 2952
756d17ee 2953 ftrace_update_pid_func();
2954 ftrace_startup_enable(0);
2955
2956 mutex_unlock(&ftrace_lock);
2957 return 0;
2958
2959out_put:
2960 if (pid != ftrace_swapper_pid)
2961 put_pid(pid);
978f3a45 2962
756d17ee 2963out:
2964 mutex_unlock(&ftrace_lock);
2965 return ret;
2966}
2967
2968static void ftrace_pid_reset(void)
2969{
2970 struct ftrace_pid *fpid, *safe;
978f3a45 2971
756d17ee 2972 mutex_lock(&ftrace_lock);
2973 list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
2974 struct pid *pid = fpid->pid;
2975
2976 clear_ftrace_pid_task(pid);
2977
2978 list_del(&fpid->list);
2979 kfree(fpid);
df4fc315
SR
2980 }
2981
df4fc315
SR
2982 ftrace_update_pid_func();
2983 ftrace_startup_enable(0);
2984
e6ea44e9 2985 mutex_unlock(&ftrace_lock);
756d17ee 2986}
df4fc315 2987
756d17ee 2988static void *fpid_start(struct seq_file *m, loff_t *pos)
2989{
2990 mutex_lock(&ftrace_lock);
2991
2992 if (list_empty(&ftrace_pids) && (!*pos))
2993 return (void *) 1;
2994
2995 return seq_list_start(&ftrace_pids, *pos);
2996}
2997
2998static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
2999{
3000 if (v == (void *)1)
3001 return NULL;
3002
3003 return seq_list_next(v, &ftrace_pids, pos);
3004}
3005
3006static void fpid_stop(struct seq_file *m, void *p)
3007{
3008 mutex_unlock(&ftrace_lock);
3009}
3010
3011static int fpid_show(struct seq_file *m, void *v)
3012{
3013 const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
3014
3015 if (v == (void *)1) {
3016 seq_printf(m, "no pid\n");
3017 return 0;
3018 }
3019
3020 if (fpid->pid == ftrace_swapper_pid)
3021 seq_printf(m, "swapper tasks\n");
3022 else
3023 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
3024
3025 return 0;
3026}
3027
3028static const struct seq_operations ftrace_pid_sops = {
3029 .start = fpid_start,
3030 .next = fpid_next,
3031 .stop = fpid_stop,
3032 .show = fpid_show,
3033};
3034
3035static int
3036ftrace_pid_open(struct inode *inode, struct file *file)
3037{
3038 int ret = 0;
3039
3040 if ((file->f_mode & FMODE_WRITE) &&
3041 (file->f_flags & O_TRUNC))
3042 ftrace_pid_reset();
3043
3044 if (file->f_mode & FMODE_READ)
3045 ret = seq_open(file, &ftrace_pid_sops);
3046
3047 return ret;
3048}
3049
df4fc315
SR
3050static ssize_t
3051ftrace_pid_write(struct file *filp, const char __user *ubuf,
3052 size_t cnt, loff_t *ppos)
3053{
457dc928 3054 char buf[64], *tmp;
df4fc315
SR
3055 long val;
3056 int ret;
3057
3058 if (cnt >= sizeof(buf))
3059 return -EINVAL;
3060
3061 if (copy_from_user(&buf, ubuf, cnt))
3062 return -EFAULT;
3063
3064 buf[cnt] = 0;
3065
756d17ee 3066 /*
3067 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
3068 * to clean the filter quietly.
3069 */
457dc928
IM
3070 tmp = strstrip(buf);
3071 if (strlen(tmp) == 0)
756d17ee 3072 return 1;
3073
457dc928 3074 ret = strict_strtol(tmp, 10, &val);
df4fc315
SR
3075 if (ret < 0)
3076 return ret;
3077
756d17ee 3078 ret = ftrace_pid_add(val);
df4fc315 3079
756d17ee 3080 return ret ? ret : cnt;
3081}
df4fc315 3082
756d17ee 3083static int
3084ftrace_pid_release(struct inode *inode, struct file *file)
3085{
3086 if (file->f_mode & FMODE_READ)
3087 seq_release(inode, file);
df4fc315 3088
756d17ee 3089 return 0;
df4fc315
SR
3090}
3091
5e2336a0 3092static const struct file_operations ftrace_pid_fops = {
756d17ee 3093 .open = ftrace_pid_open,
3094 .write = ftrace_pid_write,
3095 .read = seq_read,
3096 .llseek = seq_lseek,
3097 .release = ftrace_pid_release,
df4fc315
SR
3098};
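/*
 * Editor's illustrative example (not part of the original source):
 * usage of the set_ftrace_pid file backed by these operations (debugfs
 * assumed at /sys/kernel/debug/tracing):
 *
 *	echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid    # trace only pid 1234
 *	echo 5678 >> /sys/kernel/debug/tracing/set_ftrace_pid   # also trace pid 5678
 *	echo 0 > /sys/kernel/debug/tracing/set_ftrace_pid       # trace the idle (swapper) tasks
 *	echo > /sys/kernel/debug/tracing/set_ftrace_pid         # clear the pid filter again
 */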
3099
3100static __init int ftrace_init_debugfs(void)
3101{
3102 struct dentry *d_tracer;
df4fc315
SR
3103
3104 d_tracer = tracing_init_dentry();
3105 if (!d_tracer)
3106 return 0;
3107
3108 ftrace_init_dyn_debugfs(d_tracer);
3109
5452af66
FW
3110 trace_create_file("set_ftrace_pid", 0644, d_tracer,
3111 NULL, &ftrace_pid_fops);
493762fc
SR
3112
3113 ftrace_profile_debugfs(d_tracer);
3114
df4fc315
SR
3115 return 0;
3116}
df4fc315
SR
3117fs_initcall(ftrace_init_debugfs);
3118
a2bb6a3d 3119/**
81adbdc0 3120 * ftrace_kill - kill ftrace
a2bb6a3d
SR
3121 *
3122 * This function should be used by panic code. It stops ftrace
3123 * but in a not so nice way. If you need to simply kill ftrace
3124 * from a non-atomic section, use ftrace_kill.
3125 */
81adbdc0 3126void ftrace_kill(void)
a2bb6a3d
SR
3127{
3128 ftrace_disabled = 1;
3129 ftrace_enabled = 0;
a2bb6a3d
SR
3130 clear_ftrace_function();
3131}
3132
16444a8a 3133/**
3d083395
SR
3134 * register_ftrace_function - register a function for profiling
3135 * @ops - ops structure that holds the function for profiling.
16444a8a 3136 *
3d083395
SR
3137 * Register a function to be called by all functions in the
3138 * kernel.
3139 *
3140 * Note: @ops->func and all the functions it calls must be labeled
3141 * with "notrace", otherwise it will go into a
3142 * recursive loop.
16444a8a 3143 */
3d083395 3144int register_ftrace_function(struct ftrace_ops *ops)
16444a8a 3145{
b0fc494f
SR
3146 int ret;
3147
4eebcc81
SR
3148 if (unlikely(ftrace_disabled))
3149 return -1;
3150
e6ea44e9 3151 mutex_lock(&ftrace_lock);
e7d3737e 3152
b0fc494f 3153 ret = __register_ftrace_function(ops);
5a45cfe1 3154 ftrace_startup(0);
b0fc494f 3155
e6ea44e9 3156 mutex_unlock(&ftrace_lock);
b0fc494f 3157 return ret;
3d083395
SR
3158}
3159
3160/**
32632920 3161 * unregister_ftrace_function - unregister a function for profiling.
3d083395
SR
3162 * @ops - ops structure that holds the function to unregister
3163 *
3164 * Unregister a function that was added to be called by ftrace profiling.
3165 */
3166int unregister_ftrace_function(struct ftrace_ops *ops)
3167{
3168 int ret;
3169
e6ea44e9 3170 mutex_lock(&ftrace_lock);
3d083395 3171 ret = __unregister_ftrace_function(ops);
5a45cfe1 3172 ftrace_shutdown(0);
e6ea44e9 3173 mutex_unlock(&ftrace_lock);
b0fc494f
SR
3174
3175 return ret;
3176}
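/*
 * Editor's illustrative sketch (not part of the original source); the
 * my_* names are made up. A minimal tracer registers a notrace callback
 * through a struct ftrace_ops and removes it again when done:
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		// ip is the traced function, parent_ip is its caller
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */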
3177
e309b41d 3178int
b0fc494f 3179ftrace_enable_sysctl(struct ctl_table *table, int write,
8d65af78 3180 void __user *buffer, size_t *lenp,
b0fc494f
SR
3181 loff_t *ppos)
3182{
3183 int ret;
3184
4eebcc81
SR
3185 if (unlikely(ftrace_disabled))
3186 return -ENODEV;
3187
e6ea44e9 3188 mutex_lock(&ftrace_lock);
b0fc494f 3189
8d65af78 3190 ret = proc_dointvec(table, write, buffer, lenp, ppos);
b0fc494f 3191
a32c7765 3192 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
b0fc494f
SR
3193 goto out;
3194
a32c7765 3195 last_ftrace_enabled = !!ftrace_enabled;
b0fc494f
SR
3196
3197 if (ftrace_enabled) {
3198
3199 ftrace_startup_sysctl();
3200
3201 /* we are starting ftrace again */
3202 if (ftrace_list != &ftrace_list_end) {
3203 if (ftrace_list->next == &ftrace_list_end)
3204 ftrace_trace_function = ftrace_list->func;
3205 else
3206 ftrace_trace_function = ftrace_list_func;
3207 }
3208
3209 } else {
3210 /* stopping ftrace calls (just send to ftrace_stub) */
3211 ftrace_trace_function = ftrace_stub;
3212
3213 ftrace_shutdown_sysctl();
3214 }
3215
3216 out:
e6ea44e9 3217 mutex_unlock(&ftrace_lock);
3d083395 3218 return ret;
16444a8a 3219}
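/*
 * Editor's illustrative example (not part of the original source): this
 * handler backs the "ftrace_enabled" sysctl, normally exposed as
 * /proc/sys/kernel/ftrace_enabled:
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled   # route calls to ftrace_stub
 *	echo 1 > /proc/sys/kernel/ftrace_enabled   # restore the registered tracer(s)
 */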
f17845e5 3220
fb52607a 3221#ifdef CONFIG_FUNCTION_GRAPH_TRACER
e7d3737e 3222
597af815 3223static int ftrace_graph_active;
4a2b8dda 3224static struct notifier_block ftrace_suspend_notifier;
e7d3737e 3225
e49dc19c
SR
3226int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
3227{
3228 return 0;
3229}
3230
287b6e68
FW
3231/* The callbacks that hook a function */
3232trace_func_graph_ret_t ftrace_graph_return =
3233 (trace_func_graph_ret_t)ftrace_stub;
e49dc19c 3234trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
f201ae23
FW
3235
3236/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
3237static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3238{
3239 int i;
3240 int ret = 0;
3241 unsigned long flags;
3242 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
3243 struct task_struct *g, *t;
3244
3245 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
3246 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
3247 * sizeof(struct ftrace_ret_stack),
3248 GFP_KERNEL);
3249 if (!ret_stack_list[i]) {
3250 start = 0;
3251 end = i;
3252 ret = -ENOMEM;
3253 goto free;
3254 }
3255 }
3256
3257 read_lock_irqsave(&tasklist_lock, flags);
3258 do_each_thread(g, t) {
3259 if (start == end) {
3260 ret = -EAGAIN;
3261 goto unlock;
3262 }
3263
3264 if (t->ret_stack == NULL) {
380c4b14 3265 atomic_set(&t->tracing_graph_pause, 0);
f201ae23 3266 atomic_set(&t->trace_overrun, 0);
26c01624
SR
3267 t->curr_ret_stack = -1;
3268 /* Make sure the tasks see the -1 first: */
3269 smp_wmb();
3270 t->ret_stack = ret_stack_list[start++];
f201ae23
FW
3271 }
3272 } while_each_thread(g, t);
3273
3274unlock:
3275 read_unlock_irqrestore(&tasklist_lock, flags);
3276free:
3277 for (i = start; i < end; i++)
3278 kfree(ret_stack_list[i]);
3279 return ret;
3280}
3281
8aef2d28 3282static void
38516ab5
SR
3283ftrace_graph_probe_sched_switch(void *ignore,
3284 struct task_struct *prev, struct task_struct *next)
8aef2d28
SR
3285{
3286 unsigned long long timestamp;
3287 int index;
3288
be6f164a
SR
3289 /*
 3290 * Does the user want to count the time a function was asleep?
3291 * If so, do not update the time stamps.
3292 */
3293 if (trace_flags & TRACE_ITER_SLEEP_TIME)
3294 return;
3295
8aef2d28
SR
3296 timestamp = trace_clock_local();
3297
3298 prev->ftrace_timestamp = timestamp;
3299
3300 /* only process tasks that we timestamped */
3301 if (!next->ftrace_timestamp)
3302 return;
3303
3304 /*
3305 * Update all the counters in next to make up for the
3306 * time next was sleeping.
3307 */
3308 timestamp -= next->ftrace_timestamp;
3309
3310 for (index = next->curr_ret_stack; index >= 0; index--)
3311 next->ret_stack[index].calltime += timestamp;
3312}
3313
f201ae23 3314/* Allocate a return stack for each task */
fb52607a 3315static int start_graph_tracing(void)
f201ae23
FW
3316{
3317 struct ftrace_ret_stack **ret_stack_list;
5b058bcd 3318 int ret, cpu;
f201ae23
FW
3319
3320 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
3321 sizeof(struct ftrace_ret_stack *),
3322 GFP_KERNEL);
3323
3324 if (!ret_stack_list)
3325 return -ENOMEM;
3326
5b058bcd 3327 /* The cpu_boot init_task->ret_stack will never be freed */
179c498a
SR
3328 for_each_online_cpu(cpu) {
3329 if (!idle_task(cpu)->ret_stack)
868baf07 3330 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
179c498a 3331 }
5b058bcd 3332
f201ae23
FW
3333 do {
3334 ret = alloc_retstack_tasklist(ret_stack_list);
3335 } while (ret == -EAGAIN);
3336
8aef2d28 3337 if (!ret) {
38516ab5 3338 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
8aef2d28
SR
3339 if (ret)
3340 pr_info("ftrace_graph: Couldn't activate tracepoint"
3341 " probe to kernel_sched_switch\n");
3342 }
3343
f201ae23
FW
3344 kfree(ret_stack_list);
3345 return ret;
3346}
3347
4a2b8dda
FW
3348/*
3349 * Hibernation protection.
 3350 * The state of the current task is too unstable during
3351 * suspend/restore to disk. We want to protect against that.
3352 */
3353static int
3354ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
3355 void *unused)
3356{
3357 switch (state) {
3358 case PM_HIBERNATION_PREPARE:
3359 pause_graph_tracing();
3360 break;
3361
3362 case PM_POST_HIBERNATION:
3363 unpause_graph_tracing();
3364 break;
3365 }
3366 return NOTIFY_DONE;
3367}
3368
287b6e68
FW
3369int register_ftrace_graph(trace_func_graph_ret_t retfunc,
3370 trace_func_graph_ent_t entryfunc)
15e6cb36 3371{
e7d3737e
FW
3372 int ret = 0;
3373
e6ea44e9 3374 mutex_lock(&ftrace_lock);
e7d3737e 3375
05ce5818 3376 /* we currently allow only one tracer registered at a time */
597af815 3377 if (ftrace_graph_active) {
05ce5818
SR
3378 ret = -EBUSY;
3379 goto out;
3380 }
3381
4a2b8dda
FW
3382 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
3383 register_pm_notifier(&ftrace_suspend_notifier);
3384
597af815 3385 ftrace_graph_active++;
fb52607a 3386 ret = start_graph_tracing();
f201ae23 3387 if (ret) {
597af815 3388 ftrace_graph_active--;
f201ae23
FW
3389 goto out;
3390 }
e53a6319 3391
287b6e68
FW
3392 ftrace_graph_return = retfunc;
3393 ftrace_graph_entry = entryfunc;
e53a6319 3394
5a45cfe1 3395 ftrace_startup(FTRACE_START_FUNC_RET);
e7d3737e
FW
3396
3397out:
e6ea44e9 3398 mutex_unlock(&ftrace_lock);
e7d3737e 3399 return ret;
15e6cb36
FW
3400}
3401
fb52607a 3402void unregister_ftrace_graph(void)
15e6cb36 3403{
e6ea44e9 3404 mutex_lock(&ftrace_lock);
e7d3737e 3405
597af815 3406 if (unlikely(!ftrace_graph_active))
2aad1b76
SR
3407 goto out;
3408
597af815 3409 ftrace_graph_active--;
287b6e68 3410 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
e49dc19c 3411 ftrace_graph_entry = ftrace_graph_entry_stub;
5a45cfe1 3412 ftrace_shutdown(FTRACE_STOP_FUNC_RET);
4a2b8dda 3413 unregister_pm_notifier(&ftrace_suspend_notifier);
38516ab5 3414 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
e7d3737e 3415
2aad1b76 3416 out:
e6ea44e9 3417 mutex_unlock(&ftrace_lock);
15e6cb36 3418}
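/*
 * Editor's illustrative sketch (not part of the original source); the
 * my_* names are made up. register_ftrace_graph() takes the return
 * handler first and the entry handler second; a non-zero return from
 * the entry handler asks ftrace to trace the matching function return:
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// also hook this function's return
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *		// entry/exit timestamps and call depth are reported here
 *	}
 *
 *	ret = register_ftrace_graph(my_graph_return, my_graph_entry);
 *	...
 *	unregister_ftrace_graph();
 */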
f201ae23 3419
868baf07
SR
3420static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
3421
3422static void
3423graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
3424{
3425 atomic_set(&t->tracing_graph_pause, 0);
3426 atomic_set(&t->trace_overrun, 0);
3427 t->ftrace_timestamp = 0;
25985edc 3428 /* make curr_ret_stack visible before we add the ret_stack */
868baf07
SR
3429 smp_wmb();
3430 t->ret_stack = ret_stack;
3431}
3432
3433/*
3434 * Allocate a return stack for the idle task. May be the first
3435 * time through, or it may be done by CPU hotplug online.
3436 */
3437void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
3438{
3439 t->curr_ret_stack = -1;
3440 /*
 3441 * The idle task has no parent; it either has its own
3442 * stack or no stack at all.
3443 */
3444 if (t->ret_stack)
3445 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
3446
3447 if (ftrace_graph_active) {
3448 struct ftrace_ret_stack *ret_stack;
3449
3450 ret_stack = per_cpu(idle_ret_stack, cpu);
3451 if (!ret_stack) {
3452 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
3453 * sizeof(struct ftrace_ret_stack),
3454 GFP_KERNEL);
3455 if (!ret_stack)
3456 return;
3457 per_cpu(idle_ret_stack, cpu) = ret_stack;
3458 }
3459 graph_init_task(t, ret_stack);
3460 }
3461}
3462
f201ae23 3463/* Allocate a return stack for newly created task */
fb52607a 3464void ftrace_graph_init_task(struct task_struct *t)
f201ae23 3465{
84047e36
SR
3466 /* Make sure we do not use the parent ret_stack */
3467 t->ret_stack = NULL;
ea14eb71 3468 t->curr_ret_stack = -1;
84047e36 3469
597af815 3470 if (ftrace_graph_active) {
82310a32
SR
3471 struct ftrace_ret_stack *ret_stack;
3472
3473 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
f201ae23
FW
3474 * sizeof(struct ftrace_ret_stack),
3475 GFP_KERNEL);
82310a32 3476 if (!ret_stack)
f201ae23 3477 return;
868baf07 3478 graph_init_task(t, ret_stack);
84047e36 3479 }
f201ae23
FW
3480}
3481
fb52607a 3482void ftrace_graph_exit_task(struct task_struct *t)
f201ae23 3483{
eae849ca
FW
3484 struct ftrace_ret_stack *ret_stack = t->ret_stack;
3485
f201ae23 3486 t->ret_stack = NULL;
eae849ca
FW
3487 /* NULL must become visible to IRQs before we free it: */
3488 barrier();
3489
3490 kfree(ret_stack);
f201ae23 3491}
14a866c5
SR
3492
3493void ftrace_graph_stop(void)
3494{
3495 ftrace_stop();
3496}
15e6cb36 3497#endif