trace: stop all recording to ring buffer on ftrace_dump
[linux-block.git] / kernel / trace / ftrace.c
CommitLineData
16444a8a
ACM
1/*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code in the latency_tracer, that is:
11 *
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
14 */
15
3d083395
SR
16#include <linux/stop_machine.h>
17#include <linux/clocksource.h>
18#include <linux/kallsyms.h>
5072c59f
SR
19#include <linux/seq_file.h>
20#include <linux/debugfs.h>
3d083395 21#include <linux/hardirq.h>
2d8b820b 22#include <linux/kthread.h>
5072c59f 23#include <linux/uaccess.h>
f22f9a89 24#include <linux/kprobes.h>
2d8b820b 25#include <linux/ftrace.h>
b0fc494f 26#include <linux/sysctl.h>
5072c59f 27#include <linux/ctype.h>
3d083395
SR
28#include <linux/list.h>
29
395a59d0
AS
30#include <asm/ftrace.h>
31
3d083395 32#include "trace.h"
16444a8a 33
6912896e
SR
34#define FTRACE_WARN_ON(cond) \
35 do { \
36 if (WARN_ON(cond)) \
37 ftrace_kill(); \
38 } while (0)
39
40#define FTRACE_WARN_ON_ONCE(cond) \
41 do { \
42 if (WARN_ON_ONCE(cond)) \
43 ftrace_kill(); \
44 } while (0)
45
4eebcc81
SR
46/* ftrace_enabled is a method to turn ftrace on or off */
47int ftrace_enabled __read_mostly;
d61f82d0 48static int last_ftrace_enabled;
b0fc494f 49
0ef8cde5 50/* set when tracing only a pid */
978f3a45 51struct pid *ftrace_pid_trace;
21bbecda 52static struct pid * const ftrace_swapper_pid = &init_struct_pid;
df4fc315 53
60a7ecf4
SR
54/* Quick disabling of function tracer. */
55int function_trace_stop;
56
4eebcc81
SR
57/*
58 * ftrace_disabled is set when an anomaly is discovered.
59 * ftrace_disabled is much stronger than ftrace_enabled.
60 */
61static int ftrace_disabled __read_mostly;
62
3d083395 63static DEFINE_SPINLOCK(ftrace_lock);
b0fc494f 64static DEFINE_MUTEX(ftrace_sysctl_lock);
df4fc315 65static DEFINE_MUTEX(ftrace_start_lock);
b0fc494f 66
16444a8a
ACM
67static struct ftrace_ops ftrace_list_end __read_mostly =
68{
69 .func = ftrace_stub,
70};
71
72static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
73ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
60a7ecf4 74ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
df4fc315 75ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
16444a8a 76
f2252935 77static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
16444a8a
ACM
78{
79 struct ftrace_ops *op = ftrace_list;
80
81 /* in case someone actually ports this to alpha! */
82 read_barrier_depends();
83
84 while (op != &ftrace_list_end) {
85 /* silly alpha */
86 read_barrier_depends();
87 op->func(ip, parent_ip);
88 op = op->next;
89 };
90}
91
df4fc315
SR
92static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
93{
0ef8cde5 94 if (!test_tsk_trace_trace(current))
df4fc315
SR
95 return;
96
97 ftrace_pid_function(ip, parent_ip);
98}
99
100static void set_ftrace_pid_function(ftrace_func_t func)
101{
102 /* do not set ftrace_pid_function to itself! */
103 if (func != ftrace_pid_func)
104 ftrace_pid_function = func;
105}
106
16444a8a 107/**
3d083395 108 * clear_ftrace_function - reset the ftrace function
16444a8a 109 *
3d083395
SR
110 * This NULLs the ftrace function and in essence stops
111 * tracing. There may be lag
16444a8a 112 */
3d083395 113void clear_ftrace_function(void)
16444a8a 114{
3d083395 115 ftrace_trace_function = ftrace_stub;
60a7ecf4 116 __ftrace_trace_function = ftrace_stub;
df4fc315 117 ftrace_pid_function = ftrace_stub;
3d083395
SR
118}
119
60a7ecf4
SR
120#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
121/*
122 * For those archs that do not test ftrace_trace_stop in their
123 * mcount call site, we need to do it from C.
124 */
125static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
126{
127 if (function_trace_stop)
128 return;
129
130 __ftrace_trace_function(ip, parent_ip);
131}
132#endif
133
e309b41d 134static int __register_ftrace_function(struct ftrace_ops *ops)
3d083395 135{
99ecdc43 136 /* should not be called from interrupt context */
3d083395 137 spin_lock(&ftrace_lock);
16444a8a 138
16444a8a
ACM
139 ops->next = ftrace_list;
140 /*
141 * We are entering ops into the ftrace_list but another
142 * CPU might be walking that list. We need to make sure
143 * the ops->next pointer is valid before another CPU sees
144 * the ops pointer included into the ftrace_list.
145 */
146 smp_wmb();
147 ftrace_list = ops;
3d083395 148
b0fc494f 149 if (ftrace_enabled) {
df4fc315
SR
150 ftrace_func_t func;
151
152 if (ops->next == &ftrace_list_end)
153 func = ops->func;
154 else
155 func = ftrace_list_func;
156
978f3a45 157 if (ftrace_pid_trace) {
df4fc315
SR
158 set_ftrace_pid_function(func);
159 func = ftrace_pid_func;
160 }
161
b0fc494f
SR
162 /*
163 * For one func, simply call it directly.
164 * For more than one func, call the chain.
165 */
60a7ecf4 166#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
df4fc315 167 ftrace_trace_function = func;
60a7ecf4 168#else
df4fc315 169 __ftrace_trace_function = func;
60a7ecf4
SR
170 ftrace_trace_function = ftrace_test_stop_func;
171#endif
b0fc494f 172 }
3d083395
SR
173
174 spin_unlock(&ftrace_lock);
16444a8a
ACM
175
176 return 0;
177}
178
e309b41d 179static int __unregister_ftrace_function(struct ftrace_ops *ops)
16444a8a 180{
16444a8a
ACM
181 struct ftrace_ops **p;
182 int ret = 0;
183
99ecdc43 184 /* should not be called from interrupt context */
3d083395 185 spin_lock(&ftrace_lock);
16444a8a
ACM
186
187 /*
3d083395
SR
188 * If we are removing the last function, then simply point
189 * to the ftrace_stub.
16444a8a
ACM
190 */
191 if (ftrace_list == ops && ops->next == &ftrace_list_end) {
192 ftrace_trace_function = ftrace_stub;
193 ftrace_list = &ftrace_list_end;
194 goto out;
195 }
196
197 for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
198 if (*p == ops)
199 break;
200
201 if (*p != ops) {
202 ret = -1;
203 goto out;
204 }
205
206 *p = (*p)->next;
207
b0fc494f
SR
208 if (ftrace_enabled) {
209 /* If we only have one func left, then call that directly */
df4fc315
SR
210 if (ftrace_list->next == &ftrace_list_end) {
211 ftrace_func_t func = ftrace_list->func;
212
978f3a45 213 if (ftrace_pid_trace) {
df4fc315
SR
214 set_ftrace_pid_function(func);
215 func = ftrace_pid_func;
216 }
217#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
218 ftrace_trace_function = func;
219#else
220 __ftrace_trace_function = func;
221#endif
222 }
b0fc494f 223 }
16444a8a
ACM
224
225 out:
3d083395
SR
226 spin_unlock(&ftrace_lock);
227
228 return ret;
229}
230
df4fc315
SR
231static void ftrace_update_pid_func(void)
232{
233 ftrace_func_t func;
234
235 /* should not be called from interrupt context */
236 spin_lock(&ftrace_lock);
237
238 if (ftrace_trace_function == ftrace_stub)
239 goto out;
240
241 func = ftrace_trace_function;
242
978f3a45 243 if (ftrace_pid_trace) {
df4fc315
SR
244 set_ftrace_pid_function(func);
245 func = ftrace_pid_func;
246 } else {
66eafebc
LW
247 if (func == ftrace_pid_func)
248 func = ftrace_pid_function;
df4fc315
SR
249 }
250
251#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
252 ftrace_trace_function = func;
253#else
254 __ftrace_trace_function = func;
255#endif
256
257 out:
258 spin_unlock(&ftrace_lock);
259}
260
3d083395 261#ifdef CONFIG_DYNAMIC_FTRACE
99ecdc43 262#ifndef CONFIG_FTRACE_MCOUNT_RECORD
cb7be3b2 263# error Dynamic ftrace depends on MCOUNT_RECORD
99ecdc43
SR
264#endif
265
d61f82d0
SR
266enum {
267 FTRACE_ENABLE_CALLS = (1 << 0),
268 FTRACE_DISABLE_CALLS = (1 << 1),
269 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
270 FTRACE_ENABLE_MCOUNT = (1 << 3),
271 FTRACE_DISABLE_MCOUNT = (1 << 4),
5a45cfe1
SR
272 FTRACE_START_FUNC_RET = (1 << 5),
273 FTRACE_STOP_FUNC_RET = (1 << 6),
d61f82d0
SR
274};
275
5072c59f
SR
276static int ftrace_filtered;
277
08f5ac90 278static LIST_HEAD(ftrace_new_addrs);
3d083395 279
41c52c0d 280static DEFINE_MUTEX(ftrace_regex_lock);
3d083395 281
3c1720f0
SR
282struct ftrace_page {
283 struct ftrace_page *next;
431aa3fb 284 int index;
3c1720f0 285 struct dyn_ftrace records[];
aa5e5cea 286};
3c1720f0
SR
287
288#define ENTRIES_PER_PAGE \
289 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
290
291/* estimate from running different kernels */
292#define NR_TO_INIT 10000
293
294static struct ftrace_page *ftrace_pages_start;
295static struct ftrace_page *ftrace_pages;
296
37ad5084
SR
297static struct dyn_ftrace *ftrace_free_records;
298
ecea656d
AS
299
#ifdef CONFIG_KPROBES

/* count of records currently frozen (kprobe active at the call site) */
static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN)
		return;
	rec->flags |= FTRACE_FL_FROZEN;
	frozen_record_count++;
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN))
		return;
	rec->flags &= ~FTRACE_FL_FROZEN;
	frozen_record_count--;
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)	({ 0; })
# define unfreeze_record(rec)	({ 0; })
# define record_frozen(rec)	({ 0; })
#endif /* CONFIG_KPROBES */
329
e309b41d 330static void ftrace_free_rec(struct dyn_ftrace *rec)
37ad5084 331{
37ad5084
SR
332 rec->ip = (unsigned long)ftrace_free_records;
333 ftrace_free_records = rec;
334 rec->flags |= FTRACE_FL_FREE;
335}
336
fed1939c
SR
337void ftrace_release(void *start, unsigned long size)
338{
339 struct dyn_ftrace *rec;
340 struct ftrace_page *pg;
341 unsigned long s = (unsigned long)start;
342 unsigned long e = s + size;
343 int i;
344
00fd61ae 345 if (ftrace_disabled || !start)
fed1939c
SR
346 return;
347
99ecdc43 348 /* should not be called from interrupt context */
fed1939c
SR
349 spin_lock(&ftrace_lock);
350
351 for (pg = ftrace_pages_start; pg; pg = pg->next) {
352 for (i = 0; i < pg->index; i++) {
353 rec = &pg->records[i];
354
355 if ((rec->ip >= s) && (rec->ip < e))
356 ftrace_free_rec(rec);
357 }
358 }
359 spin_unlock(&ftrace_lock);
fed1939c
SR
360}
361
e309b41d 362static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
3c1720f0 363{
37ad5084
SR
364 struct dyn_ftrace *rec;
365
366 /* First check for freed records */
367 if (ftrace_free_records) {
368 rec = ftrace_free_records;
369
37ad5084 370 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
6912896e 371 FTRACE_WARN_ON_ONCE(1);
37ad5084
SR
372 ftrace_free_records = NULL;
373 return NULL;
374 }
375
376 ftrace_free_records = (void *)rec->ip;
377 memset(rec, 0, sizeof(*rec));
378 return rec;
379 }
380
3c1720f0 381 if (ftrace_pages->index == ENTRIES_PER_PAGE) {
08f5ac90
SR
382 if (!ftrace_pages->next) {
383 /* allocate another page */
384 ftrace_pages->next =
385 (void *)get_zeroed_page(GFP_KERNEL);
386 if (!ftrace_pages->next)
387 return NULL;
388 }
3c1720f0
SR
389 ftrace_pages = ftrace_pages->next;
390 }
391
392 return &ftrace_pages->records[ftrace_pages->index++];
393}
394
08f5ac90 395static struct dyn_ftrace *
d61f82d0 396ftrace_record_ip(unsigned long ip)
3d083395 397{
08f5ac90 398 struct dyn_ftrace *rec;
3d083395 399
f3c7ac40 400 if (ftrace_disabled)
08f5ac90 401 return NULL;
3d083395 402
08f5ac90
SR
403 rec = ftrace_alloc_dyn_node(ip);
404 if (!rec)
405 return NULL;
3d083395 406
08f5ac90 407 rec->ip = ip;
3d083395 408
08f5ac90 409 list_add(&rec->list, &ftrace_new_addrs);
3d083395 410
08f5ac90 411 return rec;
3d083395
SR
412}
413
b17e8a37
SR
414static void print_ip_ins(const char *fmt, unsigned char *p)
415{
416 int i;
417
418 printk(KERN_CONT "%s", fmt);
419
420 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
421 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
422}
423
31e88909 424static void ftrace_bug(int failed, unsigned long ip)
b17e8a37
SR
425{
426 switch (failed) {
427 case -EFAULT:
428 FTRACE_WARN_ON_ONCE(1);
429 pr_info("ftrace faulted on modifying ");
430 print_ip_sym(ip);
431 break;
432 case -EINVAL:
433 FTRACE_WARN_ON_ONCE(1);
434 pr_info("ftrace failed to modify ");
435 print_ip_sym(ip);
b17e8a37 436 print_ip_ins(" actual: ", (unsigned char *)ip);
b17e8a37
SR
437 printk(KERN_CONT "\n");
438 break;
439 case -EPERM:
440 FTRACE_WARN_ON_ONCE(1);
441 pr_info("ftrace faulted on writing ");
442 print_ip_sym(ip);
443 break;
444 default:
445 FTRACE_WARN_ON_ONCE(1);
446 pr_info("ftrace faulted on unknown error ");
447 print_ip_sym(ip);
448 }
449}
450
3c1720f0 451
0eb96701 452static int
31e88909 453__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
5072c59f 454{
41c52c0d 455 unsigned long ip, fl;
e7d3737e
FW
456 unsigned long ftrace_addr;
457
f0001207 458 ftrace_addr = (unsigned long)FTRACE_ADDR;
5072c59f
SR
459
460 ip = rec->ip;
461
982c350b
SR
462 /*
463 * If this record is not to be traced and
464 * it is not enabled then do nothing.
465 *
466 * If this record is not to be traced and
467 * it is enabled then disabled it.
468 *
469 */
470 if (rec->flags & FTRACE_FL_NOTRACE) {
471 if (rec->flags & FTRACE_FL_ENABLED)
472 rec->flags &= ~FTRACE_FL_ENABLED;
473 else
474 return 0;
475
476 } else if (ftrace_filtered && enable) {
5072c59f 477 /*
982c350b 478 * Filtering is on:
5072c59f 479 */
a4500b84 480
982c350b 481 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
5072c59f 482
982c350b
SR
483 /* Record is filtered and enabled, do nothing */
484 if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
0eb96701 485 return 0;
5072c59f 486
982c350b
SR
487 /* Record is not filtered and is not enabled do nothing */
488 if (!fl)
489 return 0;
490
491 /* Record is not filtered but enabled, disable it */
492 if (fl == FTRACE_FL_ENABLED)
5072c59f 493 rec->flags &= ~FTRACE_FL_ENABLED;
982c350b
SR
494 else
495 /* Otherwise record is filtered but not enabled, enable it */
5072c59f 496 rec->flags |= FTRACE_FL_ENABLED;
5072c59f 497 } else {
982c350b 498 /* Disable or not filtered */
5072c59f 499
41c52c0d 500 if (enable) {
982c350b 501 /* if record is enabled, do nothing */
5072c59f 502 if (rec->flags & FTRACE_FL_ENABLED)
0eb96701 503 return 0;
982c350b 504
5072c59f 505 rec->flags |= FTRACE_FL_ENABLED;
982c350b 506
5072c59f 507 } else {
982c350b
SR
508
509 /* if record is not enabled do nothing */
5072c59f 510 if (!(rec->flags & FTRACE_FL_ENABLED))
0eb96701 511 return 0;
982c350b 512
5072c59f
SR
513 rec->flags &= ~FTRACE_FL_ENABLED;
514 }
515 }
516
982c350b 517 if (rec->flags & FTRACE_FL_ENABLED)
e7d3737e 518 return ftrace_make_call(rec, ftrace_addr);
31e88909 519 else
e7d3737e 520 return ftrace_make_nop(NULL, rec, ftrace_addr);
5072c59f
SR
521}
522
e309b41d 523static void ftrace_replace_code(int enable)
3c1720f0 524{
0eb96701 525 int i, failed;
3c1720f0
SR
526 struct dyn_ftrace *rec;
527 struct ftrace_page *pg;
3c1720f0 528
3c1720f0
SR
529 for (pg = ftrace_pages_start; pg; pg = pg->next) {
530 for (i = 0; i < pg->index; i++) {
531 rec = &pg->records[i];
532
918c1154
SR
533 /*
534 * Skip over free records and records that have
535 * failed.
536 */
537 if (rec->flags & FTRACE_FL_FREE ||
538 rec->flags & FTRACE_FL_FAILED)
3c1720f0
SR
539 continue;
540
f22f9a89 541 /* ignore updates to this record's mcount site */
98a05ed4
AS
542 if (get_kprobe((void *)rec->ip)) {
543 freeze_record(rec);
f22f9a89 544 continue;
98a05ed4
AS
545 } else {
546 unfreeze_record(rec);
547 }
f22f9a89 548
31e88909 549 failed = __ftrace_replace_code(rec, enable);
0eb96701
AS
550 if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
551 rec->flags |= FTRACE_FL_FAILED;
552 if ((system_state == SYSTEM_BOOTING) ||
34078a5e 553 !core_kernel_text(rec->ip)) {
0eb96701 554 ftrace_free_rec(rec);
b17e8a37 555 } else
31e88909 556 ftrace_bug(failed, rec->ip);
0eb96701 557 }
3c1720f0
SR
558 }
559 }
560}
561
492a7ea5 562static int
31e88909 563ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
3c1720f0
SR
564{
565 unsigned long ip;
593eb8a2 566 int ret;
3c1720f0
SR
567
568 ip = rec->ip;
569
25aac9dc 570 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
593eb8a2 571 if (ret) {
31e88909 572 ftrace_bug(ret, ip);
3c1720f0 573 rec->flags |= FTRACE_FL_FAILED;
492a7ea5 574 return 0;
37ad5084 575 }
492a7ea5 576 return 1;
3c1720f0
SR
577}
578
e309b41d 579static int __ftrace_modify_code(void *data)
3d083395 580{
d61f82d0
SR
581 int *command = data;
582
a3583244 583 if (*command & FTRACE_ENABLE_CALLS)
d61f82d0 584 ftrace_replace_code(1);
a3583244 585 else if (*command & FTRACE_DISABLE_CALLS)
d61f82d0
SR
586 ftrace_replace_code(0);
587
588 if (*command & FTRACE_UPDATE_TRACE_FUNC)
589 ftrace_update_ftrace_func(ftrace_trace_function);
590
5a45cfe1
SR
591 if (*command & FTRACE_START_FUNC_RET)
592 ftrace_enable_ftrace_graph_caller();
593 else if (*command & FTRACE_STOP_FUNC_RET)
594 ftrace_disable_ftrace_graph_caller();
595
d61f82d0 596 return 0;
3d083395
SR
597}
598
e309b41d 599static void ftrace_run_update_code(int command)
3d083395 600{
784e2d76 601 stop_machine(__ftrace_modify_code, &command, NULL);
3d083395
SR
602}
603
d61f82d0 604static ftrace_func_t saved_ftrace_func;
60a7ecf4 605static int ftrace_start_up;
df4fc315
SR
606
607static void ftrace_startup_enable(int command)
608{
609 if (saved_ftrace_func != ftrace_trace_function) {
610 saved_ftrace_func = ftrace_trace_function;
611 command |= FTRACE_UPDATE_TRACE_FUNC;
612 }
613
614 if (!command || !ftrace_enabled)
615 return;
616
617 ftrace_run_update_code(command);
618}
d61f82d0 619
5a45cfe1 620static void ftrace_startup(int command)
3d083395 621{
4eebcc81
SR
622 if (unlikely(ftrace_disabled))
623 return;
624
cb7be3b2 625 mutex_lock(&ftrace_start_lock);
60a7ecf4 626 ftrace_start_up++;
982c350b 627 command |= FTRACE_ENABLE_CALLS;
d61f82d0 628
df4fc315 629 ftrace_startup_enable(command);
3d083395 630
cb7be3b2 631 mutex_unlock(&ftrace_start_lock);
3d083395
SR
632}
633
5a45cfe1 634static void ftrace_shutdown(int command)
3d083395 635{
4eebcc81
SR
636 if (unlikely(ftrace_disabled))
637 return;
638
cb7be3b2 639 mutex_lock(&ftrace_start_lock);
60a7ecf4
SR
640 ftrace_start_up--;
641 if (!ftrace_start_up)
d61f82d0 642 command |= FTRACE_DISABLE_CALLS;
3d083395 643
d61f82d0
SR
644 if (saved_ftrace_func != ftrace_trace_function) {
645 saved_ftrace_func = ftrace_trace_function;
646 command |= FTRACE_UPDATE_TRACE_FUNC;
647 }
3d083395 648
d61f82d0
SR
649 if (!command || !ftrace_enabled)
650 goto out;
651
652 ftrace_run_update_code(command);
3d083395 653 out:
cb7be3b2 654 mutex_unlock(&ftrace_start_lock);
3d083395
SR
655}
656
e309b41d 657static void ftrace_startup_sysctl(void)
b0fc494f 658{
d61f82d0
SR
659 int command = FTRACE_ENABLE_MCOUNT;
660
4eebcc81
SR
661 if (unlikely(ftrace_disabled))
662 return;
663
cb7be3b2 664 mutex_lock(&ftrace_start_lock);
d61f82d0
SR
665 /* Force update next time */
666 saved_ftrace_func = NULL;
60a7ecf4
SR
667 /* ftrace_start_up is true if we want ftrace running */
668 if (ftrace_start_up)
d61f82d0
SR
669 command |= FTRACE_ENABLE_CALLS;
670
671 ftrace_run_update_code(command);
cb7be3b2 672 mutex_unlock(&ftrace_start_lock);
b0fc494f
SR
673}
674
e309b41d 675static void ftrace_shutdown_sysctl(void)
b0fc494f 676{
d61f82d0
SR
677 int command = FTRACE_DISABLE_MCOUNT;
678
4eebcc81
SR
679 if (unlikely(ftrace_disabled))
680 return;
681
cb7be3b2 682 mutex_lock(&ftrace_start_lock);
60a7ecf4
SR
683 /* ftrace_start_up is true if ftrace is running */
684 if (ftrace_start_up)
d61f82d0
SR
685 command |= FTRACE_DISABLE_CALLS;
686
687 ftrace_run_update_code(command);
cb7be3b2 688 mutex_unlock(&ftrace_start_lock);
b0fc494f
SR
689}
690
3d083395
SR
691static cycle_t ftrace_update_time;
692static unsigned long ftrace_update_cnt;
693unsigned long ftrace_update_tot_cnt;
694
31e88909 695static int ftrace_update_code(struct module *mod)
3d083395 696{
08f5ac90 697 struct dyn_ftrace *p, *t;
f22f9a89 698 cycle_t start, stop;
3d083395 699
750ed1a4 700 start = ftrace_now(raw_smp_processor_id());
3d083395
SR
701 ftrace_update_cnt = 0;
702
08f5ac90 703 list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {
3d083395 704
08f5ac90
SR
705 /* If something went wrong, bail without enabling anything */
706 if (unlikely(ftrace_disabled))
707 return -1;
f22f9a89 708
08f5ac90 709 list_del_init(&p->list);
f22f9a89 710
08f5ac90 711 /* convert record (i.e, patch mcount-call with NOP) */
31e88909 712 if (ftrace_code_disable(mod, p)) {
08f5ac90
SR
713 p->flags |= FTRACE_FL_CONVERTED;
714 ftrace_update_cnt++;
715 } else
716 ftrace_free_rec(p);
3d083395
SR
717 }
718
750ed1a4 719 stop = ftrace_now(raw_smp_processor_id());
3d083395
SR
720 ftrace_update_time = stop - start;
721 ftrace_update_tot_cnt += ftrace_update_cnt;
722
16444a8a
ACM
723 return 0;
724}
725
68bf21aa 726static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
3c1720f0
SR
727{
728 struct ftrace_page *pg;
729 int cnt;
730 int i;
3c1720f0
SR
731
732 /* allocate a few pages */
733 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
734 if (!ftrace_pages_start)
735 return -1;
736
737 /*
738 * Allocate a few more pages.
739 *
740 * TODO: have some parser search vmlinux before
741 * final linking to find all calls to ftrace.
742 * Then we can:
743 * a) know how many pages to allocate.
744 * and/or
745 * b) set up the table then.
746 *
747 * The dynamic code is still necessary for
748 * modules.
749 */
750
751 pg = ftrace_pages = ftrace_pages_start;
752
68bf21aa 753 cnt = num_to_init / ENTRIES_PER_PAGE;
08f5ac90 754 pr_info("ftrace: allocating %ld entries in %d pages\n",
5821e1b7 755 num_to_init, cnt + 1);
3c1720f0
SR
756
757 for (i = 0; i < cnt; i++) {
758 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
759
760 /* If we fail, we'll try later anyway */
761 if (!pg->next)
762 break;
763
764 pg = pg->next;
765 }
766
767 return 0;
768}
769
5072c59f
SR
770enum {
771 FTRACE_ITER_FILTER = (1 << 0),
772 FTRACE_ITER_CONT = (1 << 1),
41c52c0d 773 FTRACE_ITER_NOTRACE = (1 << 2),
eb9a7bf0 774 FTRACE_ITER_FAILURES = (1 << 3),
5072c59f
SR
775};
776
777#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
778
779struct ftrace_iterator {
5072c59f 780 struct ftrace_page *pg;
431aa3fb 781 int idx;
5072c59f
SR
782 unsigned flags;
783 unsigned char buffer[FTRACE_BUFF_MAX+1];
784 unsigned buffer_idx;
785 unsigned filtered;
786};
787
e309b41d 788static void *
5072c59f
SR
789t_next(struct seq_file *m, void *v, loff_t *pos)
790{
791 struct ftrace_iterator *iter = m->private;
792 struct dyn_ftrace *rec = NULL;
793
794 (*pos)++;
795
99ecdc43
SR
796 /* should not be called from interrupt context */
797 spin_lock(&ftrace_lock);
5072c59f
SR
798 retry:
799 if (iter->idx >= iter->pg->index) {
800 if (iter->pg->next) {
801 iter->pg = iter->pg->next;
802 iter->idx = 0;
803 goto retry;
50cdaf08
LW
804 } else {
805 iter->idx = -1;
5072c59f
SR
806 }
807 } else {
808 rec = &iter->pg->records[iter->idx++];
a9fdda33
SR
809 if ((rec->flags & FTRACE_FL_FREE) ||
810
811 (!(iter->flags & FTRACE_ITER_FAILURES) &&
eb9a7bf0
AS
812 (rec->flags & FTRACE_FL_FAILED)) ||
813
814 ((iter->flags & FTRACE_ITER_FAILURES) &&
a9fdda33 815 !(rec->flags & FTRACE_FL_FAILED)) ||
eb9a7bf0 816
0183fb1c
SR
817 ((iter->flags & FTRACE_ITER_FILTER) &&
818 !(rec->flags & FTRACE_FL_FILTER)) ||
819
41c52c0d
SR
820 ((iter->flags & FTRACE_ITER_NOTRACE) &&
821 !(rec->flags & FTRACE_FL_NOTRACE))) {
5072c59f
SR
822 rec = NULL;
823 goto retry;
824 }
825 }
99ecdc43 826 spin_unlock(&ftrace_lock);
5072c59f 827
5072c59f
SR
828 return rec;
829}
830
831static void *t_start(struct seq_file *m, loff_t *pos)
832{
833 struct ftrace_iterator *iter = m->private;
834 void *p = NULL;
5072c59f 835
50cdaf08
LW
836 if (*pos > 0) {
837 if (iter->idx < 0)
838 return p;
839 (*pos)--;
840 iter->idx--;
841 }
5821e1b7 842
50cdaf08 843 p = t_next(m, p, pos);
5072c59f
SR
844
845 return p;
846}
847
848static void t_stop(struct seq_file *m, void *p)
849{
850}
851
852static int t_show(struct seq_file *m, void *v)
853{
854 struct dyn_ftrace *rec = v;
855 char str[KSYM_SYMBOL_LEN];
856
857 if (!rec)
858 return 0;
859
860 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
861
50cdaf08 862 seq_printf(m, "%s\n", str);
5072c59f
SR
863
864 return 0;
865}
866
867static struct seq_operations show_ftrace_seq_ops = {
868 .start = t_start,
869 .next = t_next,
870 .stop = t_stop,
871 .show = t_show,
872};
873
e309b41d 874static int
5072c59f
SR
875ftrace_avail_open(struct inode *inode, struct file *file)
876{
877 struct ftrace_iterator *iter;
878 int ret;
879
4eebcc81
SR
880 if (unlikely(ftrace_disabled))
881 return -ENODEV;
882
5072c59f
SR
883 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
884 if (!iter)
885 return -ENOMEM;
886
887 iter->pg = ftrace_pages_start;
5072c59f
SR
888
889 ret = seq_open(file, &show_ftrace_seq_ops);
890 if (!ret) {
891 struct seq_file *m = file->private_data;
4bf39a94 892
5072c59f 893 m->private = iter;
4bf39a94 894 } else {
5072c59f 895 kfree(iter);
4bf39a94 896 }
5072c59f
SR
897
898 return ret;
899}
900
901int ftrace_avail_release(struct inode *inode, struct file *file)
902{
903 struct seq_file *m = (struct seq_file *)file->private_data;
904 struct ftrace_iterator *iter = m->private;
905
906 seq_release(inode, file);
907 kfree(iter);
4bf39a94 908
5072c59f
SR
909 return 0;
910}
911
eb9a7bf0
AS
912static int
913ftrace_failures_open(struct inode *inode, struct file *file)
914{
915 int ret;
916 struct seq_file *m;
917 struct ftrace_iterator *iter;
918
919 ret = ftrace_avail_open(inode, file);
920 if (!ret) {
921 m = (struct seq_file *)file->private_data;
922 iter = (struct ftrace_iterator *)m->private;
923 iter->flags = FTRACE_ITER_FAILURES;
924 }
925
926 return ret;
927}
928
929
41c52c0d 930static void ftrace_filter_reset(int enable)
5072c59f
SR
931{
932 struct ftrace_page *pg;
933 struct dyn_ftrace *rec;
41c52c0d 934 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
5072c59f
SR
935 unsigned i;
936
99ecdc43
SR
937 /* should not be called from interrupt context */
938 spin_lock(&ftrace_lock);
41c52c0d
SR
939 if (enable)
940 ftrace_filtered = 0;
5072c59f
SR
941 pg = ftrace_pages_start;
942 while (pg) {
943 for (i = 0; i < pg->index; i++) {
944 rec = &pg->records[i];
945 if (rec->flags & FTRACE_FL_FAILED)
946 continue;
41c52c0d 947 rec->flags &= ~type;
5072c59f
SR
948 }
949 pg = pg->next;
950 }
99ecdc43 951 spin_unlock(&ftrace_lock);
5072c59f
SR
952}
953
e309b41d 954static int
41c52c0d 955ftrace_regex_open(struct inode *inode, struct file *file, int enable)
5072c59f
SR
956{
957 struct ftrace_iterator *iter;
958 int ret = 0;
959
4eebcc81
SR
960 if (unlikely(ftrace_disabled))
961 return -ENODEV;
962
5072c59f
SR
963 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
964 if (!iter)
965 return -ENOMEM;
966
41c52c0d 967 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
968 if ((file->f_mode & FMODE_WRITE) &&
969 !(file->f_flags & O_APPEND))
41c52c0d 970 ftrace_filter_reset(enable);
5072c59f
SR
971
972 if (file->f_mode & FMODE_READ) {
973 iter->pg = ftrace_pages_start;
41c52c0d
SR
974 iter->flags = enable ? FTRACE_ITER_FILTER :
975 FTRACE_ITER_NOTRACE;
5072c59f
SR
976
977 ret = seq_open(file, &show_ftrace_seq_ops);
978 if (!ret) {
979 struct seq_file *m = file->private_data;
980 m->private = iter;
981 } else
982 kfree(iter);
983 } else
984 file->private_data = iter;
41c52c0d 985 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
986
987 return ret;
988}
989
41c52c0d
SR
/* Open the set_ftrace_filter file. */
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

/* Open the set_ftrace_notrace file. */
static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
1001
e309b41d 1002static ssize_t
41c52c0d 1003ftrace_regex_read(struct file *file, char __user *ubuf,
5072c59f
SR
1004 size_t cnt, loff_t *ppos)
1005{
1006 if (file->f_mode & FMODE_READ)
1007 return seq_read(file, ubuf, cnt, ppos);
1008 else
1009 return -EPERM;
1010}
1011
e309b41d 1012static loff_t
41c52c0d 1013ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
5072c59f
SR
1014{
1015 loff_t ret;
1016
1017 if (file->f_mode & FMODE_READ)
1018 ret = seq_lseek(file, offset, origin);
1019 else
1020 file->f_pos = ret = 1;
1021
1022 return ret;
1023}
1024
1025enum {
1026 MATCH_FULL,
1027 MATCH_FRONT_ONLY,
1028 MATCH_MIDDLE_ONLY,
1029 MATCH_END_ONLY,
1030};
1031
e309b41d 1032static void
41c52c0d 1033ftrace_match(unsigned char *buff, int len, int enable)
5072c59f
SR
1034{
1035 char str[KSYM_SYMBOL_LEN];
1036 char *search = NULL;
1037 struct ftrace_page *pg;
1038 struct dyn_ftrace *rec;
1039 int type = MATCH_FULL;
41c52c0d 1040 unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
5072c59f 1041 unsigned i, match = 0, search_len = 0;
ea3a6d6d
SR
1042 int not = 0;
1043
1044 if (buff[0] == '!') {
1045 not = 1;
1046 buff++;
1047 len--;
1048 }
5072c59f
SR
1049
1050 for (i = 0; i < len; i++) {
1051 if (buff[i] == '*') {
1052 if (!i) {
1053 search = buff + i + 1;
1054 type = MATCH_END_ONLY;
1055 search_len = len - (i + 1);
1056 } else {
1057 if (type == MATCH_END_ONLY) {
1058 type = MATCH_MIDDLE_ONLY;
1059 } else {
1060 match = i;
1061 type = MATCH_FRONT_ONLY;
1062 }
1063 buff[i] = 0;
1064 break;
1065 }
1066 }
1067 }
1068
99ecdc43
SR
1069 /* should not be called from interrupt context */
1070 spin_lock(&ftrace_lock);
41c52c0d
SR
1071 if (enable)
1072 ftrace_filtered = 1;
5072c59f
SR
1073 pg = ftrace_pages_start;
1074 while (pg) {
1075 for (i = 0; i < pg->index; i++) {
1076 int matched = 0;
1077 char *ptr;
1078
1079 rec = &pg->records[i];
1080 if (rec->flags & FTRACE_FL_FAILED)
1081 continue;
1082 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1083 switch (type) {
1084 case MATCH_FULL:
1085 if (strcmp(str, buff) == 0)
1086 matched = 1;
1087 break;
1088 case MATCH_FRONT_ONLY:
1089 if (memcmp(str, buff, match) == 0)
1090 matched = 1;
1091 break;
1092 case MATCH_MIDDLE_ONLY:
1093 if (strstr(str, search))
1094 matched = 1;
1095 break;
1096 case MATCH_END_ONLY:
1097 ptr = strstr(str, search);
1098 if (ptr && (ptr[search_len] == 0))
1099 matched = 1;
1100 break;
1101 }
ea3a6d6d
SR
1102 if (matched) {
1103 if (not)
1104 rec->flags &= ~flag;
1105 else
1106 rec->flags |= flag;
1107 }
5072c59f
SR
1108 }
1109 pg = pg->next;
1110 }
99ecdc43 1111 spin_unlock(&ftrace_lock);
5072c59f
SR
1112}
1113
e309b41d 1114static ssize_t
41c52c0d
SR
1115ftrace_regex_write(struct file *file, const char __user *ubuf,
1116 size_t cnt, loff_t *ppos, int enable)
5072c59f
SR
1117{
1118 struct ftrace_iterator *iter;
1119 char ch;
1120 size_t read = 0;
1121 ssize_t ret;
1122
1123 if (!cnt || cnt < 0)
1124 return 0;
1125
41c52c0d 1126 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
1127
1128 if (file->f_mode & FMODE_READ) {
1129 struct seq_file *m = file->private_data;
1130 iter = m->private;
1131 } else
1132 iter = file->private_data;
1133
1134 if (!*ppos) {
1135 iter->flags &= ~FTRACE_ITER_CONT;
1136 iter->buffer_idx = 0;
1137 }
1138
1139 ret = get_user(ch, ubuf++);
1140 if (ret)
1141 goto out;
1142 read++;
1143 cnt--;
1144
1145 if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1146 /* skip white space */
1147 while (cnt && isspace(ch)) {
1148 ret = get_user(ch, ubuf++);
1149 if (ret)
1150 goto out;
1151 read++;
1152 cnt--;
1153 }
1154
5072c59f
SR
1155 if (isspace(ch)) {
1156 file->f_pos += read;
1157 ret = read;
1158 goto out;
1159 }
1160
1161 iter->buffer_idx = 0;
1162 }
1163
1164 while (cnt && !isspace(ch)) {
1165 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1166 iter->buffer[iter->buffer_idx++] = ch;
1167 else {
1168 ret = -EINVAL;
1169 goto out;
1170 }
1171 ret = get_user(ch, ubuf++);
1172 if (ret)
1173 goto out;
1174 read++;
1175 cnt--;
1176 }
1177
1178 if (isspace(ch)) {
1179 iter->filtered++;
1180 iter->buffer[iter->buffer_idx] = 0;
41c52c0d 1181 ftrace_match(iter->buffer, iter->buffer_idx, enable);
5072c59f
SR
1182 iter->buffer_idx = 0;
1183 } else
1184 iter->flags |= FTRACE_ITER_CONT;
1185
1186
1187 file->f_pos += read;
1188
1189 ret = read;
1190 out:
41c52c0d 1191 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
1192
1193 return ret;
1194}
1195
/* write() handler for set_ftrace_filter: patterns select traced functions */
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

/* write() handler for set_ftrace_notrace: patterns exclude functions */
static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

/*
 * Apply a match string to either the filter (@enable=1) or notrace
 * (@enable=0) list.  @reset clears the existing list first, so a NULL
 * @buf with @reset set simply resets the list.
 */
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf: the string that holds the function notrace text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

e309b41d 1253static int
41c52c0d 1254ftrace_regex_release(struct inode *inode, struct file *file, int enable)
5072c59f
SR
1255{
1256 struct seq_file *m = (struct seq_file *)file->private_data;
1257 struct ftrace_iterator *iter;
1258
41c52c0d 1259 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
1260 if (file->f_mode & FMODE_READ) {
1261 iter = m->private;
1262
1263 seq_release(inode, file);
1264 } else
1265 iter = file->private_data;
1266
1267 if (iter->buffer_idx) {
1268 iter->filtered++;
1269 iter->buffer[iter->buffer_idx] = 0;
41c52c0d 1270 ftrace_match(iter->buffer, iter->buffer_idx, enable);
5072c59f
SR
1271 }
1272
1273 mutex_lock(&ftrace_sysctl_lock);
cb7be3b2 1274 mutex_lock(&ftrace_start_lock);
ee02a2e5 1275 if (ftrace_start_up && ftrace_enabled)
5072c59f 1276 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
cb7be3b2 1277 mutex_unlock(&ftrace_start_lock);
5072c59f
SR
1278 mutex_unlock(&ftrace_sysctl_lock);
1279
1280 kfree(iter);
41c52c0d 1281 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
1282 return 0;
1283}
1284
/* release() for set_ftrace_filter */
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

/* release() for set_ftrace_notrace */
static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

5072c59f
SR
1297static struct file_operations ftrace_avail_fops = {
1298 .open = ftrace_avail_open,
1299 .read = seq_read,
1300 .llseek = seq_lseek,
1301 .release = ftrace_avail_release,
1302};
1303
eb9a7bf0
AS
1304static struct file_operations ftrace_failures_fops = {
1305 .open = ftrace_failures_open,
1306 .read = seq_read,
1307 .llseek = seq_lseek,
1308 .release = ftrace_avail_release,
1309};
1310
5072c59f
SR
1311static struct file_operations ftrace_filter_fops = {
1312 .open = ftrace_filter_open,
41c52c0d 1313 .read = ftrace_regex_read,
5072c59f 1314 .write = ftrace_filter_write,
41c52c0d 1315 .llseek = ftrace_regex_lseek,
5072c59f
SR
1316 .release = ftrace_filter_release,
1317};
1318
41c52c0d
SR
1319static struct file_operations ftrace_notrace_fops = {
1320 .open = ftrace_notrace_open,
1321 .read = ftrace_regex_read,
1322 .write = ftrace_notrace_write,
1323 .llseek = ftrace_regex_lseek,
1324 .release = ftrace_notrace_release,
1325};
1326
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* protects ftrace_graph_count and ftrace_graph_funcs[] */
static DEFINE_MUTEX(graph_lock);

/* number of entries currently used in ftrace_graph_funcs[] */
int ftrace_graph_count;
/* functions the graph tracer is restricted to (empty == trace all) */
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

1334static void *
1335g_next(struct seq_file *m, void *v, loff_t *pos)
1336{
1337 unsigned long *array = m->private;
1338 int index = *pos;
1339
1340 (*pos)++;
1341
1342 if (index >= ftrace_graph_count)
1343 return NULL;
1344
1345 return &array[index];
1346}
1347
1348static void *g_start(struct seq_file *m, loff_t *pos)
1349{
1350 void *p = NULL;
1351
1352 mutex_lock(&graph_lock);
1353
1354 p = g_next(m, p, pos);
1355
1356 return p;
1357}
1358
/* seq_file ->stop: drop the lock taken in g_start() */
static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}

1364static int g_show(struct seq_file *m, void *v)
1365{
1366 unsigned long *ptr = v;
1367 char str[KSYM_SYMBOL_LEN];
1368
1369 if (!ptr)
1370 return 0;
1371
1372 kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
1373
1374 seq_printf(m, "%s\n", str);
1375
1376 return 0;
1377}
1378
1379static struct seq_operations ftrace_graph_seq_ops = {
1380 .start = g_start,
1381 .next = g_next,
1382 .stop = g_stop,
1383 .show = g_show,
1384};
1385
1386static int
1387ftrace_graph_open(struct inode *inode, struct file *file)
1388{
1389 int ret = 0;
1390
1391 if (unlikely(ftrace_disabled))
1392 return -ENODEV;
1393
1394 mutex_lock(&graph_lock);
1395 if ((file->f_mode & FMODE_WRITE) &&
1396 !(file->f_flags & O_APPEND)) {
1397 ftrace_graph_count = 0;
1398 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
1399 }
1400
1401 if (file->f_mode & FMODE_READ) {
1402 ret = seq_open(file, &ftrace_graph_seq_ops);
1403 if (!ret) {
1404 struct seq_file *m = file->private_data;
1405 m->private = ftrace_graph_funcs;
1406 }
1407 } else
1408 file->private_data = ftrace_graph_funcs;
1409 mutex_unlock(&graph_lock);
1410
1411 return ret;
1412}
1413
1414static ssize_t
1415ftrace_graph_read(struct file *file, char __user *ubuf,
1416 size_t cnt, loff_t *ppos)
1417{
1418 if (file->f_mode & FMODE_READ)
1419 return seq_read(file, ubuf, cnt, ppos);
1420 else
1421 return -EPERM;
1422}
1423
/*
 * Resolve @buffer (a function name) to a recorded mcount call site and
 * store its address at @array[idx].  Returns 0 on success, -EINVAL when
 * the name matches no recorded function or is already in the array.
 */
static int
ftrace_set_func(unsigned long *array, int idx, char *buffer)
{
	char str[KSYM_SYMBOL_LEN];
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int found = 0;
	int i, j;

	if (ftrace_disabled)
		return -ENODEV;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
				continue;

			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			if (strcmp(str, buffer) == 0) {
				found = 1;
				/* reject duplicates already in the array */
				for (j = 0; j < idx; j++)
					if (array[j] == rec->ip) {
						found = 0;
						break;
					}
				if (found)
					array[idx] = rec->ip;
				break;
			}
		}
	}
	spin_unlock(&ftrace_lock);

	return found ? 0 : -EINVAL;
}

1465static ssize_t
1466ftrace_graph_write(struct file *file, const char __user *ubuf,
1467 size_t cnt, loff_t *ppos)
1468{
1469 unsigned char buffer[FTRACE_BUFF_MAX+1];
1470 unsigned long *array;
1471 size_t read = 0;
1472 ssize_t ret;
1473 int index = 0;
1474 char ch;
1475
1476 if (!cnt || cnt < 0)
1477 return 0;
1478
1479 mutex_lock(&graph_lock);
1480
1481 if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
1482 ret = -EBUSY;
1483 goto out;
1484 }
1485
1486 if (file->f_mode & FMODE_READ) {
1487 struct seq_file *m = file->private_data;
1488 array = m->private;
1489 } else
1490 array = file->private_data;
1491
1492 ret = get_user(ch, ubuf++);
1493 if (ret)
1494 goto out;
1495 read++;
1496 cnt--;
1497
1498 /* skip white space */
1499 while (cnt && isspace(ch)) {
1500 ret = get_user(ch, ubuf++);
1501 if (ret)
1502 goto out;
1503 read++;
1504 cnt--;
1505 }
1506
1507 if (isspace(ch)) {
1508 *ppos += read;
1509 ret = read;
1510 goto out;
1511 }
1512
1513 while (cnt && !isspace(ch)) {
1514 if (index < FTRACE_BUFF_MAX)
1515 buffer[index++] = ch;
1516 else {
1517 ret = -EINVAL;
1518 goto out;
1519 }
1520 ret = get_user(ch, ubuf++);
1521 if (ret)
1522 goto out;
1523 read++;
1524 cnt--;
1525 }
1526 buffer[index] = 0;
1527
1528 /* we allow only one at a time */
1529 ret = ftrace_set_func(array, ftrace_graph_count, buffer);
1530 if (ret)
1531 goto out;
1532
1533 ftrace_graph_count++;
1534
1535 file->f_pos += read;
1536
1537 ret = read;
1538 out:
1539 mutex_unlock(&graph_lock);
1540
1541 return ret;
1542}
1543
/* fops for set_graph_function (read via seq_file, write adds a function) */
static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = ftrace_graph_read,
	.write = ftrace_graph_write,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

df4fc315 1551static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
5072c59f 1552{
5072c59f
SR
1553 struct dentry *entry;
1554
5072c59f
SR
1555 entry = debugfs_create_file("available_filter_functions", 0444,
1556 d_tracer, NULL, &ftrace_avail_fops);
1557 if (!entry)
1558 pr_warning("Could not create debugfs "
1559 "'available_filter_functions' entry\n");
1560
eb9a7bf0
AS
1561 entry = debugfs_create_file("failures", 0444,
1562 d_tracer, NULL, &ftrace_failures_fops);
1563 if (!entry)
1564 pr_warning("Could not create debugfs 'failures' entry\n");
1565
5072c59f
SR
1566 entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
1567 NULL, &ftrace_filter_fops);
1568 if (!entry)
1569 pr_warning("Could not create debugfs "
1570 "'set_ftrace_filter' entry\n");
41c52c0d
SR
1571
1572 entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
1573 NULL, &ftrace_notrace_fops);
1574 if (!entry)
1575 pr_warning("Could not create debugfs "
1576 "'set_ftrace_notrace' entry\n");
ad90c0e3 1577
ea4e2bc4
SR
1578#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1579 entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
1580 NULL,
1581 &ftrace_graph_fops);
1582 if (!entry)
1583 pr_warning("Could not create debugfs "
1584 "'set_graph_function' entry\n");
1585#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1586
5072c59f
SR
1587 return 0;
1588}
1589
/*
 * Record every mcount call site in [start, end) and run the code update
 * that converts them to nops.  @mod is the owning module, or NULL for
 * the core kernel image.
 */
static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}

/*
 * Called by the module loader to hand ftrace a module's mcount_loc
 * table.  A no-op when ftrace is disabled or the table is empty.
 */
void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}

/* mcount_loc section boundaries, provided by the linker script */
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

/*
 * Boot-time initialization of dynamic ftrace: let the architecture set
 * itself up, size the dyn_ftrace table from the mcount_loc section, and
 * convert every recorded call site to a nop.  On any failure ftrace is
 * disabled for good.
 */
void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}

#else

/* Without dynamic ftrace there is nothing to patch; just enable tracing. */
static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

/* Stubs: no dynamic-ftrace debugfs files or startup hooks in this config. */
static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

df4fc315
SR
1684static ssize_t
1685ftrace_pid_read(struct file *file, char __user *ubuf,
1686 size_t cnt, loff_t *ppos)
1687{
1688 char buf[64];
1689 int r;
1690
e32d8956
SR
1691 if (ftrace_pid_trace == ftrace_swapper_pid)
1692 r = sprintf(buf, "swapper tasks\n");
1693 else if (ftrace_pid_trace)
978f3a45 1694 r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
df4fc315
SR
1695 else
1696 r = sprintf(buf, "no pid\n");
1697
1698 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1699}
1700
e32d8956 1701static void clear_ftrace_swapper(void)
978f3a45
SR
1702{
1703 struct task_struct *p;
e32d8956 1704 int cpu;
978f3a45 1705
e32d8956
SR
1706 get_online_cpus();
1707 for_each_online_cpu(cpu) {
1708 p = idle_task(cpu);
978f3a45 1709 clear_tsk_trace_trace(p);
e32d8956
SR
1710 }
1711 put_online_cpus();
1712}
978f3a45 1713
e32d8956
SR
1714static void set_ftrace_swapper(void)
1715{
1716 struct task_struct *p;
1717 int cpu;
1718
1719 get_online_cpus();
1720 for_each_online_cpu(cpu) {
1721 p = idle_task(cpu);
1722 set_tsk_trace_trace(p);
1723 }
1724 put_online_cpus();
978f3a45
SR
1725}
1726
/*
 * Clear the trace flag on every task of @pid and drop the reference
 * that was taken when the pid was installed in ftrace_pid_trace.
 */
static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	put_pid(pid);
}

/*
 * Set the trace flag on every task of @pid.  Unlike the clear path this
 * does not touch the pid refcount: the caller keeps its reference for
 * as long as the pid stays in ftrace_pid_trace.
 */
static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
}

e32d8956
SR
1746static void clear_ftrace_pid_task(struct pid **pid)
1747{
1748 if (*pid == ftrace_swapper_pid)
1749 clear_ftrace_swapper();
1750 else
1751 clear_ftrace_pid(*pid);
1752
1753 *pid = NULL;
1754}
1755
1756static void set_ftrace_pid_task(struct pid *pid)
1757{
1758 if (pid == ftrace_swapper_pid)
1759 set_ftrace_swapper();
1760 else
1761 set_ftrace_pid(pid);
1762}
1763
/*
 * write() for set_ftrace_pid.  A negative value disables pid tracing,
 * 0 selects the per-cpu idle (swapper) tasks, and a positive value
 * selects that pid.  The traced-function pointer is updated afterwards
 * so the new pid check takes effect immediately.
 */
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_start_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		/* drops the reference held in ftrace_pid_trace */
		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			pid = find_get_pid(val);

			/* already tracing this pid: drop the extra ref */
			if (pid == ftrace_pid_trace) {
				put_pid(pid);
				goto out;
			}
		}

		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		/* find_get_pid() found no such pid; nothing to do */
		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_start_lock);

	return cnt;
}

1829static struct file_operations ftrace_pid_fops = {
1830 .read = ftrace_pid_read,
1831 .write = ftrace_pid_write,
1832};
1833
/* Create the top-level ftrace debugfs files (plus the dynamic ones). */
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: no locks are taken and no clean
 * shutdown is performed, so it is safe to call from atomic and
 * panic context.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);

	ret = __register_ftrace_function(ops);
	/* arm the call sites (0 = no extra FTRACE_* command flags) */
	ftrace_startup(0);

	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	/* disarm the call sites if this was the last user */
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

/*
 * sysctl handler for /proc/sys/kernel/ftrace_enabled: flips function
 * tracing on or off and re-points ftrace_trace_function accordingly.
 */
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	/* nothing to do on read, on error, or when the value is unchanged */
	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			/* a single entry can be called directly */
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* number of users of the function graph tracer */
static atomic_t ftrace_graph_active;

/* default entry hook installed when the graph tracer is not active */
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	/* pre-allocate a batch outside tasklist_lock; leftovers freed below */
	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		/* batch exhausted: caller retries with a fresh batch */
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			/* Make sure IRQs see the -1 first: */
			barrier();
			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	/* release the stacks that were not handed to a task */
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

2017/* Allocate a return stack for each task */
fb52607a 2018static int start_graph_tracing(void)
f201ae23
FW
2019{
2020 struct ftrace_ret_stack **ret_stack_list;
2021 int ret;
2022
2023 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
2024 sizeof(struct ftrace_ret_stack *),
2025 GFP_KERNEL);
2026
2027 if (!ret_stack_list)
2028 return -ENOMEM;
2029
2030 do {
2031 ret = alloc_retstack_tasklist(ret_stack_list);
2032 } while (ret == -EAGAIN);
2033
2034 kfree(ret_stack_list);
2035 return ret;
2036}
2037
/*
 * Enable the function graph tracer: @retfunc is called on function
 * return and @entryfunc on function entry.  Per-task return stacks are
 * allocated before the entry/return trampolines are armed.
 */
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_sysctl_lock);

	atomic_inc(&ftrace_graph_active);
	ret = start_graph_tracing();
	if (ret) {
		/* allocation failed: back out the active count */
		atomic_dec(&ftrace_graph_active);
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

/* Disable the function graph tracer and restore the stub callbacks. */
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_sysctl_lock);

	atomic_dec(&ftrace_graph_active);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);

	mutex_unlock(&ftrace_sysctl_lock);
}

/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		/*
		 * NOTE(review): unlike alloc_retstack_tasklist() no barrier
		 * is used between setting ret_stack and curr_ret_stack --
		 * presumably safe because @t is not running yet; confirm.
		 */
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}

/* Free a task's return stack when the task exits. */
void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

/* Hard-stop all function tracing; used on internal graph-tracer errors. */
void ftrace_graph_stop(void)
{
	ftrace_stop();
}
#endif
