ftrace: add debugfs entry 'failures'
[linux-2.6-block.git] / kernel / trace / ftrace.c
CommitLineData
16444a8a
ACM
1/*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code in the latency_tracer, that is:
11 *
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
14 */
15
3d083395
SR
16#include <linux/stop_machine.h>
17#include <linux/clocksource.h>
18#include <linux/kallsyms.h>
5072c59f
SR
19#include <linux/seq_file.h>
20#include <linux/debugfs.h>
3d083395 21#include <linux/hardirq.h>
2d8b820b 22#include <linux/kthread.h>
5072c59f 23#include <linux/uaccess.h>
2d8b820b 24#include <linux/ftrace.h>
b0fc494f 25#include <linux/sysctl.h>
5072c59f 26#include <linux/ctype.h>
2d8b820b 27#include <linux/hash.h>
3d083395
SR
28#include <linux/list.h>
29
30#include "trace.h"
16444a8a 31
4eebcc81
SR
32/* ftrace_enabled is a method to turn ftrace on or off */
33int ftrace_enabled __read_mostly;
d61f82d0 34static int last_ftrace_enabled;
b0fc494f 35
4eebcc81
SR
36/*
37 * ftrace_disabled is set when an anomaly is discovered.
38 * ftrace_disabled is much stronger than ftrace_enabled.
39 */
40static int ftrace_disabled __read_mostly;
41
3d083395 42static DEFINE_SPINLOCK(ftrace_lock);
b0fc494f
SR
43static DEFINE_MUTEX(ftrace_sysctl_lock);
44
16444a8a
ACM
45static struct ftrace_ops ftrace_list_end __read_mostly =
46{
47 .func = ftrace_stub,
48};
49
50static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
51ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
52
e309b41d 53void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
16444a8a
ACM
54{
55 struct ftrace_ops *op = ftrace_list;
56
57 /* in case someone actually ports this to alpha! */
58 read_barrier_depends();
59
60 while (op != &ftrace_list_end) {
61 /* silly alpha */
62 read_barrier_depends();
63 op->func(ip, parent_ip);
64 op = op->next;
65 };
66}
67
68/**
3d083395 69 * clear_ftrace_function - reset the ftrace function
16444a8a 70 *
3d083395
SR
71 * This NULLs the ftrace function and in essence stops
72 * tracing. There may be lag
16444a8a 73 */
3d083395 74void clear_ftrace_function(void)
16444a8a 75{
3d083395
SR
76 ftrace_trace_function = ftrace_stub;
77}
78
e309b41d 79static int __register_ftrace_function(struct ftrace_ops *ops)
3d083395
SR
80{
81 /* Should never be called by interrupts */
82 spin_lock(&ftrace_lock);
16444a8a 83
16444a8a
ACM
84 ops->next = ftrace_list;
85 /*
86 * We are entering ops into the ftrace_list but another
87 * CPU might be walking that list. We need to make sure
88 * the ops->next pointer is valid before another CPU sees
89 * the ops pointer included into the ftrace_list.
90 */
91 smp_wmb();
92 ftrace_list = ops;
3d083395 93
b0fc494f
SR
94 if (ftrace_enabled) {
95 /*
96 * For one func, simply call it directly.
97 * For more than one func, call the chain.
98 */
99 if (ops->next == &ftrace_list_end)
100 ftrace_trace_function = ops->func;
101 else
102 ftrace_trace_function = ftrace_list_func;
103 }
3d083395
SR
104
105 spin_unlock(&ftrace_lock);
16444a8a
ACM
106
107 return 0;
108}
109
e309b41d 110static int __unregister_ftrace_function(struct ftrace_ops *ops)
16444a8a 111{
16444a8a
ACM
112 struct ftrace_ops **p;
113 int ret = 0;
114
3d083395 115 spin_lock(&ftrace_lock);
16444a8a
ACM
116
117 /*
3d083395
SR
118 * If we are removing the last function, then simply point
119 * to the ftrace_stub.
16444a8a
ACM
120 */
121 if (ftrace_list == ops && ops->next == &ftrace_list_end) {
122 ftrace_trace_function = ftrace_stub;
123 ftrace_list = &ftrace_list_end;
124 goto out;
125 }
126
127 for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
128 if (*p == ops)
129 break;
130
131 if (*p != ops) {
132 ret = -1;
133 goto out;
134 }
135
136 *p = (*p)->next;
137
b0fc494f
SR
138 if (ftrace_enabled) {
139 /* If we only have one func left, then call that directly */
140 if (ftrace_list == &ftrace_list_end ||
141 ftrace_list->next == &ftrace_list_end)
142 ftrace_trace_function = ftrace_list->func;
143 }
16444a8a
ACM
144
145 out:
3d083395
SR
146 spin_unlock(&ftrace_lock);
147
148 return ret;
149}
150
151#ifdef CONFIG_DYNAMIC_FTRACE
152
e1c08bdd 153static struct task_struct *ftraced_task;
e1c08bdd 154
d61f82d0
SR
155enum {
156 FTRACE_ENABLE_CALLS = (1 << 0),
157 FTRACE_DISABLE_CALLS = (1 << 1),
158 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
159 FTRACE_ENABLE_MCOUNT = (1 << 3),
160 FTRACE_DISABLE_MCOUNT = (1 << 4),
161};
162
5072c59f
SR
163static int ftrace_filtered;
164
3d083395
SR
165static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];
166
167static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
168
169static DEFINE_SPINLOCK(ftrace_shutdown_lock);
170static DEFINE_MUTEX(ftraced_lock);
41c52c0d 171static DEFINE_MUTEX(ftrace_regex_lock);
3d083395 172
3c1720f0
SR
173struct ftrace_page {
174 struct ftrace_page *next;
aa5e5cea 175 unsigned long index;
3c1720f0 176 struct dyn_ftrace records[];
aa5e5cea 177};
3c1720f0
SR
178
179#define ENTRIES_PER_PAGE \
180 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
181
182/* estimate from running different kernels */
183#define NR_TO_INIT 10000
184
185static struct ftrace_page *ftrace_pages_start;
186static struct ftrace_page *ftrace_pages;
187
3d083395
SR
188static int ftraced_trigger;
189static int ftraced_suspend;
ad90c0e3 190static int ftraced_stop;
3d083395
SR
191
192static int ftrace_record_suspend;
193
37ad5084
SR
194static struct dyn_ftrace *ftrace_free_records;
195
e309b41d 196static inline int
9ff9cdb2 197ftrace_ip_in_hash(unsigned long ip, unsigned long key)
3d083395
SR
198{
199 struct dyn_ftrace *p;
200 struct hlist_node *t;
201 int found = 0;
202
ffdaa358 203 hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
3d083395
SR
204 if (p->ip == ip) {
205 found = 1;
206 break;
207 }
208 }
209
210 return found;
211}
212
e309b41d 213static inline void
3d083395
SR
214ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
215{
ffdaa358 216 hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
3d083395
SR
217}
218
0eb96701
AS
219/* called from kstop_machine */
220static inline void ftrace_del_hash(struct dyn_ftrace *node)
221{
222 hlist_del(&node->node);
223}
224
e309b41d 225static void ftrace_free_rec(struct dyn_ftrace *rec)
37ad5084
SR
226{
227 /* no locking, only called from kstop_machine */
228
229 rec->ip = (unsigned long)ftrace_free_records;
230 ftrace_free_records = rec;
231 rec->flags |= FTRACE_FL_FREE;
232}
233
e309b41d 234static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
3c1720f0 235{
37ad5084
SR
236 struct dyn_ftrace *rec;
237
238 /* First check for freed records */
239 if (ftrace_free_records) {
240 rec = ftrace_free_records;
241
37ad5084
SR
242 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
243 WARN_ON_ONCE(1);
244 ftrace_free_records = NULL;
4eebcc81
SR
245 ftrace_disabled = 1;
246 ftrace_enabled = 0;
37ad5084
SR
247 return NULL;
248 }
249
250 ftrace_free_records = (void *)rec->ip;
251 memset(rec, 0, sizeof(*rec));
252 return rec;
253 }
254
3c1720f0
SR
255 if (ftrace_pages->index == ENTRIES_PER_PAGE) {
256 if (!ftrace_pages->next)
257 return NULL;
258 ftrace_pages = ftrace_pages->next;
259 }
260
261 return &ftrace_pages->records[ftrace_pages->index++];
262}
263
e309b41d 264static void
d61f82d0 265ftrace_record_ip(unsigned long ip)
3d083395
SR
266{
267 struct dyn_ftrace *node;
268 unsigned long flags;
269 unsigned long key;
270 int resched;
271 int atomic;
2bb6f8d6 272 int cpu;
3d083395 273
4eebcc81 274 if (!ftrace_enabled || ftrace_disabled)
d61f82d0
SR
275 return;
276
3d083395
SR
277 resched = need_resched();
278 preempt_disable_notrace();
279
2bb6f8d6
SR
280 /*
281 * We simply need to protect against recursion.
282 * Use the the raw version of smp_processor_id and not
283 * __get_cpu_var which can call debug hooks that can
284 * cause a recursive crash here.
285 */
286 cpu = raw_smp_processor_id();
287 per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
288 if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
3d083395
SR
289 goto out;
290
291 if (unlikely(ftrace_record_suspend))
292 goto out;
293
294 key = hash_long(ip, FTRACE_HASHBITS);
295
296 WARN_ON_ONCE(key >= FTRACE_HASHSIZE);
297
298 if (ftrace_ip_in_hash(ip, key))
299 goto out;
300
301 atomic = irqs_disabled();
302
303 spin_lock_irqsave(&ftrace_shutdown_lock, flags);
304
305 /* This ip may have hit the hash before the lock */
306 if (ftrace_ip_in_hash(ip, key))
307 goto out_unlock;
308
d61f82d0 309 node = ftrace_alloc_dyn_node(ip);
3d083395
SR
310 if (!node)
311 goto out_unlock;
312
313 node->ip = ip;
314
315 ftrace_add_hash(node, key);
316
317 ftraced_trigger = 1;
318
319 out_unlock:
320 spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
321 out:
2bb6f8d6 322 per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
3d083395
SR
323
324 /* prevent recursion with scheduler */
325 if (resched)
326 preempt_enable_no_resched_notrace();
327 else
328 preempt_enable_notrace();
329}
330
caf8cdeb
SR
331#define FTRACE_ADDR ((long)(ftrace_caller))
332#define MCOUNT_ADDR ((long)(mcount))
3c1720f0 333
0eb96701 334static int
5072c59f
SR
335__ftrace_replace_code(struct dyn_ftrace *rec,
336 unsigned char *old, unsigned char *new, int enable)
337{
41c52c0d 338 unsigned long ip, fl;
5072c59f
SR
339
340 ip = rec->ip;
341
342 if (ftrace_filtered && enable) {
5072c59f
SR
343 /*
344 * If filtering is on:
345 *
346 * If this record is set to be filtered and
347 * is enabled then do nothing.
348 *
349 * If this record is set to be filtered and
350 * it is not enabled, enable it.
351 *
352 * If this record is not set to be filtered
353 * and it is not enabled do nothing.
354 *
41c52c0d
SR
355 * If this record is set not to trace then
356 * do nothing.
357 *
5072c59f
SR
358 * If this record is not set to be filtered and
359 * it is enabled, disable it.
360 */
361 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
362
363 if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
41c52c0d 364 (fl == 0) || (rec->flags & FTRACE_FL_NOTRACE))
0eb96701 365 return 0;
5072c59f
SR
366
367 /*
368 * If it is enabled disable it,
369 * otherwise enable it!
370 */
371 if (fl == FTRACE_FL_ENABLED) {
372 /* swap new and old */
373 new = old;
374 old = ftrace_call_replace(ip, FTRACE_ADDR);
375 rec->flags &= ~FTRACE_FL_ENABLED;
376 } else {
377 new = ftrace_call_replace(ip, FTRACE_ADDR);
378 rec->flags |= FTRACE_FL_ENABLED;
379 }
380 } else {
381
41c52c0d
SR
382 if (enable) {
383 /*
384 * If this record is set not to trace and is
385 * not enabled, do nothing.
386 */
387 fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
388 if (fl == FTRACE_FL_NOTRACE)
0eb96701 389 return 0;
41c52c0d 390
5072c59f 391 new = ftrace_call_replace(ip, FTRACE_ADDR);
41c52c0d 392 } else
5072c59f
SR
393 old = ftrace_call_replace(ip, FTRACE_ADDR);
394
395 if (enable) {
396 if (rec->flags & FTRACE_FL_ENABLED)
0eb96701 397 return 0;
5072c59f
SR
398 rec->flags |= FTRACE_FL_ENABLED;
399 } else {
400 if (!(rec->flags & FTRACE_FL_ENABLED))
0eb96701 401 return 0;
5072c59f
SR
402 rec->flags &= ~FTRACE_FL_ENABLED;
403 }
404 }
405
0eb96701 406 return ftrace_modify_code(ip, old, new);
5072c59f
SR
407}
408
e309b41d 409static void ftrace_replace_code(int enable)
3c1720f0 410{
0eb96701 411 int i, failed;
3c1720f0
SR
412 unsigned char *new = NULL, *old = NULL;
413 struct dyn_ftrace *rec;
414 struct ftrace_page *pg;
3c1720f0 415
5072c59f 416 if (enable)
3c1720f0
SR
417 old = ftrace_nop_replace();
418 else
419 new = ftrace_nop_replace();
420
421 for (pg = ftrace_pages_start; pg; pg = pg->next) {
422 for (i = 0; i < pg->index; i++) {
423 rec = &pg->records[i];
424
425 /* don't modify code that has already faulted */
426 if (rec->flags & FTRACE_FL_FAILED)
427 continue;
428
0eb96701
AS
429 failed = __ftrace_replace_code(rec, old, new, enable);
430 if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
431 rec->flags |= FTRACE_FL_FAILED;
432 if ((system_state == SYSTEM_BOOTING) ||
433 !kernel_text_address(rec->ip)) {
434 ftrace_del_hash(rec);
435 ftrace_free_rec(rec);
436 }
437 }
3c1720f0
SR
438 }
439 }
440}
441
e309b41d 442static void ftrace_shutdown_replenish(void)
3c1720f0
SR
443{
444 if (ftrace_pages->next)
445 return;
446
447 /* allocate another page */
448 ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
449}
3d083395 450
492a7ea5 451static int
d61f82d0 452ftrace_code_disable(struct dyn_ftrace *rec)
3c1720f0
SR
453{
454 unsigned long ip;
455 unsigned char *nop, *call;
456 int failed;
457
458 ip = rec->ip;
459
460 nop = ftrace_nop_replace();
d61f82d0 461 call = ftrace_call_replace(ip, MCOUNT_ADDR);
3c1720f0
SR
462
463 failed = ftrace_modify_code(ip, call, nop);
37ad5084 464 if (failed) {
3c1720f0 465 rec->flags |= FTRACE_FL_FAILED;
492a7ea5 466 return 0;
37ad5084 467 }
492a7ea5 468 return 1;
3c1720f0
SR
469}
470
ad90c0e3
SR
471static int __ftrace_update_code(void *ignore);
472
e309b41d 473static int __ftrace_modify_code(void *data)
3d083395 474{
d61f82d0
SR
475 unsigned long addr;
476 int *command = data;
477
ad90c0e3
SR
478 if (*command & FTRACE_ENABLE_CALLS) {
479 /*
480 * Update any recorded ips now that we have the
481 * machine stopped
482 */
483 __ftrace_update_code(NULL);
d61f82d0 484 ftrace_replace_code(1);
ad90c0e3 485 } else if (*command & FTRACE_DISABLE_CALLS)
d61f82d0
SR
486 ftrace_replace_code(0);
487
488 if (*command & FTRACE_UPDATE_TRACE_FUNC)
489 ftrace_update_ftrace_func(ftrace_trace_function);
490
491 if (*command & FTRACE_ENABLE_MCOUNT) {
492 addr = (unsigned long)ftrace_record_ip;
493 ftrace_mcount_set(&addr);
494 } else if (*command & FTRACE_DISABLE_MCOUNT) {
495 addr = (unsigned long)ftrace_stub;
496 ftrace_mcount_set(&addr);
497 }
498
499 return 0;
3d083395
SR
500}
501
e309b41d 502static void ftrace_run_update_code(int command)
3d083395 503{
d61f82d0 504 stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
3d083395
SR
505}
506
ad90c0e3
SR
507void ftrace_disable_daemon(void)
508{
509 /* Stop the daemon from calling kstop_machine */
510 mutex_lock(&ftraced_lock);
511 ftraced_stop = 1;
512 mutex_unlock(&ftraced_lock);
513
514 ftrace_force_update();
515}
516
517void ftrace_enable_daemon(void)
518{
519 mutex_lock(&ftraced_lock);
520 ftraced_stop = 0;
521 mutex_unlock(&ftraced_lock);
522
523 ftrace_force_update();
524}
525
d61f82d0
SR
526static ftrace_func_t saved_ftrace_func;
527
e309b41d 528static void ftrace_startup(void)
3d083395 529{
d61f82d0
SR
530 int command = 0;
531
4eebcc81
SR
532 if (unlikely(ftrace_disabled))
533 return;
534
3d083395
SR
535 mutex_lock(&ftraced_lock);
536 ftraced_suspend++;
d61f82d0
SR
537 if (ftraced_suspend == 1)
538 command |= FTRACE_ENABLE_CALLS;
539
540 if (saved_ftrace_func != ftrace_trace_function) {
541 saved_ftrace_func = ftrace_trace_function;
542 command |= FTRACE_UPDATE_TRACE_FUNC;
543 }
544
545 if (!command || !ftrace_enabled)
3d083395 546 goto out;
3d083395 547
d61f82d0 548 ftrace_run_update_code(command);
3d083395
SR
549 out:
550 mutex_unlock(&ftraced_lock);
551}
552
e309b41d 553static void ftrace_shutdown(void)
3d083395 554{
d61f82d0
SR
555 int command = 0;
556
4eebcc81
SR
557 if (unlikely(ftrace_disabled))
558 return;
559
3d083395
SR
560 mutex_lock(&ftraced_lock);
561 ftraced_suspend--;
d61f82d0
SR
562 if (!ftraced_suspend)
563 command |= FTRACE_DISABLE_CALLS;
3d083395 564
d61f82d0
SR
565 if (saved_ftrace_func != ftrace_trace_function) {
566 saved_ftrace_func = ftrace_trace_function;
567 command |= FTRACE_UPDATE_TRACE_FUNC;
568 }
3d083395 569
d61f82d0
SR
570 if (!command || !ftrace_enabled)
571 goto out;
572
573 ftrace_run_update_code(command);
3d083395
SR
574 out:
575 mutex_unlock(&ftraced_lock);
576}
577
e309b41d 578static void ftrace_startup_sysctl(void)
b0fc494f 579{
d61f82d0
SR
580 int command = FTRACE_ENABLE_MCOUNT;
581
4eebcc81
SR
582 if (unlikely(ftrace_disabled))
583 return;
584
b0fc494f 585 mutex_lock(&ftraced_lock);
d61f82d0
SR
586 /* Force update next time */
587 saved_ftrace_func = NULL;
b0fc494f
SR
588 /* ftraced_suspend is true if we want ftrace running */
589 if (ftraced_suspend)
d61f82d0
SR
590 command |= FTRACE_ENABLE_CALLS;
591
592 ftrace_run_update_code(command);
b0fc494f
SR
593 mutex_unlock(&ftraced_lock);
594}
595
e309b41d 596static void ftrace_shutdown_sysctl(void)
b0fc494f 597{
d61f82d0
SR
598 int command = FTRACE_DISABLE_MCOUNT;
599
4eebcc81
SR
600 if (unlikely(ftrace_disabled))
601 return;
602
b0fc494f
SR
603 mutex_lock(&ftraced_lock);
604 /* ftraced_suspend is true if ftrace is running */
605 if (ftraced_suspend)
d61f82d0
SR
606 command |= FTRACE_DISABLE_CALLS;
607
608 ftrace_run_update_code(command);
b0fc494f
SR
609 mutex_unlock(&ftraced_lock);
610}
611
3d083395
SR
612static cycle_t ftrace_update_time;
613static unsigned long ftrace_update_cnt;
614unsigned long ftrace_update_tot_cnt;
615
e309b41d 616static int __ftrace_update_code(void *ignore)
3d083395
SR
617{
618 struct dyn_ftrace *p;
0eb96701 619 struct hlist_node *t, *n;
d61f82d0 620 int save_ftrace_enabled;
3d083395
SR
621 cycle_t start, stop;
622 int i;
623
d61f82d0 624 /* Don't be recording funcs now */
ad90c0e3 625 ftrace_record_suspend++;
d61f82d0
SR
626 save_ftrace_enabled = ftrace_enabled;
627 ftrace_enabled = 0;
3d083395 628
750ed1a4 629 start = ftrace_now(raw_smp_processor_id());
3d083395
SR
630 ftrace_update_cnt = 0;
631
632 /* No locks needed, the machine is stopped! */
633 for (i = 0; i < FTRACE_HASHSIZE; i++) {
0eb96701
AS
634 /* all CPUS are stopped, we are safe to modify code */
635 hlist_for_each_entry_safe(p, t, n, &ftrace_hash[i], node) {
636 /* Skip over failed records which have not been
637 * freed. */
638 if (p->flags & FTRACE_FL_FAILED)
639 continue;
3d083395 640
0eb96701
AS
641 /* Unconverted records are always at the head of the
642 * hash bucket. Once we encounter a converted record,
643 * simply skip over to the next bucket. Saves ftraced
644 * some processor cycles (ftrace does its bid for
645 * global warming :-p ). */
646 if (p->flags & (FTRACE_FL_CONVERTED))
647 break;
3d083395 648
0eb96701
AS
649 if (ftrace_code_disable(p)) {
650 p->flags |= FTRACE_FL_CONVERTED;
492a7ea5 651 ftrace_update_cnt++;
0eb96701
AS
652 } else {
653 if ((system_state == SYSTEM_BOOTING) ||
654 !kernel_text_address(p->ip)) {
655 ftrace_del_hash(p);
656 ftrace_free_rec(p);
657
658 }
659 }
3d083395 660 }
3d083395
SR
661 }
662
750ed1a4 663 stop = ftrace_now(raw_smp_processor_id());
3d083395
SR
664 ftrace_update_time = stop - start;
665 ftrace_update_tot_cnt += ftrace_update_cnt;
ad90c0e3 666 ftraced_trigger = 0;
3d083395 667
d61f82d0 668 ftrace_enabled = save_ftrace_enabled;
ad90c0e3 669 ftrace_record_suspend--;
16444a8a
ACM
670
671 return 0;
672}
673
ad90c0e3 674static int ftrace_update_code(void)
3d083395 675{
ad90c0e3
SR
676 if (unlikely(ftrace_disabled) ||
677 !ftrace_enabled || !ftraced_trigger)
678 return 0;
4eebcc81 679
3d083395 680 stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
ad90c0e3
SR
681
682 return 1;
3d083395
SR
683}
684
e309b41d 685static int ftraced(void *ignore)
3d083395
SR
686{
687 unsigned long usecs;
688
3d083395
SR
689 while (!kthread_should_stop()) {
690
07a267cd
SR
691 set_current_state(TASK_INTERRUPTIBLE);
692
3d083395
SR
693 /* check once a second */
694 schedule_timeout(HZ);
695
4eebcc81
SR
696 if (unlikely(ftrace_disabled))
697 continue;
698
b0fc494f 699 mutex_lock(&ftrace_sysctl_lock);
3d083395 700 mutex_lock(&ftraced_lock);
ad90c0e3
SR
701 if (!ftraced_suspend && !ftraced_stop &&
702 ftrace_update_code()) {
3d083395
SR
703 usecs = nsecs_to_usecs(ftrace_update_time);
704 if (ftrace_update_tot_cnt > 100000) {
705 ftrace_update_tot_cnt = 0;
706 pr_info("hm, dftrace overflow: %lu change%s"
ad90c0e3 707 " (%lu total) in %lu usec%s\n",
3d083395
SR
708 ftrace_update_cnt,
709 ftrace_update_cnt != 1 ? "s" : "",
710 ftrace_update_tot_cnt,
711 usecs, usecs != 1 ? "s" : "");
4eebcc81 712 ftrace_disabled = 1;
3d083395
SR
713 WARN_ON_ONCE(1);
714 }
3d083395
SR
715 }
716 mutex_unlock(&ftraced_lock);
b0fc494f 717 mutex_unlock(&ftrace_sysctl_lock);
3d083395
SR
718
719 ftrace_shutdown_replenish();
3d083395
SR
720 }
721 __set_current_state(TASK_RUNNING);
722 return 0;
723}
724
3c1720f0
SR
725static int __init ftrace_dyn_table_alloc(void)
726{
727 struct ftrace_page *pg;
728 int cnt;
729 int i;
3c1720f0
SR
730
731 /* allocate a few pages */
732 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
733 if (!ftrace_pages_start)
734 return -1;
735
736 /*
737 * Allocate a few more pages.
738 *
739 * TODO: have some parser search vmlinux before
740 * final linking to find all calls to ftrace.
741 * Then we can:
742 * a) know how many pages to allocate.
743 * and/or
744 * b) set up the table then.
745 *
746 * The dynamic code is still necessary for
747 * modules.
748 */
749
750 pg = ftrace_pages = ftrace_pages_start;
751
752 cnt = NR_TO_INIT / ENTRIES_PER_PAGE;
753
754 for (i = 0; i < cnt; i++) {
755 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
756
757 /* If we fail, we'll try later anyway */
758 if (!pg->next)
759 break;
760
761 pg = pg->next;
762 }
763
764 return 0;
765}
766
5072c59f
SR
767enum {
768 FTRACE_ITER_FILTER = (1 << 0),
769 FTRACE_ITER_CONT = (1 << 1),
41c52c0d 770 FTRACE_ITER_NOTRACE = (1 << 2),
eb9a7bf0 771 FTRACE_ITER_FAILURES = (1 << 3),
5072c59f
SR
772};
773
774#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
775
776struct ftrace_iterator {
777 loff_t pos;
778 struct ftrace_page *pg;
779 unsigned idx;
780 unsigned flags;
781 unsigned char buffer[FTRACE_BUFF_MAX+1];
782 unsigned buffer_idx;
783 unsigned filtered;
784};
785
e309b41d 786static void *
5072c59f
SR
787t_next(struct seq_file *m, void *v, loff_t *pos)
788{
789 struct ftrace_iterator *iter = m->private;
790 struct dyn_ftrace *rec = NULL;
791
792 (*pos)++;
793
794 retry:
795 if (iter->idx >= iter->pg->index) {
796 if (iter->pg->next) {
797 iter->pg = iter->pg->next;
798 iter->idx = 0;
799 goto retry;
800 }
801 } else {
802 rec = &iter->pg->records[iter->idx++];
eb9a7bf0
AS
803 if ((!(iter->flags & FTRACE_ITER_FAILURES) &&
804 (rec->flags & FTRACE_FL_FAILED)) ||
805
806 ((iter->flags & FTRACE_ITER_FAILURES) &&
807 (!(rec->flags & FTRACE_FL_FAILED) ||
808 (rec->flags & FTRACE_FL_FREE))) ||
809
5072c59f 810 ((iter->flags & FTRACE_ITER_FILTER) &&
41c52c0d 811 !(rec->flags & FTRACE_FL_FILTER)) ||
eb9a7bf0 812
41c52c0d
SR
813 ((iter->flags & FTRACE_ITER_NOTRACE) &&
814 !(rec->flags & FTRACE_FL_NOTRACE))) {
5072c59f
SR
815 rec = NULL;
816 goto retry;
817 }
818 }
819
820 iter->pos = *pos;
821
822 return rec;
823}
824
825static void *t_start(struct seq_file *m, loff_t *pos)
826{
827 struct ftrace_iterator *iter = m->private;
828 void *p = NULL;
829 loff_t l = -1;
830
831 if (*pos != iter->pos) {
832 for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
833 ;
834 } else {
835 l = *pos;
836 p = t_next(m, p, &l);
837 }
838
839 return p;
840}
841
/* seq_file stop: nothing to release. */
static void t_stop(struct seq_file *m, void *p)
{
}
845
846static int t_show(struct seq_file *m, void *v)
847{
848 struct dyn_ftrace *rec = v;
849 char str[KSYM_SYMBOL_LEN];
850
851 if (!rec)
852 return 0;
853
854 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
855
856 seq_printf(m, "%s\n", str);
857
858 return 0;
859}
860
861static struct seq_operations show_ftrace_seq_ops = {
862 .start = t_start,
863 .next = t_next,
864 .stop = t_stop,
865 .show = t_show,
866};
867
e309b41d 868static int
5072c59f
SR
869ftrace_avail_open(struct inode *inode, struct file *file)
870{
871 struct ftrace_iterator *iter;
872 int ret;
873
4eebcc81
SR
874 if (unlikely(ftrace_disabled))
875 return -ENODEV;
876
5072c59f
SR
877 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
878 if (!iter)
879 return -ENOMEM;
880
881 iter->pg = ftrace_pages_start;
882 iter->pos = -1;
883
884 ret = seq_open(file, &show_ftrace_seq_ops);
885 if (!ret) {
886 struct seq_file *m = file->private_data;
4bf39a94 887
5072c59f 888 m->private = iter;
4bf39a94 889 } else {
5072c59f 890 kfree(iter);
4bf39a94 891 }
5072c59f
SR
892
893 return ret;
894}
895
896int ftrace_avail_release(struct inode *inode, struct file *file)
897{
898 struct seq_file *m = (struct seq_file *)file->private_data;
899 struct ftrace_iterator *iter = m->private;
900
901 seq_release(inode, file);
902 kfree(iter);
4bf39a94 903
5072c59f
SR
904 return 0;
905}
906
eb9a7bf0
AS
907static int
908ftrace_failures_open(struct inode *inode, struct file *file)
909{
910 int ret;
911 struct seq_file *m;
912 struct ftrace_iterator *iter;
913
914 ret = ftrace_avail_open(inode, file);
915 if (!ret) {
916 m = (struct seq_file *)file->private_data;
917 iter = (struct ftrace_iterator *)m->private;
918 iter->flags = FTRACE_ITER_FAILURES;
919 }
920
921 return ret;
922}
923
924
41c52c0d 925static void ftrace_filter_reset(int enable)
5072c59f
SR
926{
927 struct ftrace_page *pg;
928 struct dyn_ftrace *rec;
41c52c0d 929 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
5072c59f
SR
930 unsigned i;
931
932 /* keep kstop machine from running */
933 preempt_disable();
41c52c0d
SR
934 if (enable)
935 ftrace_filtered = 0;
5072c59f
SR
936 pg = ftrace_pages_start;
937 while (pg) {
938 for (i = 0; i < pg->index; i++) {
939 rec = &pg->records[i];
940 if (rec->flags & FTRACE_FL_FAILED)
941 continue;
41c52c0d 942 rec->flags &= ~type;
5072c59f
SR
943 }
944 pg = pg->next;
945 }
946 preempt_enable();
947}
948
e309b41d 949static int
41c52c0d 950ftrace_regex_open(struct inode *inode, struct file *file, int enable)
5072c59f
SR
951{
952 struct ftrace_iterator *iter;
953 int ret = 0;
954
4eebcc81
SR
955 if (unlikely(ftrace_disabled))
956 return -ENODEV;
957
5072c59f
SR
958 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
959 if (!iter)
960 return -ENOMEM;
961
41c52c0d 962 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
963 if ((file->f_mode & FMODE_WRITE) &&
964 !(file->f_flags & O_APPEND))
41c52c0d 965 ftrace_filter_reset(enable);
5072c59f
SR
966
967 if (file->f_mode & FMODE_READ) {
968 iter->pg = ftrace_pages_start;
969 iter->pos = -1;
41c52c0d
SR
970 iter->flags = enable ? FTRACE_ITER_FILTER :
971 FTRACE_ITER_NOTRACE;
5072c59f
SR
972
973 ret = seq_open(file, &show_ftrace_seq_ops);
974 if (!ret) {
975 struct seq_file *m = file->private_data;
976 m->private = iter;
977 } else
978 kfree(iter);
979 } else
980 file->private_data = iter;
41c52c0d 981 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
982
983 return ret;
984}
985
/* Open 'set_ftrace_filter': regex open in filter mode. */
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}
991
/* Open 'set_ftrace_notrace': regex open in notrace mode. */
static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
997
e309b41d 998static ssize_t
41c52c0d 999ftrace_regex_read(struct file *file, char __user *ubuf,
5072c59f
SR
1000 size_t cnt, loff_t *ppos)
1001{
1002 if (file->f_mode & FMODE_READ)
1003 return seq_read(file, ubuf, cnt, ppos);
1004 else
1005 return -EPERM;
1006}
1007
e309b41d 1008static loff_t
41c52c0d 1009ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
5072c59f
SR
1010{
1011 loff_t ret;
1012
1013 if (file->f_mode & FMODE_READ)
1014 ret = seq_lseek(file, offset, origin);
1015 else
1016 file->f_pos = ret = 1;
1017
1018 return ret;
1019}
1020
1021enum {
1022 MATCH_FULL,
1023 MATCH_FRONT_ONLY,
1024 MATCH_MIDDLE_ONLY,
1025 MATCH_END_ONLY,
1026};
1027
e309b41d 1028static void
41c52c0d 1029ftrace_match(unsigned char *buff, int len, int enable)
5072c59f
SR
1030{
1031 char str[KSYM_SYMBOL_LEN];
1032 char *search = NULL;
1033 struct ftrace_page *pg;
1034 struct dyn_ftrace *rec;
1035 int type = MATCH_FULL;
41c52c0d 1036 unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
5072c59f
SR
1037 unsigned i, match = 0, search_len = 0;
1038
1039 for (i = 0; i < len; i++) {
1040 if (buff[i] == '*') {
1041 if (!i) {
1042 search = buff + i + 1;
1043 type = MATCH_END_ONLY;
1044 search_len = len - (i + 1);
1045 } else {
1046 if (type == MATCH_END_ONLY) {
1047 type = MATCH_MIDDLE_ONLY;
1048 } else {
1049 match = i;
1050 type = MATCH_FRONT_ONLY;
1051 }
1052 buff[i] = 0;
1053 break;
1054 }
1055 }
1056 }
1057
1058 /* keep kstop machine from running */
1059 preempt_disable();
41c52c0d
SR
1060 if (enable)
1061 ftrace_filtered = 1;
5072c59f
SR
1062 pg = ftrace_pages_start;
1063 while (pg) {
1064 for (i = 0; i < pg->index; i++) {
1065 int matched = 0;
1066 char *ptr;
1067
1068 rec = &pg->records[i];
1069 if (rec->flags & FTRACE_FL_FAILED)
1070 continue;
1071 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1072 switch (type) {
1073 case MATCH_FULL:
1074 if (strcmp(str, buff) == 0)
1075 matched = 1;
1076 break;
1077 case MATCH_FRONT_ONLY:
1078 if (memcmp(str, buff, match) == 0)
1079 matched = 1;
1080 break;
1081 case MATCH_MIDDLE_ONLY:
1082 if (strstr(str, search))
1083 matched = 1;
1084 break;
1085 case MATCH_END_ONLY:
1086 ptr = strstr(str, search);
1087 if (ptr && (ptr[search_len] == 0))
1088 matched = 1;
1089 break;
1090 }
1091 if (matched)
41c52c0d 1092 rec->flags |= flag;
5072c59f
SR
1093 }
1094 pg = pg->next;
1095 }
1096 preempt_enable();
1097}
1098
e309b41d 1099static ssize_t
41c52c0d
SR
1100ftrace_regex_write(struct file *file, const char __user *ubuf,
1101 size_t cnt, loff_t *ppos, int enable)
5072c59f
SR
1102{
1103 struct ftrace_iterator *iter;
1104 char ch;
1105 size_t read = 0;
1106 ssize_t ret;
1107
1108 if (!cnt || cnt < 0)
1109 return 0;
1110
41c52c0d 1111 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
1112
1113 if (file->f_mode & FMODE_READ) {
1114 struct seq_file *m = file->private_data;
1115 iter = m->private;
1116 } else
1117 iter = file->private_data;
1118
1119 if (!*ppos) {
1120 iter->flags &= ~FTRACE_ITER_CONT;
1121 iter->buffer_idx = 0;
1122 }
1123
1124 ret = get_user(ch, ubuf++);
1125 if (ret)
1126 goto out;
1127 read++;
1128 cnt--;
1129
1130 if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1131 /* skip white space */
1132 while (cnt && isspace(ch)) {
1133 ret = get_user(ch, ubuf++);
1134 if (ret)
1135 goto out;
1136 read++;
1137 cnt--;
1138 }
1139
5072c59f
SR
1140 if (isspace(ch)) {
1141 file->f_pos += read;
1142 ret = read;
1143 goto out;
1144 }
1145
1146 iter->buffer_idx = 0;
1147 }
1148
1149 while (cnt && !isspace(ch)) {
1150 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1151 iter->buffer[iter->buffer_idx++] = ch;
1152 else {
1153 ret = -EINVAL;
1154 goto out;
1155 }
1156 ret = get_user(ch, ubuf++);
1157 if (ret)
1158 goto out;
1159 read++;
1160 cnt--;
1161 }
1162
1163 if (isspace(ch)) {
1164 iter->filtered++;
1165 iter->buffer[iter->buffer_idx] = 0;
41c52c0d 1166 ftrace_match(iter->buffer, iter->buffer_idx, enable);
5072c59f
SR
1167 iter->buffer_idx = 0;
1168 } else
1169 iter->flags |= FTRACE_ITER_CONT;
1170
1171
1172 file->f_pos += read;
1173
1174 ret = read;
1175 out:
41c52c0d 1176 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
1177
1178 return ret;
1179}
1180
41c52c0d
SR
1181static ssize_t
1182ftrace_filter_write(struct file *file, const char __user *ubuf,
1183 size_t cnt, loff_t *ppos)
1184{
1185 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
1186}
1187
1188static ssize_t
1189ftrace_notrace_write(struct file *file, const char __user *ubuf,
1190 size_t cnt, loff_t *ppos)
1191{
1192 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
1193}
1194
1195static void
1196ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
1197{
1198 if (unlikely(ftrace_disabled))
1199 return;
1200
1201 mutex_lock(&ftrace_regex_lock);
1202 if (reset)
1203 ftrace_filter_reset(enable);
1204 if (buf)
1205 ftrace_match(buf, len, enable);
1206 mutex_unlock(&ftrace_regex_lock);
1207}
1208
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
4eebcc81 1222
41c52c0d
SR
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 *
 * Takes ftrace_regex_lock internally; may sleep.
 */
1233void ftrace_set_notrace(unsigned char *buf, int len, int reset)
1234{
1235 ftrace_set_regex(buf, len, reset, 0);
77a2b37d
SR
1236}
1237
/*
 * Common release handler for the filter/notrace debugfs files.
 * @enable: 1 = filter list, 0 = notrace list (passed to ftrace_match()).
 *
 * Flushes any partially-typed pattern still sitting in the iterator
 * buffer, then — if patterns were applied while tracing is live —
 * re-runs the code update so the new filter takes effect.
 *
 * Lock ordering here is ftrace_regex_lock -> ftrace_sysctl_lock ->
 * ftraced_lock; do not reorder.
 */
e309b41d 1238static int
41c52c0d 1239ftrace_regex_release(struct inode *inode, struct file *file, int enable)
5072c59f
SR
1240{
1241 struct seq_file *m = (struct seq_file *)file->private_data;
1242 struct ftrace_iterator *iter;
1243
41c52c0d 1244 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
/* Readers went through seq_open(); writers stored iter directly. */
1245 if (file->f_mode & FMODE_READ) {
1246 iter = m->private;
1247
1248 seq_release(inode, file);
1249 } else
1250 iter = file->private_data;
1251
/* A pattern not yet terminated by whitespace is applied on close. */
1252 if (iter->buffer_idx) {
1253 iter->filtered++;
1254 iter->buffer[iter->buffer_idx] = 0;
41c52c0d 1255 ftrace_match(iter->buffer, iter->buffer_idx, enable);
5072c59f
SR
1256 }
1257
1258 mutex_lock(&ftrace_sysctl_lock);
1259 mutex_lock(&ftraced_lock);
1260 if (iter->filtered && ftraced_suspend && ftrace_enabled)
1261 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1262 mutex_unlock(&ftraced_lock);
1263 mutex_unlock(&ftrace_sysctl_lock);
1264
1265 kfree(iter);
41c52c0d 1266 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
1267 return 0;
1268}
1269
41c52c0d
SR
/* Release for 'set_ftrace_filter': enable=1 selects the filter list. */
1270static int
1271ftrace_filter_release(struct inode *inode, struct file *file)
1272{
1273 return ftrace_regex_release(inode, file, 1);
1274}
1275
/* Release for 'set_ftrace_notrace': enable=0 selects the notrace list. */
1276static int
1277ftrace_notrace_release(struct inode *inode, struct file *file)
1278{
1279 return ftrace_regex_release(inode, file, 0);
1280}
1281
ad90c0e3
SR
1282static ssize_t
1283ftraced_read(struct file *filp, char __user *ubuf,
1284 size_t cnt, loff_t *ppos)
1285{
1286 /* don't worry about races */
1287 char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
1288 int r = strlen(buf);
1289
1290 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1291}
1292
1293static ssize_t
1294ftraced_write(struct file *filp, const char __user *ubuf,
1295 size_t cnt, loff_t *ppos)
1296{
1297 char buf[64];
1298 long val;
1299 int ret;
1300
1301 if (cnt >= sizeof(buf))
1302 return -EINVAL;
1303
1304 if (copy_from_user(&buf, ubuf, cnt))
1305 return -EFAULT;
1306
1307 if (strncmp(buf, "enable", 6) == 0)
1308 val = 1;
1309 else if (strncmp(buf, "disable", 7) == 0)
1310 val = 0;
1311 else {
1312 buf[cnt] = 0;
1313
1314 ret = strict_strtoul(buf, 10, &val);
1315 if (ret < 0)
1316 return ret;
1317
1318 val = !!val;
1319 }
1320
1321 if (val)
1322 ftrace_enable_daemon();
1323 else
1324 ftrace_disable_daemon();
1325
1326 filp->f_pos += cnt;
1327
1328 return cnt;
1329}
1330
5072c59f
SR
/* seq_file ops for 'available_filter_functions'. */
1331static struct file_operations ftrace_avail_fops = {
1332 .open = ftrace_avail_open,
1333 .read = seq_read,
1334 .llseek = seq_lseek,
1335 .release = ftrace_avail_release,
1336};
1337
eb9a7bf0
AS
/*
 * seq_file ops for 'failures' (records that failed to be patched).
 * Reuses ftrace_avail_release: both files are plain seq_file readers.
 */
1338static struct file_operations ftrace_failures_fops = {
1339 .open = ftrace_failures_open,
1340 .read = seq_read,
1341 .llseek = seq_lseek,
1342 .release = ftrace_avail_release,
1343};
1344
5072c59f
SR
1345static struct file_operations ftrace_filter_fops = {
1346 .open = ftrace_filter_open,
41c52c0d 1347 .read = ftrace_regex_read,
5072c59f 1348 .write = ftrace_filter_write,
41c52c0d 1349 .llseek = ftrace_regex_lseek,
5072c59f
SR
1350 .release = ftrace_filter_release,
1351};
1352
41c52c0d
SR
/* 'set_ftrace_notrace': readable via seq_file, writable with patterns. */
1353static struct file_operations ftrace_notrace_fops = {
1354 .open = ftrace_notrace_open,
1355 .read = ftrace_regex_read,
1356 .write = ftrace_notrace_write,
1357 .llseek = ftrace_regex_lseek,
1358 .release = ftrace_notrace_release,
1359};
1360
ad90c0e3
SR
/* 'ftraced_enabled': simple text read/write, no seq_file needed. */
1361static struct file_operations ftraced_fops = {
1362 .open = tracing_open_generic,
1363 .read = ftraced_read,
1364 .write = ftraced_write,
1365};
1366
e1c08bdd
SR
/**
 * ftrace_force_update - force an update to all recording ftrace functions
 *
 * Returns 0 on success, -ENODEV if ftrace has been permanently
 * disabled, or -EBUSY if the pending records could not be updated.
 * Takes ftrace_sysctl_lock then ftraced_lock (same order as the
 * release path); may sleep.
 */
1370int ftrace_force_update(void)
1371{
e1c08bdd
SR
1372 int ret = 0;
1373
4eebcc81 1374 if (unlikely(ftrace_disabled))
e1c08bdd
SR
1375 return -ENODEV;
1376
ad90c0e3 1377 mutex_lock(&ftrace_sysctl_lock);
e1c08bdd 1378 mutex_lock(&ftraced_lock);
e1c08bdd 1379
ad90c0e3
SR
1380 /*
1381 * If ftraced_trigger is not set, then there is nothing
1382 * to update.
1383 */
1384 if (ftraced_trigger && !ftrace_update_code())
1385 ret = -EBUSY;
e1c08bdd
SR
1386
1387 mutex_unlock(&ftraced_lock);
ad90c0e3 1388 mutex_unlock(&ftrace_sysctl_lock);
e1c08bdd
SR
1389
1390 return ret;
1391}
1392
4eebcc81
SR
/*
 * Tear down dynamic ftrace entirely: un-patch all call sites, mark the
 * daemon as suspended (-1), and stop the ftraced kthread.  Called from
 * ftrace_kill() when something has gone badly wrong.
 *
 * kthread_stop() may sleep, so it is done after dropping ftraced_lock.
 */
1393static void ftrace_force_shutdown(void)
1394{
1395 struct task_struct *task;
1396 int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;
1397
1398 mutex_lock(&ftraced_lock);
1399 task = ftraced_task;
1400 ftraced_task = NULL;
1401 ftraced_suspend = -1;
1402 ftrace_run_update_code(command);
1403 mutex_unlock(&ftraced_lock);
1404
1405 if (task)
1406 kthread_stop(task);
1407}
1408
5072c59f
SR
/*
 * Create the ftrace debugfs control files under the tracing directory:
 * available_filter_functions, failures, set_ftrace_filter,
 * set_ftrace_notrace and ftraced_enabled.  Failures to create any one
 * entry are reported but not fatal.  Always returns 0 (fs_initcall).
 */
1409static __init int ftrace_init_debugfs(void)
1410{
1411 struct dentry *d_tracer;
1412 struct dentry *entry;
1413
1414 d_tracer = tracing_init_dentry();
1415
1416 entry = debugfs_create_file("available_filter_functions", 0444,
1417 d_tracer, NULL, &ftrace_avail_fops);
1418 if (!entry)
1419 pr_warning("Could not create debugfs "
1420 "'available_filter_functions' entry\n");
1421
eb9a7bf0
AS
1422 entry = debugfs_create_file("failures", 0444,
1423 d_tracer, NULL, &ftrace_failures_fops);
1424 if (!entry)
1425 pr_warning("Could not create debugfs 'failures' entry\n");
1426
5072c59f
SR
1427 entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
1428 NULL, &ftrace_filter_fops);
1429 if (!entry)
1430 pr_warning("Could not create debugfs "
1431 "'set_ftrace_filter' entry\n");
41c52c0d
SR
1432
1433 entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
1434 NULL, &ftrace_notrace_fops);
1435 if (!entry)
1436 pr_warning("Could not create debugfs "
1437 "'set_ftrace_notrace' entry\n");
ad90c0e3
SR
1438
1439 entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
1440 NULL, &ftraced_fops);
1441 if (!entry)
1442 pr_warning("Could not create debugfs "
1443 "'ftraced_enabled' entry\n");
5072c59f
SR
1444 return 0;
1445}
1446
1447fs_initcall(ftrace_init_debugfs);
1448
/*
 * Boot-time initialization of dynamic ftrace: run the arch-specific
 * setup under stop_machine, allocate the record table, and start the
 * ftraced kthread.  On any failure, permanently disable ftrace via
 * ftrace_disabled and return the error (core_initcall).
 */
e309b41d 1449static int __init ftrace_dynamic_init(void)
3d083395
SR
1450{
1451 struct task_struct *p;
d61f82d0 1452 unsigned long addr;
3d083395
SR
1453 int ret;
1454
d61f82d0 1455 addr = (unsigned long)ftrace_record_ip;
9ff9cdb2 1456
d61f82d0
SR
/* addr is in/out: it carries ftrace_record_ip in, an error code out */
1457 stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);
1458
1459 /* ftrace_dyn_arch_init places the return code in addr */
4eebcc81
SR
1460 if (addr) {
1461 ret = (int)addr;
1462 goto failed;
1463 }
d61f82d0 1464
3c1720f0 1465 ret = ftrace_dyn_table_alloc();
3d083395 1466 if (ret)
4eebcc81 1467 goto failed;
3d083395
SR
1468
1469 p = kthread_run(ftraced, NULL, "ftraced");
4eebcc81
SR
1470 if (IS_ERR(p)) {
/* NOTE(review): returns -1 rather than PTR_ERR(p) — the real errno is lost */
1471 ret = -1;
1472 goto failed;
1473 }
3d083395 1474
d61f82d0 1475 last_ftrace_enabled = ftrace_enabled = 1;
e1c08bdd 1476 ftraced_task = p;
3d083395
SR
1477
1478 return 0;
4eebcc81
SR
1479
1480 failed:
1481 ftrace_disabled = 1;
1482 return ret;
3d083395
SR
1483}
1484
d61f82d0 1485core_initcall(ftrace_dynamic_init);
3d083395 1486#else
c7aafc54
IM
/*
 * !CONFIG_DYNAMIC_FTRACE: the start/stop/sysctl hooks compile away to
 * no-ops so the callers above need no #ifdefs of their own.
 */
1487# define ftrace_startup() do { } while (0)
1488# define ftrace_shutdown() do { } while (0)
1489# define ftrace_startup_sysctl() do { } while (0)
1490# define ftrace_shutdown_sysctl() do { } while (0)
4eebcc81 1491# define ftrace_force_shutdown() do { } while (0)
3d083395
SR
1492#endif /* CONFIG_DYNAMIC_FTRACE */
1493
4eebcc81
SR
/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If something was detected that seems
 * wrong, calling this function will keep ftrace from doing
 * any more modifications, and updates.
 * used when something went wrong.
 *
 * Sets ftrace_disabled (irreversible), clears the trace callback, and
 * then forces the dynamic-ftrace machinery down.  May sleep.
 */
1502void ftrace_kill(void)
1503{
1504 mutex_lock(&ftrace_sysctl_lock);
1505 ftrace_disabled = 1;
1506 ftrace_enabled = 0;
1507
1508 clear_ftrace_function();
1509 mutex_unlock(&ftrace_sysctl_lock);
1510
1511 /* Try to totally disable ftrace */
1512 ftrace_force_shutdown();
1513}
1514
16444a8a 1515/**
3d083395
SR
1516 * register_ftrace_function - register a function for profiling
1517 * @ops - ops structure that holds the function for profiling.
16444a8a 1518 *
3d083395
SR
1519 * Register a function to be called by all functions in the
1520 * kernel.
1521 *
1522 * Note: @ops->func and all the functions it calls must be labeled
1523 * with "notrace", otherwise it will go into a
1524 * recursive loop.
16444a8a 1525 */
3d083395 1526int register_ftrace_function(struct ftrace_ops *ops)
16444a8a 1527{
b0fc494f
SR
1528 int ret;
1529
4eebcc81
SR
1530 if (unlikely(ftrace_disabled))
1531 return -1;
1532
b0fc494f 1533 mutex_lock(&ftrace_sysctl_lock);
b0fc494f 1534 ret = __register_ftrace_function(ops);
d61f82d0 1535 ftrace_startup();
b0fc494f
SR
1536 mutex_unlock(&ftrace_sysctl_lock);
1537
1538 return ret;
3d083395
SR
1539}
1540
1541/**
1542 * unregister_ftrace_function - unresgister a function for profiling.
1543 * @ops - ops structure that holds the function to unregister
1544 *
1545 * Unregister a function that was added to be called by ftrace profiling.
1546 */
1547int unregister_ftrace_function(struct ftrace_ops *ops)
1548{
1549 int ret;
1550
b0fc494f 1551 mutex_lock(&ftrace_sysctl_lock);
3d083395 1552 ret = __unregister_ftrace_function(ops);
d61f82d0 1553 ftrace_shutdown();
b0fc494f
SR
1554 mutex_unlock(&ftrace_sysctl_lock);
1555
1556 return ret;
1557}
1558
/*
 * sysctl handler for the ftrace_enabled knob.  Runs proc_dointvec()
 * and, when the value actually changed on a write, transitions the
 * global trace callback between the registered ops chain and
 * ftrace_stub.  Serialized by ftrace_sysctl_lock.
 */
e309b41d 1559int
b0fc494f 1560ftrace_enable_sysctl(struct ctl_table *table, int write,
5072c59f 1561 struct file *file, void __user *buffer, size_t *lenp,
b0fc494f
SR
1562 loff_t *ppos)
1563{
1564 int ret;
1565
4eebcc81
SR
1566 if (unlikely(ftrace_disabled))
1567 return -ENODEV;
1568
b0fc494f
SR
1569 mutex_lock(&ftrace_sysctl_lock);
1570
1571 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
b0fc494f
SR
1572
/* nothing to do on read, on error, or if the value did not change */
1573 if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
1574 goto out;
1575
1576 last_ftrace_enabled = ftrace_enabled;
1577
1578 if (ftrace_enabled) {
1579
1580 ftrace_startup_sysctl();
1581
1582 /* we are starting ftrace again */
1583 if (ftrace_list != &ftrace_list_end) {
/* single registered op: call it directly; otherwise walk the list */
1584 if (ftrace_list->next == &ftrace_list_end)
1585 ftrace_trace_function = ftrace_list->func;
1586 else
1587 ftrace_trace_function = ftrace_list_func;
1588 }
1589
1590 } else {
1591 /* stopping ftrace calls (just send to ftrace_stub) */
1592 ftrace_trace_function = ftrace_stub;
1593
1594 ftrace_shutdown_sysctl();
1595 }
1596
1597 out:
1598 mutex_unlock(&ftrace_sysctl_lock);
1599 return ret;
1600}