/* Include in trace.c */

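/*
 * A sketch of how these tests are reached (based on the "Include in
 * trace.c" note above; the exact hook is an assumption from trace.c):
 * when CONFIG_FTRACE_STARTUP_TEST is set, register_tracer() invokes the
 * tracer's ->selftest() callback, which points at one of the
 * trace_selftest_startup_*() functions below, roughly:
 *
 *	ret = type->selftest(type, tr);
 *
 * Every test follows the same pattern: init the tracer, generate some
 * events, stop tracing, then validate the buffer contents with
 * trace_test_buffer().
 */
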
#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
	case TRACE_HW_BRANCHES:
	case TRACE_KSYM:
		return 1;
	}
	return 0;
}

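/*
 * Consume every event on one CPU's ring buffer and check that each
 * entry has a type the selftests know about. ring_buffer_consume() is
 * a destructive read: each call pops the next event, so the per-cpu
 * buffer is empty by the time this returns 0.
 */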
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * trace_test_buffer_cpu() runs a while loop to consume all data.
	 * If the calling tracer is broken and is constantly filling
	 * the buffer, that loop will run forever and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * such a hard lockup.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

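/*
 * Typical call pattern for trace_test_buffer() in the tests below
 * (a usage sketch mirroring code later in this file); a tracer that
 * ran but produced nothing is treated as a failure:
 *
 *	ret = trace_test_buffer(tr, &count);
 *	if (!ret && !count) {
 *		printk(KERN_CONT ".. no entries found ..");
 *		ret = -1;
 *	}
 */
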
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* calling through the parameter keeps gcc from optimizing the call away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

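	/*
	 * Side note (an assumption from elsewhere in the tree, not this
	 * file): DYN_FTRACE_TEST_NAME names the function that @func points
	 * at, so after macro expansion __stringify() yields its symbol name
	 * and the glob above becomes e.g. "*trace_selftest_dynamic_test_func".
	 */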
	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
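/*
 * The stub above uses a GNU statement expression, ({ 0; }), so the
 * !CONFIG_DYNAMIC_FTRACE build still produces a value that callers can
 * assign, e.g.:
 *
 *	ret = trace_selftest_startup_dynamic_tracing(trace, tr, func);
 */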

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

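/*
 * Rationale (inferred from the code below): the graph entry callback
 * increments graph_hang_thresh once per traced function, and the test
 * itself only sleeps for 100ms, so hitting 100,000,000 entries is taken
 * as proof that the tracer is stuck rather than merely busy.
 */
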
static void __ftrace_dump(bool disable_tracing);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false);
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as for the function tracer from which this
 * selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback, but attach a watchdog callback
	 * to detect and recover from possible hangs.
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
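/*
 * The irqsoff test: disable interrupts around a 100us busy delay, then
 * expect the tracer to have recorded that window as the new maximum
 * latency, which the trace_test_buffer(&max_tr, &count) check below
 * verifies. The preempt and preemptirq variants follow the same shape.
 */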
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
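/*
 * The combined test measures twice: once right after init, and once
 * more after restarting tracing. Each pass disables both preemption and
 * interrupts around a 100us delay, re-enabling preemption before
 * interrupts (the reverse of the usual nesting); each pass sanity-checks
 * tr and requires max_tr to contain entries.
 */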
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Make it known we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

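/*
 * The wakeup test: park the RT thread above in TASK_INTERRUPTIBLE, then
 * wake it while the wakeup tracer is running. Since the thread runs at
 * an RT priority well above the waker, the tracer should record the
 * wakeup-to-schedule latency in max_tr.
 */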
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case, is that we disable tracing. Honestly, if this
	 * race does happen something is horribly wrong with the system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */

#ifdef CONFIG_HW_BRANCH_TRACER
int
trace_selftest_startup_hw_branches(struct tracer *trace,
				   struct trace_array *tr)
{
	struct trace_iterator *iter;
	struct tracer tracer;
	unsigned long count;
	int ret;

	if (!trace->open) {
		printk(KERN_CONT "missing open function...");
		return -1;
	}

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/*
	 * The hw-branch tracer needs to collect the trace from the various
	 * cpu trace buffers - before tracing is stopped.
	 */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	memcpy(&tracer, trace, sizeof(tracer));

	iter->trace = &tracer;
	iter->tr = tr;
	iter->pos = -1;
	mutex_init(&iter->mutex);

	trace->open(iter);

	mutex_destroy(&iter->mutex);
	kfree(iter);

	tracing_stop();

	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT "no entries found..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_HW_BRANCH_TRACER */

#ifdef CONFIG_KSYM_TRACER
static int ksym_selftest_dummy;

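/*
 * The ksym test arms a read/write hardware breakpoint (HW_BREAKPOINT_R |
 * HW_BREAKPOINT_W) on ksym_selftest_dummy, then performs exactly one
 * read and one write of the variable, so exactly two events should land
 * in the trace buffer.
 */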
int
trace_selftest_startup_ksym(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	ksym_selftest_dummy = 0;
	/* Register the read-write tracing request */
	ret = process_new_ksym_entry("ksym_selftest_dummy",
				     HW_BREAKPOINT_R | HW_BREAKPOINT_W,
				     (unsigned long)(&ksym_selftest_dummy));

	if (ret < 0) {
		printk(KERN_CONT "ksym_trace read-write startup test failed\n");
		goto ret_path;
	}
	/*
	 * Perform a read and a write operation over the dummy variable to
	 * trigger the tracer.
	 */
	if (ksym_selftest_dummy == 0)
		ksym_selftest_dummy++;

	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/*
	 * One read and one write operation were performed on the dummy
	 * variable, triggering two entries in the trace buffer.
	 */
	if (!ret && count != 2) {
		printk(KERN_CONT "Ksym tracer startup test failed");
		ret = -1;
	}

ret_path:
	return ret;
}
#endif /* CONFIG_KSYM_TRACER */