1 /* Include in trace.c */
3 #include <linux/kthread.h>
4 #include <linux/delay.h>
/*
 * Check whether a consumed ring-buffer entry is of a type the
 * selftests recognize; callers treat a zero return as "invalid"
 * (see trace_test_buffer_cpu).  NOTE(review): the body is missing
 * from this extraction, so which entry types are accepted cannot
 * be confirmed here.
 */
6 static inline int trace_valid_entry(struct trace_entry *entry)
/*
 * Drain every event queued on @cpu of @tr's ring buffer and sanity-
 * check each one: bail with a diagnostic if we loop longer than the
 * buffer size (buffer is feeding itself) or if an entry's type is not
 * recognized.  NOTE(review): the return statements and closing braces
 * are missing from this extraction, so exact error codes cannot be
 * confirmed here.
 */
23 static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
25 struct ring_buffer_event *event;
26 struct trace_entry *entry;
27 unsigned int loops = 0;
/* Consume the per-cpu buffer one event at a time until it is empty. */
29 while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
30 entry = ring_buffer_event_data(event);
33 * The ring buffer is a size of trace_buf_size, if
34 * we loop more than the size, there's something wrong
35 * with the ring buffer.
37 if (loops++ > trace_buf_size) {
38 printk(KERN_CONT ".. bad ring buffer ");
/* Unknown entry type means the buffer contents are suspect. */
41 if (!trace_valid_entry(entry)) {
42 printk(KERN_CONT ".. invalid entry %d ",
52 printk(KERN_CONT ".. corrupted trace buffer .. ");
/*
 * Validate the whole trace buffer of @tr: count the entries, then run
 * trace_test_buffer_cpu() on every possible CPU while holding
 * ftrace_max_lock with IRQs off so the max-latency buffers cannot be
 * swapped underneath us.  NOTE(review): lines that declare cpu/ret,
 * store *count, and return are missing from this extraction.
 */
57 * Test the trace buffer to see if all the elements
60 static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
62 unsigned long flags, cnt = 0;
65 /* Don't allow flipping of max traces now */
66 local_irq_save(flags);
67 __raw_spin_lock(&ftrace_max_lock);
69 cnt = ring_buffer_entries(tr->buffer);
72 * The trace_test_buffer_cpu runs a while loop to consume all data.
73 * If the calling tracer is broken, and is constantly filling
74 * the buffer, this will run forever, and hard lock the box.
75 * We disable the ring buffer while we do this test to prevent
79 for_each_possible_cpu(cpu) {
80 ret = trace_test_buffer_cpu(tr, cpu);
/* Release in reverse acquisition order. */
85 __raw_spin_unlock(&ftrace_max_lock);
86 local_irq_restore(flags);
/*
 * Common warning helper used by every selftest below when
 * tracer_init() fails: logs the tracer name and the error code.
 */
94 static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
96 printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
97 trace->name, init_ret);
99 #ifdef CONFIG_FUNCTION_TRACER
101 #ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Two-level stringification so DYN_FTRACE_TEST_NAME is macro-expanded
 * before being turned into a string literal.  NOTE(review): the inner
 * __STR(x) helper definition is missing from this extraction.
 */
104 #define STR(x) __STR(x)
/*
 * Selftest for dynamic ftrace: set a filter on DYN_FTRACE_TEST_NAME,
 * verify that tracing with the filter armed records nothing until the
 * filtered function is actually called, and that exactly one entry is
 * then produced.  Saves and restores ftrace_enabled/tracer_enabled and
 * clears the filter on the way out.  NOTE(review): the sleeps, the
 * calls to the test function, and the return paths are missing from
 * this extraction.
 */
106 /* Test dynamic code modification and ftrace filters */
107 int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
108 struct trace_array *tr,
111 int save_ftrace_enabled = ftrace_enabled;
112 int save_tracer_enabled = tracer_enabled;
117 /* The ftrace test PASSED */
118 printk(KERN_CONT "PASSED\n");
119 pr_info("Testing dynamic ftrace: ");
121 /* enable tracing, and record the filter function */
125 /* passed in by parameter to fool gcc from optimizing */
129 * Some archs *cough*PowerPC*cough* add characters to the
130 * start of the function names. We simply put a '*' to
133 func_name = "*" STR(DYN_FTRACE_TEST_NAME);
135 /* filter only on our function */
136 ftrace_set_filter(func_name, strlen(func_name), 1);
139 ret = tracer_init(trace, tr);
141 warn_failed_init_tracer(trace, ret);
145 /* Sleep for a 1/10 of a second */
148 /* we should have nothing in the buffer */
149 ret = trace_test_buffer(tr, &count);
155 printk(KERN_CONT ".. filter did not filter .. ");
159 /* call our function again */
165 /* stop the tracing. */
169 /* check the trace buffer */
170 ret = trace_test_buffer(tr, &count);
174 /* we should only have one item */
175 if (!ret && count != 1) {
176 printk(KERN_CONT ".. filter failed count=%ld ..", count);
/* Restore global tracing state saved at entry. */
182 ftrace_enabled = save_ftrace_enabled;
183 tracer_enabled = save_tracer_enabled;
185 /* Enable tracing on all functions again */
186 ftrace_set_filter(NULL, 0, 1);
/* Without CONFIG_DYNAMIC_FTRACE the dynamic test is a no-op that
 * reports success (0). */
191 # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
192 #endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
/*
 * Function-tracer selftest: start the tracer, sleep briefly so msleep
 * activity is recorded, stop, and require at least one buffer entry.
 * On success it chains into the dynamic-ftrace test above; global
 * ftrace_enabled/tracer_enabled state is restored before returning.
 * NOTE(review): the sleep, start/stop calls, and returns are missing
 * from this extraction.
 */
199 trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
201 int save_ftrace_enabled = ftrace_enabled;
202 int save_tracer_enabled = tracer_enabled;
206 /* make sure msleep has been recorded */
209 /* start the tracing */
213 ret = tracer_init(trace, tr);
215 warn_failed_init_tracer(trace, ret);
219 /* Sleep for a 1/10 of a second */
221 /* stop the tracing. */
225 /* check the trace buffer */
226 ret = trace_test_buffer(tr, &count);
/* An empty buffer after 100ms of function tracing is a failure. */
230 if (!ret && !count) {
231 printk(KERN_CONT ".. no entries found ..");
236 ret = trace_selftest_startup_dynamic_tracing(trace, tr,
237 DYN_FTRACE_TEST_NAME)
240 ftrace_enabled = save_ftrace_enabled;
241 tracer_enabled = save_tracer_enabled;
243 /* kill ftrace totally if we failed */
249 #endif /* CONFIG_FUNCTION_TRACER */
252 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
254 * Pretty much the same as for the function tracer, from which the selftest
/*
 * Function-graph tracer selftest: init, trace for ~100ms, and require
 * at least one entry in the buffer.  The dynamic-tracing test is
 * skipped here since the plain function-tracer selftest already ran it.
 * NOTE(review): the sleep, stop, and return lines are missing from
 * this extraction.
 */
258 trace_selftest_startup_function_graph(struct tracer *trace,
259 struct trace_array *tr)
264 ret = tracer_init(trace, tr);
266 warn_failed_init_tracer(trace, ret);
270 /* Sleep for a 1/10 of a second */
275 /* check the trace buffer */
276 ret = trace_test_buffer(tr, &count);
281 if (!ret && !count) {
282 printk(KERN_CONT ".. no entries found ..");
287 /* Don't test dynamic tracing, the function tracer already did */
290 /* Stop it if we failed */
296 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
299 #ifdef CONFIG_IRQSOFF_TRACER
/*
 * irqsoff tracer selftest: reset the recorded max latency, disable
 * interrupts briefly (elided lines), then validate both the live
 * buffer and the max-latency snapshot buffer; the snapshot must be
 * non-empty.  tracing_max_latency is restored on exit.
 */
301 trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
303 unsigned long save_max = tracing_max_latency;
307 /* start the tracing */
308 ret = tracer_init(trace, tr);
310 warn_failed_init_tracer(trace, ret);
314 /* reset the max latency */
315 tracing_max_latency = 0;
316 /* disable interrupts for a bit */
320 /* stop the tracing. */
322 /* check both trace buffers */
323 ret = trace_test_buffer(tr, NULL);
325 ret = trace_test_buffer(&max_tr, &count);
329 if (!ret && !count) {
330 printk(KERN_CONT ".. no entries found ..");
334 tracing_max_latency = save_max;
338 #endif /* CONFIG_IRQSOFF_TRACER */
340 #ifdef CONFIG_PREEMPT_TRACER
/*
 * preemptoff tracer selftest.  If called with preemption already
 * disabled (historically: under the BKL) the test cannot work, so it
 * force-passes.  Otherwise: reset max latency, disable preemption for
 * a bit (elided lines), and check both the live and the max-latency
 * buffers, requiring a non-empty snapshot.
 */
342 trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
344 unsigned long save_max = tracing_max_latency;
349 * Now that the big kernel lock is no longer preemptable,
350 * and this is called with the BKL held, it will always
351 * fail. If preemption is already disabled, simply
352 * pass the test. When the BKL is removed, or becomes
353 * preemptible again, we will once again test this,
356 if (preempt_count()) {
357 printk(KERN_CONT "can not test ... force ");
361 /* start the tracing */
362 ret = tracer_init(trace, tr);
364 warn_failed_init_tracer(trace, ret);
368 /* reset the max latency */
369 tracing_max_latency = 0;
370 /* disable preemption for a bit */
374 /* stop the tracing. */
376 /* check both trace buffers */
377 ret = trace_test_buffer(tr, NULL);
379 ret = trace_test_buffer(&max_tr, &count);
383 if (!ret && !count) {
384 printk(KERN_CONT ".. no entries found ..");
388 tracing_max_latency = save_max;
392 #endif /* CONFIG_PREEMPT_TRACER */
394 #if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
/*
 * Combined preempt+irqsoff tracer selftest.  Force-passes when
 * preemption is already off (see preemptoff test above).  Runs the
 * disable/enable sequence twice — once preempt-then-irq, once
 * irq-then-preempt — checking both the live buffer and the
 * max-latency snapshot after each pass.  NOTE(review): the actual
 * preempt_disable/local_irq_disable calls and returns are missing
 * from this extraction.
 */
396 trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
398 unsigned long save_max = tracing_max_latency;
403 * Now that the big kernel lock is no longer preemptable,
404 * and this is called with the BKL held, it will always
405 * fail. If preemption is already disabled, simply
406 * pass the test. When the BKL is removed, or becomes
407 * preemptible again, we will once again test this,
410 if (preempt_count()) {
411 printk(KERN_CONT "can not test ... force ");
415 /* start the tracing */
416 ret = tracer_init(trace, tr);
418 warn_failed_init_tracer(trace, ret);
422 /* reset the max latency */
423 tracing_max_latency = 0;
425 /* disable preemption and interrupts for a bit */
430 /* reverse the order of preempt vs irqs */
433 /* stop the tracing. */
435 /* check both trace buffers */
436 ret = trace_test_buffer(tr, NULL);
442 ret = trace_test_buffer(&max_tr, &count);
448 if (!ret && !count) {
449 printk(KERN_CONT ".. no entries found ..");
455 /* do the test by disabling interrupts first this time */
456 tracing_max_latency = 0;
462 /* reverse the order of preempt vs irqs */
465 /* stop the tracing. */
467 /* check both trace buffers */
468 ret = trace_test_buffer(tr, NULL);
472 ret = trace_test_buffer(&max_tr, &count);
474 if (!ret && !count) {
475 printk(KERN_CONT ".. no entries found ..");
483 tracing_max_latency = save_max;
487 #endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
489 #ifdef CONFIG_NOP_TRACER
/* The nop tracer does nothing, so its selftest trivially succeeds. */
491 trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
493 /* What could possibly go wrong? */
498 #ifdef CONFIG_SCHED_TRACER
/*
 * Kthread body for the wakeup-latency selftest: promote itself to a
 * SCHED_FIFO priority-5 task, signal readiness through the completion
 * passed in @data, then sleep until the test wakes it and finally
 * stops it.  NOTE(review): the complete()/schedule() calls, the short
 * sleep inside the loop, and the return are missing from this
 * extraction.
 */
499 static int trace_wakeup_test_thread(void *data)
501 /* Make this a RT thread, doesn't need to be too high */
502 struct sched_param param = { .sched_priority = 5 };
503 struct completion *x = data;
/* Fixed mis-encoded "&param" (was HTML-mangled as a pilcrow). */
505 sched_setscheduler(current, SCHED_FIFO, &param);
507 /* Make it know we have a new prio */
510 /* now go to sleep and let the test wake us up */
511 set_current_state(TASK_INTERRUPTIBLE);
514 /* we are awake, now wait to disappear */
515 while (!kthread_should_stop()) {
517 * This is an RT task, do short sleeps to let
/*
 * Wakeup-latency tracer selftest: spawn an RT kthread, wait until it
 * has switched to SCHED_FIFO, then (in elided lines) wake it while
 * tracing and verify the max-latency snapshot buffer recorded the
 * wakeup.  The thread is stopped and tracing_max_latency restored
 * before the entries check decides pass/fail.
 */
527 trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
529 unsigned long save_max = tracing_max_latency;
530 struct task_struct *p;
531 struct completion isrt;
535 init_completion(&isrt);
537 /* create a high prio thread */
538 p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
540 printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
544 /* make sure the thread is running at an RT prio */
545 wait_for_completion(&isrt);
547 /* start the tracing */
548 ret = tracer_init(trace, tr);
550 warn_failed_init_tracer(trace, ret);
554 /* reset the max latency */
555 tracing_max_latency = 0;
557 /* sleep to let the RT thread sleep too */
561 * Yes this is slightly racy. It is possible that for some
562 * strange reason that the RT thread we created, did not
563 * call schedule for 100ms after doing the completion,
564 * and we do a wakeup on a task that already is awake.
565 * But that is extremely unlikely, and the worst thing that
566 * happens in such a case, is that we disable tracing.
567 * Honestly, if this race does happen something is horrible
568 * wrong with the system.
573 /* give a little time to let the thread wake up */
576 /* stop the tracing. */
578 /* check both trace buffers */
579 ret = trace_test_buffer(tr, NULL);
581 ret = trace_test_buffer(&max_tr, &count);
587 tracing_max_latency = save_max;
589 /* kill the thread */
592 if (!ret && !count) {
593 printk(KERN_CONT ".. no entries found ..");
601 #ifdef CONFIG_CONTEXT_SWITCH_TRACER
/*
 * Context-switch tracer selftest: init, sleep ~100ms to generate
 * context switches (elided lines), stop, and require a non-empty
 * trace buffer.
 */
603 trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
608 /* start the tracing */
609 ret = tracer_init(trace, tr);
611 warn_failed_init_tracer(trace, ret);
615 /* Sleep for a 1/10 of a second */
617 /* stop the tracing. */
619 /* check the trace buffer */
620 ret = trace_test_buffer(tr, &count);
624 if (!ret && !count) {
625 printk(KERN_CONT ".. no entries found ..");
631 #endif /* CONFIG_CONTEXT_SWITCH_TRACER */
633 #ifdef CONFIG_SYSPROF_TRACER
/*
 * Sysprof tracer selftest: same init / sleep ~100ms / stop / check
 * pattern as the sched_switch test; a non-empty buffer is required.
 */
635 trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
640 /* start the tracing */
641 ret = tracer_init(trace, tr);
643 warn_failed_init_tracer(trace, ret);
647 /* Sleep for a 1/10 of a second */
649 /* stop the tracing. */
651 /* check the trace buffer */
652 ret = trace_test_buffer(tr, &count);
656 if (!ret && !count) {
657 printk(KERN_CONT ".. no entries found ..");
663 #endif /* CONFIG_SYSPROF_TRACER */
665 #ifdef CONFIG_BRANCH_TRACER
/*
 * Branch tracer selftest: same init / sleep ~100ms / stop / check
 * pattern as the other simple tests; a non-empty buffer is required.
 */
667 trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
672 /* start the tracing */
673 ret = tracer_init(trace, tr);
675 warn_failed_init_tracer(trace, ret);
679 /* Sleep for a 1/10 of a second */
681 /* stop the tracing. */
683 /* check the trace buffer */
684 ret = trace_test_buffer(tr, &count);
688 if (!ret && !count) {
689 printk(KERN_CONT ".. no entries found ..");
695 #endif /* CONFIG_BRANCH_TRACER */