/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

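/*
 * Entry types the selftests know how to generate; anything else found
 * in the buffer is treated as corruption.
 */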
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_CONT:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
		return 1;
	}
	return 0;
}

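/*
 * Consume every event on one CPU's ring buffer, checking that each
 * entry is of a type the tracers under test can produce.
 */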
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	raw_local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	__raw_spin_unlock(&ftrace_max_lock);
	raw_local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

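/*
 * How these selftests get run (a sketch, assuming the
 * CONFIG_FTRACE_STARTUP_TEST wiring in trace.c of this era):
 * register_tracer() invokes type->selftest(type, tr) at registration
 * time and refuses to register a tracer whose selftest fails.
 */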
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

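/*
 * Two-step stringification: STR() expands its argument first, so
 * STR(DYN_FTRACE_TEST_NAME) yields the function name the macro
 * resolves to (assuming DYN_FTRACE_TEST_NAME is itself a #define
 * naming the test function), not the literal token.
 */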
#define __STR(x) #x
#define STR(x) __STR(x)

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing the call away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	trace->init(tr);

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
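
/*
 * Note: the stub above is a GCC statement expression, ({ 0; }), so it
 * can sit on the right-hand side of an assignment just like a call to
 * the real function.
 */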

/*
 * Simple verification test of the ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
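
/*
 * Note: ftrace_kill() above shuts function tracing down entirely;
 * after a failed selftest the call-site patching cannot be trusted,
 * so it is safer to disable the machinery for good.
 */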
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
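/*
 * The latency tracers keep a snapshot of their maximum-latency trace
 * in max_tr, so the tests below check both the live buffer and the
 * snapshot: with the max reset to 0, a 100us irqs-off window must
 * produce a new maximum.
 */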
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
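/*
 * Same pattern as the irqsoff test, but the latency window comes from
 * a preempt_disable()/preempt_enable() pair instead of hard irqs-off.
 */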
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
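/*
 * The combined test measures a window with both preemption and
 * interrupts disabled, and deliberately nests the enables "wrong"
 * (preempt_enable() while irqs are still off) so that either event
 * can be the one ending the measured section.
 */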
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret) {
		tracing_start();
		goto out;
	}

	ret = trace_test_buffer(&max_tr, &count);
	if (ret) {
		tracing_start();
		goto out;
	}

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		tracing_start();
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_start();
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
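/*
 * The wakeup-latency test needs a real RT task to wake: this helper
 * thread raises its own priority, signals that it is ready, and then
 * sleeps until the test wakes it.
 */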
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the test know we have the new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen something is horribly wrong with the system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
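/*
 * msleep(100) forces at least one switch away from this task, so the
 * sched_switch tracer is guaranteed something to record.
 */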
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
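/*
 * Unlike the tests above, the sysprof test does not insist on a
 * non-zero entry count; it only verifies that whatever was sampled
 * into the buffer is sane.
 */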
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */