/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

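/*
 * Only entry types known to the selftests should ever appear in the
 * trace buffer; anything else is treated as buffer corruption.
 */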
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_CONT:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
		return 1;
	}
	return 0;
}

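/*
 * Consume every event queued on one CPU's buffer, validating each
 * entry as we go. Tracing is disabled if corruption is found.
 */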
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	raw_local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	__raw_spin_unlock(&ftrace_max_lock);
	raw_local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

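/*
 * Two levels of macros are needed so that DYN_FTRACE_TEST_NAME is
 * expanded by the preprocessor before it is stringified.
 */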
#define __STR(x) #x
#define STR(x) __STR(x)

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing the call away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	trace->init(tr);

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
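/*
 * Disable interrupts for a short, known window and verify that the
 * irqsoff tracer captured a trace of it in the max-latency buffer.
 */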
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
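/*
 * Same idea as the irqsoff test, but with a preemption-disabled
 * window instead of an interrupts-disabled one.
 */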
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
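/*
 * Combined test: run nested irqs-off and preempt-off windows in both
 * enable orders and make sure each pass produces a max-latency trace.
 */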
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret) {
		tracing_start();
		goto out;
	}

	ret = trace_test_buffer(&max_tr, &count);
	if (ret) {
		tracing_start();
		goto out;
	}

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		tracing_start();
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_start();
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
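/*
 * Helper for the wakeup test below: an RT thread that signals once its
 * priority is set, sleeps until the test wakes it, then idles until it
 * is told to exit.
 */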
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the test know we are running at our new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

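/*
 * Wake a sleeping high-priority RT thread and verify that the wakeup
 * tracer recorded the wakeup latency in the max-latency buffer.
 */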
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not
	 * call schedule for 100ms after doing the completion,
	 * and we do a wakeup on a task that already is awake.
	 * But that is extremely unlikely, and the worst thing that
	 * happens in such a case, is that we disable tracing.
	 * Honestly, if this race does happen something is horribly
	 * wrong with the system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
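/*
 * Run the context-switch tracer for a moment and check that ordinary
 * scheduling activity produced at least one entry.
 */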
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
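/*
 * Smoke-test the sysprof tracer: run it briefly and only check that
 * the buffer contents are sane; no minimum entry count is required.
 */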
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */