ftrace - fix dynamic ftrace memory leak
kernel/trace/trace_selftest.c
/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

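/*
 * Only function (TRACE_FN) and context-switch (TRACE_CTX) entries are
 * generated by these startup selftests; anything else is corruption.
 */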
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
		return 1;
	}
	return 0;
}

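/*
 * Walk one CPU's list of trace pages and verify that every recorded
 * entry has a valid type and that the page list ends where expected.
 */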
static int
trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
{
	struct trace_entry *entries;
	struct page *page;
	int idx = 0;
	int i;

	BUG_ON(list_empty(&data->trace_pages));
	page = list_entry(data->trace_pages.next, struct page, lru);
	entries = page_address(page);

	if (head_page(data) != entries)
		goto failed;

	/*
	 * The starting trace buffer always has valid elements,
	 * if any element exists.
	 */
	entries = head_page(data);

	for (i = 0; i < tr->entries; i++) {

		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
			printk(KERN_CONT ".. invalid entry %d ",
				entries[idx].type);
			goto failed;
		}

		idx++;
		if (idx >= ENTRIES_PER_PAGE) {
			page = virt_to_page(entries);
			if (page->lru.next == &data->trace_pages) {
				if (i != tr->entries - 1) {
					printk(KERN_CONT ".. entries buffer mismatch");
					goto failed;
				}
			} else {
				page = list_entry(page->lru.next, struct page, lru);
				entries = page_address(page);
			}
			idx = 0;
		}
	}

	page = virt_to_page(entries);
	if (page->lru.next != &data->trace_pages) {
		printk(KERN_CONT ".. too many entries");
		goto failed;
	}

	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long cnt = 0;
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		if (!head_page(tr->data[cpu]))
			continue;

		cnt += tr->data[cpu]->trace_idx;

		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
		if (ret)
			break;
	}

	if (count)
		*count = cnt;

	return ret;
}

#ifdef CONFIG_FTRACE

#ifdef CONFIG_DYNAMIC_FTRACE

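/*
 * Two-level stringification: __STR() stringifies its argument, and the
 * STR() wrapper forces DYN_FTRACE_TEST_NAME to be macro-expanded first.
 */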
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
#define __STR(x) #x
#define STR(x) __STR(x)
static int DYN_FTRACE_TEST_NAME(void)
{
	/* used to call mcount */
	return 0;
}

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in as a parameter to keep gcc from optimizing it away */
	func();

	/* update the records */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* filter only on our function */
	ftrace_set_filter(STR(DYN_FTRACE_TEST_NAME),
			  sizeof(STR(DYN_FTRACE_TEST_NAME)), 1);

	/* enable tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed ..");
		ret = -1;
		goto out;
	}
 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
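/* Without dynamic ftrace, the dynamic test trivially reports success */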
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of the ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* make sure msleep has been recorded */
	msleep(1);

	/* force the recorded functions to be traced */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	return ret;
}
#endif /* CONFIG_FTRACE */

#ifdef CONFIG_IRQSOFF_TRACER
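/*
 * Verify the irqsoff tracer: disable interrupts for a short delay and
 * check that the latency buffer (max_tr) recorded the critical section.
 */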
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
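/*
 * Verify the preemptoff tracer: disable preemption for a short delay and
 * check that the latency buffer (max_tr) recorded the critical section.
 */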
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
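/*
 * Verify the preemptirqsoff tracer: run the combined irqs-off and
 * preemption-off section in both nesting orders, checking both buffers
 * each time.
 */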
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tr->ctrl = 1;
	trace->ctrl_update(tr);
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_SCHED_TRACER
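/*
 * Helper thread for the wakeup test: boost itself to an RT priority,
 * signal readiness via the completion, then sleep until the test wakes it.
 */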
static int trace_wakeup_test_thread(void *data)
{
	struct completion *x = data;

	/* Make this an RT thread, doesn't need to be too high */
	rt_mutex_setprio(current, MAX_RT_PRIO - 5);

	/* Let the caller know we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

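/*
 * Verify the wakeup tracer: create an RT thread, wake it up, and check
 * that the wakeup latency was recorded in the max_tr buffer.
 */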
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */

	wake_up_process(p);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
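/*
 * Verify the context-switch tracer: trace for 1/10 of a second and
 * check that at least one context switch was recorded.
 */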
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */