// SPDX-License-Identifier: GPL-2.0
/* Include in trace.c */

#include <uapi/linux/sched/types.h>
#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RETADDR_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
			       entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&buf->tr->max_lock);

	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * trace_test_buffer_cpu() runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&buf->tr->max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

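/*
 * For reference, a hedged sketch (kept out of the build; the helper
 * name count_entries() is hypothetical) of the drain pattern used
 * above: turn the ring buffer off, consume every pending event on a
 * CPU, then turn it back on so a misbehaving tracer cannot refill
 * the buffer mid-test.
 */
#if 0
static unsigned long count_entries(struct array_buffer *buf, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long seen = 0;

	tracing_off();
	/* NULL timestamp/lost-events pointers: only the events matter */
	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL)))
		seen++;
	tracing_on();

	return seen;
}
#endif
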
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct ftrace_regs *fregs)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func = trace_selftest_test_probe1_func,
};

static struct ftrace_ops test_probe2 = {
	.func = trace_selftest_test_probe2_func,
};

static struct ftrace_ops test_probe3 = {
	.func = trace_selftest_test_probe3_func,
};

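/*
 * The ops above follow the usual pattern for hooking a callback to a
 * single function. A hedged sketch of that pattern (kept out of the
 * build; my_probe_func and my_probe are hypothetical names):
 */
#if 0
static void my_probe_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* runs every time the filtered function is entered */
}

static struct ftrace_ops my_probe = {
	.func = my_probe_func,
};

static int my_probe_attach(void)
{
	char *name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* final argument 1 resets any previously installed filter */
	ftrace_set_filter(&my_probe, name, strlen(name), 1);
	return register_ftrace_function(&my_probe);
}
#endif
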
static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	/* First time we are running with main function */
	if (cnt > 1) {
		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
		register_ftrace_function(tr->ops);
	}

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out;
	}

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	/* Remove trace function from probe 3 */
	func1_name = "!" __stringify(DYN_FTRACE_TEST_NAME);
	len1 = strlen(func1_name);

	ftrace_set_filter(&test_probe3, func1_name, len1, 0);

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 3)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 3)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 3)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 5)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	if (cnt > 1)
		unregister_ftrace_function(tr->ops);
	ftrace_reset_array_ops(tr);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
						  struct trace_array *tr,
						  int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	ftrace_enabled = 1;
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(tr, 1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(tr, 2);

	return ret;
}
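
/*
 * A note on the filter strings used by these tests, as a hedged,
 * non-built sketch (filter_example is a hypothetical name): a leading
 * '*' globs over the '.' prefix that PPC64 adds to symbol names, and
 * a leading '!' removes a function from an ops' existing filter
 * instead of adding one.
 */
#if 0
static void filter_example(struct ftrace_ops *ops)
{
	char *add = "*" __stringify(DYN_FTRACE_TEST_NAME);
	char *del = "!" __stringify(DYN_FTRACE_TEST_NAME);

	/* add the function; reset == 1 clears the old filter first */
	ftrace_set_filter(ops, add, strlen(add), 1);
	/* later, drop just this entry; reset == 0 keeps the others */
	ftrace_set_filter(ops, del, strlen(del), 0);
}
#endif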

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct ftrace_regs *fregs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	if (trace_selftest_recursion_cnt++ > 10)
		return;
	DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct ftrace_regs *fregs)
{
	/*
	 * We said we would provide our own recursion. By calling
	 * this function again, we should recurse back into this function
	 * and count again. But this only happens if the arch supports
	 * all of ftrace features and nothing else is using the function
	 * tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
	.func = trace_selftest_test_recursion_func,
	.flags = FTRACE_OPS_FL_RECURSION,
};

static struct ftrace_ops test_recsafe_probe = {
	.func = trace_selftest_test_recursion_safe_func,
};

static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	/*
	 * Recursion allows for transitions between contexts,
	 * and may call the callback twice.
	 */
	if (trace_selftest_recursion_cnt != 1 &&
	    trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called once (or twice) (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	trace_selftest_recursion_cnt = 1;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
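
/*
 * A hedged, non-built sketch of the recursion protection the test
 * above exercises: a callback that may itself hit traced code either
 * sets FTRACE_OPS_FL_RECURSION and lets ftrace guard it, or guards
 * itself with the helpers from <linux/trace_recursion.h>
 * (my_recursing_func is a hypothetical name):
 */
#if 0
static void my_recursing_func(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	int bit;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;	/* already inside this callback: bail out */

	/* a traced call that would otherwise recurse forever */
	DYN_FTRACE_TEST_NAME();

	ftrace_test_recursion_unlock(bit);
}
#endif
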
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct ftrace_regs *fregs)
{
	struct pt_regs *regs = ftrace_get_regs(fregs);

	if (regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
	.func = trace_selftest_test_regs_func,
	.flags = FTRACE_OPS_FL_SAVE_REGS,
};

static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
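
/*
 * A hedged, non-built sketch of what a SAVE_REGS callback can do with
 * the regs it is handed; ftrace_get_regs() returns NULL when the full
 * pt_regs were not saved (my_regs_func and my_regs_probe are
 * hypothetical names):
 */
#if 0
static void my_regs_func(unsigned long ip, unsigned long parent_ip,
			 struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = ftrace_get_regs(fregs);

	if (!regs)
		return;	/* the arch did not save full regs */

	/* with full regs we can, for example, inspect the saved ip */
	if (instruction_pointer(regs) != ip)
		pr_info("ip mismatch at %pS\n", (void *)ip);
}

static struct ftrace_ops my_regs_probe = {
	.func = my_regs_func,
	/* degrade gracefully on arches without FTRACE_WITH_REGS */
	.flags = FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
};
#endif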

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	ftrace_enabled = 1;
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

#define CHAR_NUMBER	123
#define SHORT_NUMBER	12345
#define WORD_NUMBER	1234567890
#define LONG_NUMBER	1234567890123456789LL
#define ERRSTR_BUFLEN	128

struct fgraph_fixture {
	struct fgraph_ops gops;
	int store_size;
	const char *store_type_name;
	char error_str_buf[ERRSTR_BUFLEN];
	char *error_str;
};

static __init int store_entry(struct ftrace_graph_ent *trace,
			      struct fgraph_ops *gops,
			      struct ftrace_regs *fregs)
{
	struct fgraph_fixture *fixture = container_of(gops, struct fgraph_fixture, gops);
	const char *type = fixture->store_type_name;
	int size = fixture->store_size;
	void *p;

	p = fgraph_reserve_data(gops->idx, size);
	if (!p) {
		snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
			 "Failed to reserve %s\n", type);
		return 0;
	}

	switch (size) {
	case 1:
		*(char *)p = CHAR_NUMBER;
		break;
	case 2:
		*(short *)p = SHORT_NUMBER;
		break;
	case 4:
		*(int *)p = WORD_NUMBER;
		break;
	case 8:
		*(long long *)p = LONG_NUMBER;
		break;
	}

	return 1;
}

static __init void store_return(struct ftrace_graph_ret *trace,
				struct fgraph_ops *gops,
				struct ftrace_regs *fregs)
{
	struct fgraph_fixture *fixture = container_of(gops, struct fgraph_fixture, gops);
	const char *type = fixture->store_type_name;
	long long expect = 0;
	long long found = -1;
	int size;
	char *p;

	p = fgraph_retrieve_data(gops->idx, &size);
	if (!p) {
		snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
			 "Failed to retrieve %s\n", type);
		return;
	}
	if (fixture->store_size > size) {
		snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
			 "Retrieved size %d is smaller than expected %d\n",
			 size, (int)fixture->store_size);
		return;
	}

	switch (fixture->store_size) {
	case 1:
		expect = CHAR_NUMBER;
		found = *(char *)p;
		break;
	case 2:
		expect = SHORT_NUMBER;
		found = *(short *)p;
		break;
	case 4:
		expect = WORD_NUMBER;
		found = *(int *)p;
		break;
	case 8:
		expect = LONG_NUMBER;
		found = *(long long *)p;
		break;
	}

	if (found != expect) {
		snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
			 "%s returned not %lld but %lld\n", type, expect, found);
		return;
	}
	fixture->error_str = NULL;
}

static int __init init_fgraph_fixture(struct fgraph_fixture *fixture)
{
	char *func_name;
	int len;

	snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
		 "Failed to execute storage %s\n", fixture->store_type_name);
	fixture->error_str = fixture->error_str_buf;

	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	return ftrace_set_filter(&fixture->gops.ops, func_name, len, 1);
}

/* Test fgraph storage for each size */
static int __init test_graph_storage_single(struct fgraph_fixture *fixture)
{
	int size = fixture->store_size;
	int ret;

	pr_cont("PASSED\n");
	pr_info("Testing fgraph storage of %d byte%s: ", size, str_plural(size));

	ret = init_fgraph_fixture(fixture);
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		return -1;
	}

	ret = register_ftrace_graph(&fixture->gops);
	if (ret) {
		pr_warn("Failed to init store_bytes fgraph tracing\n");
		return -1;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_graph(&fixture->gops);

	if (fixture->error_str) {
		pr_cont("*** %s ***", fixture->error_str);
		return -1;
	}

	return 0;
}

static struct fgraph_fixture store_bytes[4] __initdata = {
	[0] = {
		.gops = {
			.entryfunc = store_entry,
			.retfunc = store_return,
		},
		.store_size = 1,
		.store_type_name = "byte",
	},
	[1] = {
		.gops = {
			.entryfunc = store_entry,
			.retfunc = store_return,
		},
		.store_size = 2,
		.store_type_name = "short",
	},
	[2] = {
		.gops = {
			.entryfunc = store_entry,
			.retfunc = store_return,
		},
		.store_size = 4,
		.store_type_name = "word",
	},
	[3] = {
		.gops = {
			.entryfunc = store_entry,
			.retfunc = store_return,
		},
		.store_size = 8,
		.store_type_name = "long long",
	},
};

static __init int test_graph_storage_multi(void)
{
	struct fgraph_fixture *fixture;
	bool printed = false;
	int i, j, ret;

	pr_cont("PASSED\n");
	pr_info("Testing multiple fgraph storage on a function: ");

	for (i = 0; i < ARRAY_SIZE(store_bytes); i++) {
		fixture = &store_bytes[i];
		ret = init_fgraph_fixture(fixture);
		if (ret && ret != -ENODEV) {
			pr_cont("*Could not set filter* ");
			printed = true;
			goto out2;
		}
	}

	for (j = 0; j < ARRAY_SIZE(store_bytes); j++) {
		fixture = &store_bytes[j];
		ret = register_ftrace_graph(&fixture->gops);
		if (ret) {
			pr_warn("Failed to init store_bytes fgraph tracing\n");
			printed = true;
			goto out1;
		}
	}

	DYN_FTRACE_TEST_NAME();
 out1:
	while (--j >= 0) {
		fixture = &store_bytes[j];
		unregister_ftrace_graph(&fixture->gops);

		if (fixture->error_str && !printed) {
			pr_cont("*** %s ***", fixture->error_str);
			printed = true;
		}
	}
 out2:
	while (--i >= 0) {
		fixture = &store_bytes[i];
		ftrace_free_filter(&fixture->gops.ops);

		if (fixture->error_str && !printed) {
			pr_cont("*** %s ***", fixture->error_str);
			printed = true;
		}
	}
	return printed ? -1 : 0;
}

/* Test the storage passed across function_graph entry and return */
static __init int test_graph_storage(void)
{
	int ret;

	ret = test_graph_storage_single(&store_bytes[0]);
	if (ret)
		return ret;
	ret = test_graph_storage_single(&store_bytes[1]);
	if (ret)
		return ret;
	ret = test_graph_storage_single(&store_bytes[2]);
	if (ret)
		return ret;
	ret = test_graph_storage_single(&store_bytes[3]);
	if (ret)
		return ret;
	ret = test_graph_storage_multi();
	if (ret)
		return ret;
	return 0;
}
#else
static inline int test_graph_storage(void) { return 0; }
#endif /* CONFIG_DYNAMIC_FTRACE */

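/*
 * A hedged, non-built sketch of the shadow-stack storage API these
 * fixtures exercise: the entry handler reserves per-call data keyed
 * by gops->idx, and the return handler retrieves it. The names
 * my_entry/my_return/my_gops are hypothetical; trace_clock_local()
 * is just one plausible payload.
 */
#if 0
static int my_entry(struct ftrace_graph_ent *trace,
		    struct fgraph_ops *gops, struct ftrace_regs *fregs)
{
	u64 *ts = fgraph_reserve_data(gops->idx, sizeof(*ts));

	if (!ts)
		return 0;	/* no room on the shadow stack: skip */
	*ts = trace_clock_local();
	return 1;
}

static void my_return(struct ftrace_graph_ret *trace,
		      struct fgraph_ops *gops, struct ftrace_regs *fregs)
{
	int size;
	u64 *ts = fgraph_retrieve_data(gops->idx, &size);

	if (ts)
		pr_info("call lasted %llu ns\n",
			trace_clock_local() - *ts);
}

static struct fgraph_ops my_gops = {
	.entryfunc = my_entry,
	.retfunc = my_return,
};
#endif
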
/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace,
				      struct fgraph_ops *gops,
				      struct ftrace_regs *fregs)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops_enabled()) {
			ftrace_dump(DUMP_ALL);
			/* ftrace_dump() disables tracing */
			tracing_on();
		}
		return 0;
	}

	return trace_graph_entry(trace, gops, fregs);
}

static struct fgraph_ops fgraph_ops __initdata = {
	.entryfunc = &trace_graph_entry_watchdog,
	.retfunc = &trace_graph_return,
};

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
static struct ftrace_ops direct;
#endif

/*
 * Pretty much the same as for the function tracer, from which
 * this selftest has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;
	char *func_name __maybe_unused;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(&tr->array_buffer);
	fgraph_ops.private = tr;
	ret = register_ftrace_graph(&fgraph_ops);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		disable_tracing_selftest("recovering from a hang");
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	/* Need to also simulate the tr->reset to remove this fgraph_ops */
	tracing_stop_cmdline_record();
	unregister_ftrace_graph(&fgraph_ops);

	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	/*
	 * These tests can take some time to run. Make sure on non-PREEMPT
	 * kernels, we do not trigger the softlockup detector.
	 */
	cond_resched();

	tracing_reset_online_cpus(&tr->array_buffer);
	fgraph_ops.private = tr;

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/*
	 * Register direct function together with graph tracer
	 * and make sure we get graph trace.
	 */
	ftrace_set_filter_ip(&direct, (unsigned long)DYN_FTRACE_TEST_NAME, 0, 0);
	ret = register_ftrace_direct(&direct,
				     (unsigned long)ftrace_stub_direct_tramp);
	if (ret)
		goto out;

	cond_resched();

	ret = register_ftrace_graph(&fgraph_ops);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	count = 0;

	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	unregister_ftrace_graph(&fgraph_ops);

	ret = unregister_ftrace_direct(&direct,
				       (unsigned long)ftrace_stub_direct_tramp,
				       true);
	if (ret)
		goto out;

	cond_resched();

	tracing_start();

	if (!ret && !count) {
		ret = -1;
		goto out;
	}

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);
#endif

	ret = test_graph_storage();

	/* Don't test dynamic tracing, the function tracer already did */
 out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
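
/*
 * A hedged, non-built sketch of the direct-call registration the test
 * above performs: pin the ops to one function's address, then attach
 * a trampoline. ftrace_stub_direct_tramp is the no-op trampoline the
 * selftest uses; a real user would supply arch-specific assembly
 * (my_direct_attach is a hypothetical name).
 */
#if 0
static struct ftrace_ops my_direct;

static int my_direct_attach(void)
{
	int ret;

	ret = ftrace_set_filter_ip(&my_direct,
				   (unsigned long)DYN_FTRACE_TEST_NAME, 0, 0);
	if (ret)
		return ret;

	return register_ftrace_direct(&my_direct,
				      (unsigned long)ftrace_stub_direct_tramp);
}
#endif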

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tr->max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	tracing_start();
 out_no_start:
	trace->reset(tr);
	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER

struct wakeup_test_data {
	struct completion is_ready;
	int go;
};

static int trace_wakeup_test_thread(void *data)
{
	/* Make this a -deadline thread */
	static const struct sched_attr attr = {
		.sched_policy = SCHED_DEADLINE,
		.sched_runtime = 100000ULL,
		.sched_deadline = 10000000ULL,
		.sched_period = 10000000ULL
	};
	struct wakeup_test_data *x = data;

	sched_setattr(current, &attr);

	/* Make it know we have a new prio */
	complete(&x->is_ready);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!x->go) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	complete(&x->is_ready);

	set_current_state(TASK_INTERRUPTIBLE);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	struct task_struct *p;
	struct wakeup_test_data data;
	unsigned long count;
	int ret;

	memset(&data, 0, sizeof(data));

	init_completion(&data.is_ready);

	/* create a -deadline thread */
	p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at -deadline policy */
	wait_for_completion(&data.is_ready);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	while (task_is_runnable(p)) {
		/*
		 * Sleep to make sure the -deadline thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&data.is_ready);

	data.go = 1;
	/* memory barrier is in the wake_up_process() */

	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&data.is_ready);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);

	trace->reset(tr);
	tracing_start();

	tr->max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */
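
/*
 * A hedged, non-built sketch of the -deadline handshake used above:
 * the attr reserves 100us of runtime out of every 10ms period (1%
 * bandwidth), and the completion tells the caller the policy change
 * has taken effect before the test proceeds (my_dl_thread is a
 * hypothetical name).
 */
#if 0
static int my_dl_thread(void *data)
{
	static const struct sched_attr attr = {
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	= 100000ULL,	/* 100us of CPU time ... */
		.sched_deadline	= 10000000ULL,	/* ... due within 10ms ... */
		.sched_period	= 10000000ULL,	/* ... every 10ms */
	};
	struct completion *ready = data;

	sched_setattr(current, &attr);
	complete(ready);	/* policy is in place; caller may continue */

	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}
#endif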

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */