kernel/trace/trace_selftest.c
// SPDX-License-Identifier: GPL-2.0
/* Include in trace.c */

#include <uapi/linux/sched/types.h>
#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
        switch (entry->type) {
        case TRACE_FN:
        case TRACE_CTX:
        case TRACE_WAKE:
        case TRACE_STACK:
        case TRACE_PRINT:
        case TRACE_BRANCH:
        case TRACE_GRAPH_ENT:
        case TRACE_GRAPH_RETADDR_ENT:
        case TRACE_GRAPH_RET:
                return 1;
        }
        return 0;
}

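/*
 * Consume every event queued on one CPU of the trace buffer, verifying
 * that each entry has a known type. On any sign of corruption, disable
 * tracing and return -1.
 */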
static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
{
        struct ring_buffer_event *event;
        struct trace_entry *entry;
        unsigned int loops = 0;

        while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
                entry = ring_buffer_event_data(event);

                /*
                 * The ring buffer holds at most trace_buf_size entries;
                 * if we loop more times than that, something is wrong
                 * with the ring buffer.
                 */
                if (loops++ > trace_buf_size) {
                        printk(KERN_CONT ".. bad ring buffer ");
                        goto failed;
                }
                if (!trace_valid_entry(entry)) {
                        printk(KERN_CONT ".. invalid entry %d ",
                                entry->type);
                        goto failed;
                }
        }
        return 0;

 failed:
        /* disable tracing */
        tracing_disabled = 1;
        printk(KERN_CONT ".. corrupted trace buffer .. ");
        return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
{
        unsigned long flags, cnt = 0;
        int cpu, ret = 0;

        /* Don't allow flipping of max traces now */
        local_irq_save(flags);
        arch_spin_lock(&buf->tr->max_lock);

        cnt = ring_buffer_entries(buf->buffer);

        /*
         * The trace_test_buffer_cpu runs a while loop to consume all data.
         * If the calling tracer is broken, and is constantly filling
         * the buffer, this will run forever, and hard lock the box.
         * We disable the ring buffer while we do this test to prevent
         * a hard lock up.
         */
        tracing_off();
        for_each_possible_cpu(cpu) {
                ret = trace_test_buffer_cpu(buf, cpu);
                if (ret)
                        break;
        }
        tracing_on();
        arch_spin_unlock(&buf->tr->max_lock);
        local_irq_restore(flags);

        if (count)
                *count = cnt;

        return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
        printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
                trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct ftrace_regs *fregs)
{
        trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct ftrace_regs *fregs)
{
        trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct ftrace_regs *fregs)
{
        trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct ftrace_regs *fregs)
{
        trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
                                         unsigned long pip,
                                         struct ftrace_ops *op,
                                         struct ftrace_regs *fregs)
{
        trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
        .func                   = trace_selftest_test_probe1_func,
};

static struct ftrace_ops test_probe2 = {
        .func                   = trace_selftest_test_probe2_func,
};

static struct ftrace_ops test_probe3 = {
        .func                   = trace_selftest_test_probe3_func,
};

static void print_counts(void)
{
        printk("(%d %d %d %d %d) ",
               trace_selftest_test_probe1_cnt,
               trace_selftest_test_probe2_cnt,
               trace_selftest_test_probe3_cnt,
               trace_selftest_test_global_cnt,
               trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
        trace_selftest_test_probe1_cnt = 0;
        trace_selftest_test_probe2_cnt = 0;
        trace_selftest_test_probe3_cnt = 0;
        trace_selftest_test_global_cnt = 0;
        trace_selftest_test_dyn_cnt = 0;
}

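/*
 * Exercise several ftrace_ops at once: three static probes with
 * different filters, optionally the array ops for the trace instance,
 * and one dynamically allocated probe. After each call to the test
 * functions, the counters must match exactly what the filters allow.
 */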
static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
        int save_ftrace_enabled = ftrace_enabled;
        struct ftrace_ops *dyn_ops;
        char *func1_name;
        char *func2_name;
        int len1;
        int len2;
        int ret = -1;

        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace ops #%d: ", cnt);

        ftrace_enabled = 1;
        reset_counts();

        /* Handle PPC64 '.' name */
        func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
        len1 = strlen(func1_name);
        len2 = strlen(func2_name);

        /*
         * Probe 1 will trace function 1.
         * Probe 2 will trace function 2.
         * Probe 3 will trace functions 1 and 2.
         */
        ftrace_set_filter(&test_probe1, func1_name, len1, 1);
        ftrace_set_filter(&test_probe2, func2_name, len2, 1);
        ftrace_set_filter(&test_probe3, func1_name, len1, 1);
        ftrace_set_filter(&test_probe3, func2_name, len2, 0);

        register_ftrace_function(&test_probe1);
        register_ftrace_function(&test_probe2);
        register_ftrace_function(&test_probe3);
        /* The first pass runs with the main function tracer already registered */
        if (cnt > 1) {
                ftrace_init_array_ops(tr, trace_selftest_test_global_func);
                register_ftrace_function(tr->ops);
        }

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 1)
                goto out;
        if (trace_selftest_test_probe2_cnt != 0)
                goto out;
        if (trace_selftest_test_probe3_cnt != 1)
                goto out;
        if (cnt > 1) {
                if (trace_selftest_test_global_cnt == 0)
                        goto out;
        }

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 1)
                goto out;
        if (trace_selftest_test_probe2_cnt != 1)
                goto out;
        if (trace_selftest_test_probe3_cnt != 2)
                goto out;

        /* Add a dynamic probe */
        dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
        if (!dyn_ops) {
                printk("MEMORY ERROR ");
                goto out;
        }

        dyn_ops->func = trace_selftest_test_dyn_func;

        register_ftrace_function(dyn_ops);

        trace_selftest_test_global_cnt = 0;

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 1)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 3)
                goto out_free;
        if (cnt > 1) {
                if (trace_selftest_test_global_cnt == 0)
                        goto out_free;
        }
        if (trace_selftest_test_dyn_cnt == 0)
                goto out_free;

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 4)
                goto out_free;

        /* Remove trace function from probe 3 */
        func1_name = "!" __stringify(DYN_FTRACE_TEST_NAME);
        len1 = strlen(func1_name);

        ftrace_set_filter(&test_probe3, func1_name, len1, 0);

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 3)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 4)
                goto out_free;
        if (cnt > 1) {
                if (trace_selftest_test_global_cnt == 0)
                        goto out_free;
        }
        if (trace_selftest_test_dyn_cnt == 0)
                goto out_free;

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 3)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 3)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 5)
                goto out_free;

        ret = 0;
 out_free:
        unregister_ftrace_function(dyn_ops);
        kfree(dyn_ops);

 out:
        /* Purposely unregister in the same order */
        unregister_ftrace_function(&test_probe1);
        unregister_ftrace_function(&test_probe2);
        unregister_ftrace_function(&test_probe3);
        if (cnt > 1)
                unregister_ftrace_function(tr->ops);
        ftrace_reset_array_ops(tr);

        /* Make sure everything is off */
        reset_counts();
        DYN_FTRACE_TEST_NAME();
        DYN_FTRACE_TEST_NAME();

        if (trace_selftest_test_probe1_cnt ||
            trace_selftest_test_probe2_cnt ||
            trace_selftest_test_probe3_cnt ||
            trace_selftest_test_global_cnt ||
            trace_selftest_test_dyn_cnt)
                ret = -1;

        ftrace_enabled = save_ftrace_enabled;

        return ret;
}

/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
                                                  struct trace_array *tr,
                                                  int (*func)(void))
{
        int save_ftrace_enabled = ftrace_enabled;
        unsigned long count;
        char *func_name;
        int ret;

        /* The ftrace test PASSED */
        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* passed in by parameter to keep gcc from optimizing it away */
        func();

        /*
         * Some archs *cough*PowerPC*cough* add characters to the
         * start of the function names. We simply put a '*' to
         * accommodate them.
         */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

        /* filter only on our function */
        ftrace_set_global_filter(func_name, strlen(func_name), 1);

        /* enable tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* we should have nothing in the buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);
        if (ret)
                goto out;

        if (count) {
                ret = -1;
                printk(KERN_CONT ".. filter did not filter .. ");
                goto out;
        }

        /* call our function again */
        func();

        /* sleep again */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);

        ftrace_enabled = 1;
        tracing_start();

        /* we should only have one item */
        if (!ret && count != 1) {
                trace->reset(tr);
                printk(KERN_CONT ".. filter failed count=%ld ..", count);
                ret = -1;
                goto out;
        }

        /* Test the ops with global tracing running */
        ret = trace_selftest_ops(tr, 1);
        trace->reset(tr);

 out:
        ftrace_enabled = save_ftrace_enabled;

        /* Enable tracing on all functions again */
        ftrace_set_global_filter(NULL, 0, 1);

        /* Test the ops with global tracing off */
        if (!ret)
                ret = trace_selftest_ops(tr, 2);

        return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
                                               unsigned long pip,
                                               struct ftrace_ops *op,
                                               struct ftrace_regs *fregs)
{
        /*
         * This function is registered without the recursion safe flag.
         * The ftrace infrastructure should provide the recursion
         * protection. If not, this will crash the kernel!
         */
        if (trace_selftest_recursion_cnt++ > 10)
                return;
        DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
                                                    unsigned long pip,
                                                    struct ftrace_ops *op,
                                                    struct ftrace_regs *fregs)
{
        /*
         * We said we would provide our own recursion protection. By
         * calling this function again, we should recurse back into this
         * function and count again. But this only happens if the arch
         * supports all of the ftrace features and nothing else is using
         * the function tracing utility.
         */
        if (trace_selftest_recursion_cnt++)
                return;
        DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
        .func                   = trace_selftest_test_recursion_func,
        .flags                  = FTRACE_OPS_FL_RECURSION,
};

static struct ftrace_ops test_recsafe_probe = {
        .func                   = trace_selftest_test_recursion_safe_func,
};

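/*
 * Verify that a callback registered with FTRACE_OPS_FL_RECURSION is
 * protected from recursing, and that a callback providing its own
 * protection is allowed to recurse.
 */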
static int
trace_selftest_function_recursion(void)
{
        int save_ftrace_enabled = ftrace_enabled;
        char *func_name;
        int len;
        int ret;

        /* The previous test PASSED */
        pr_cont("PASSED\n");
        pr_info("Testing ftrace recursion: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* Handle PPC64 '.' name */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        len = strlen(func_name);

        ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
        if (ret) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_rec_probe);
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_rec_probe);

        ret = -1;
        /*
         * Recursion allows for transitions between contexts,
         * and may call the callback twice.
         */
        if (trace_selftest_recursion_cnt != 1 &&
            trace_selftest_recursion_cnt != 2) {
                pr_cont("*callback not called once (or twice) (%d)* ",
                        trace_selftest_recursion_cnt);
                goto out;
        }

        trace_selftest_recursion_cnt = 1;

        pr_cont("PASSED\n");
        pr_info("Testing ftrace recursion safe: ");

        ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
        if (ret) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_recsafe_probe);
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_recsafe_probe);

        ret = -1;
        if (trace_selftest_recursion_cnt != 2) {
                pr_cont("*callback not called expected 2 times (%d)* ",
                        trace_selftest_recursion_cnt);
                goto out;
        }

        ret = 0;
out:
        ftrace_enabled = save_ftrace_enabled;

        return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

static enum {
        TRACE_SELFTEST_REGS_START,
        TRACE_SELFTEST_REGS_FOUND,
        TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
                                          unsigned long pip,
                                          struct ftrace_ops *op,
                                          struct ftrace_regs *fregs)
{
        struct pt_regs *regs = ftrace_get_regs(fregs);

        if (regs)
                trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
        else
                trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
        .func           = trace_selftest_test_regs_func,
        .flags          = FTRACE_OPS_FL_SAVE_REGS,
};

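/*
 * Verify the FTRACE_OPS_FL_SAVE_REGS handshake: on arches with
 * CONFIG_DYNAMIC_FTRACE_WITH_REGS the callback must see a pt_regs;
 * without arch support, registration must fail unless
 * SAVE_REGS_IF_SUPPORTED is also set.
 */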
static int
trace_selftest_function_regs(void)
{
        int save_ftrace_enabled = ftrace_enabled;
        char *func_name;
        int len;
        int ret;
        int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        supported = 1;
#endif

        /* The previous test PASSED */
        pr_cont("PASSED\n");
        pr_info("Testing ftrace regs%s: ",
                !supported ? "(no arch support)" : "");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* Handle PPC64 '.' name */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        len = strlen(func_name);

        ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
        /*
         * If DYNAMIC_FTRACE is not set, then we just trace all functions.
         * This test really doesn't care.
         */
        if (ret && ret != -ENODEV) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_regs_probe);
        /*
         * Now if the arch does not support passing regs, then this should
         * have failed.
         */
        if (!supported) {
                if (!ret) {
                        pr_cont("*registered save-regs without arch support* ");
                        goto out;
                }
                test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
                ret = register_ftrace_function(&test_regs_probe);
        }
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_regs_probe);

        ret = -1;

        switch (trace_selftest_regs_stat) {
        case TRACE_SELFTEST_REGS_START:
                pr_cont("*callback never called* ");
                goto out;

        case TRACE_SELFTEST_REGS_FOUND:
                if (supported)
                        break;
                pr_cont("*callback received regs without arch support* ");
                goto out;

        case TRACE_SELFTEST_REGS_NOT_FOUND:
                if (!supported)
                        break;
                pr_cont("*callback received NULL regs* ");
                goto out;
        }

        ret = 0;
out:
        ftrace_enabled = save_ftrace_enabled;

        return ret;
}

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
        int save_ftrace_enabled = ftrace_enabled;
        unsigned long count;
        int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
        if (ftrace_filter_param) {
                printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
                return 0;
        }
#endif

        /* make sure msleep has been recorded */
        msleep(1);

        /* start the tracing */
        ftrace_enabled = 1;

        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);

        ftrace_enabled = 1;
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        ret = trace_selftest_startup_dynamic_tracing(trace, tr,
                                                     DYN_FTRACE_TEST_NAME);
        if (ret)
                goto out;

        ret = trace_selftest_function_recursion();
        if (ret)
                goto out;

        ret = trace_selftest_function_regs();
 out:
        ftrace_enabled = save_ftrace_enabled;

        /* kill ftrace totally if we failed */
        if (ret)
                ftrace_kill();

        return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

#define CHAR_NUMBER 123
#define SHORT_NUMBER 12345
#define WORD_NUMBER 1234567890
#define LONG_NUMBER 1234567890123456789LL
#define ERRSTR_BUFLEN 128

struct fgraph_fixture {
        struct fgraph_ops gops;
        int store_size;
        const char *store_type_name;
        char error_str_buf[ERRSTR_BUFLEN];
        char *error_str;
};

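/* Entry handler: reserve fgraph storage and stamp it with a known value. */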
static __init int store_entry(struct ftrace_graph_ent *trace,
                              struct fgraph_ops *gops,
                              struct ftrace_regs *fregs)
{
        struct fgraph_fixture *fixture = container_of(gops, struct fgraph_fixture, gops);
        const char *type = fixture->store_type_name;
        int size = fixture->store_size;
        void *p;

        p = fgraph_reserve_data(gops->idx, size);
        if (!p) {
                snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
                         "Failed to reserve %s\n", type);
                return 0;
        }

        switch (size) {
        case 1:
                *(char *)p = CHAR_NUMBER;
                break;
        case 2:
                *(short *)p = SHORT_NUMBER;
                break;
        case 4:
                *(int *)p = WORD_NUMBER;
                break;
        case 8:
                *(long long *)p = LONG_NUMBER;
                break;
        }

        return 1;
}

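/*
 * Return handler: retrieve the data reserved at entry and make sure the
 * value stamped there survived the function call.
 */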
static __init void store_return(struct ftrace_graph_ret *trace,
                                struct fgraph_ops *gops,
                                struct ftrace_regs *fregs)
{
        struct fgraph_fixture *fixture = container_of(gops, struct fgraph_fixture, gops);
        const char *type = fixture->store_type_name;
        long long expect = 0;
        long long found = -1;
        int size;
        char *p;

        p = fgraph_retrieve_data(gops->idx, &size);
        if (!p) {
                snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
                         "Failed to retrieve %s\n", type);
                return;
        }
        if (fixture->store_size > size) {
                snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
                         "Retrieved size %d is smaller than expected %d\n",
                         size, (int)fixture->store_size);
                return;
        }

        switch (fixture->store_size) {
        case 1:
                expect = CHAR_NUMBER;
                found = *(char *)p;
                break;
        case 2:
                expect = SHORT_NUMBER;
                found = *(short *)p;
                break;
        case 4:
                expect = WORD_NUMBER;
                found = *(int *)p;
                break;
        case 8:
                expect = LONG_NUMBER;
                found = *(long long *)p;
                break;
        }

        if (found != expect) {
                snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
                         "%s returned not %lld but %lld\n", type, expect, found);
                return;
        }
        fixture->error_str = NULL;
}

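/* Arm a fixture: preset its error string and filter it to the test function. */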
static int __init init_fgraph_fixture(struct fgraph_fixture *fixture)
{
        char *func_name;
        int len;

        snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
                 "Failed to execute storage %s\n", fixture->store_type_name);
        fixture->error_str = fixture->error_str_buf;

        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        len = strlen(func_name);

        return ftrace_set_filter(&fixture->gops.ops, func_name, len, 1);
}

/* Test fgraph storage for each size */
static int __init test_graph_storage_single(struct fgraph_fixture *fixture)
{
        int size = fixture->store_size;
        int ret;

        pr_cont("PASSED\n");
        pr_info("Testing fgraph storage of %d byte%s: ", size, str_plural(size));

        ret = init_fgraph_fixture(fixture);
        if (ret && ret != -ENODEV) {
                pr_cont("*Could not set filter* ");
                return -1;
        }

        ret = register_ftrace_graph(&fixture->gops);
        if (ret) {
                pr_warn("Failed to init store_bytes fgraph tracing\n");
                return -1;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_graph(&fixture->gops);

        if (fixture->error_str) {
                pr_cont("*** %s ***", fixture->error_str);
                return -1;
        }

        return 0;
}

static struct fgraph_fixture store_bytes[4] __initdata = {
        [0] = {
                .gops = {
                        .entryfunc              = store_entry,
                        .retfunc                = store_return,
                },
                .store_size = 1,
                .store_type_name = "byte",
        },
        [1] = {
                .gops = {
                        .entryfunc              = store_entry,
                        .retfunc                = store_return,
                },
                .store_size = 2,
                .store_type_name = "short",
        },
        [2] = {
                .gops = {
                        .entryfunc              = store_entry,
                        .retfunc                = store_return,
                },
                .store_size = 4,
                .store_type_name = "word",
        },
        [3] = {
                .gops = {
                        .entryfunc              = store_entry,
                        .retfunc                = store_return,
                },
                .store_size = 8,
                .store_type_name = "long long",
        },
};

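/*
 * Register all four fixtures at once so each fgraph_ops reserves its own
 * storage on the same function, then check that none of them stepped on
 * another's data.
 */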
static __init int test_graph_storage_multi(void)
{
        struct fgraph_fixture *fixture;
        bool printed = false;
        int i, j, ret;

        pr_cont("PASSED\n");
        pr_info("Testing multiple fgraph storage on a function: ");

        for (i = 0; i < ARRAY_SIZE(store_bytes); i++) {
                fixture = &store_bytes[i];
                ret = init_fgraph_fixture(fixture);
                if (ret && ret != -ENODEV) {
                        pr_cont("*Could not set filter* ");
                        printed = true;
                        goto out2;
                }
        }

        for (j = 0; j < ARRAY_SIZE(store_bytes); j++) {
                fixture = &store_bytes[j];
                ret = register_ftrace_graph(&fixture->gops);
                if (ret) {
                        pr_warn("Failed to init store_bytes fgraph tracing\n");
                        printed = true;
                        goto out1;
                }
        }

        DYN_FTRACE_TEST_NAME();
out1:
        while (--j >= 0) {
                fixture = &store_bytes[j];
                unregister_ftrace_graph(&fixture->gops);

                if (fixture->error_str && !printed) {
                        pr_cont("*** %s ***", fixture->error_str);
                        printed = true;
                }
        }
out2:
        while (--i >= 0) {
                fixture = &store_bytes[i];
                ftrace_free_filter(&fixture->gops.ops);

                if (fixture->error_str && !printed) {
                        pr_cont("*** %s ***", fixture->error_str);
                        printed = true;
                }
        }
        return printed ? -1 : 0;
}

/* Test the storage passed across function_graph entry and return */
static __init int test_graph_storage(void)
{
        int ret;

        ret = test_graph_storage_single(&store_bytes[0]);
        if (ret)
                return ret;
        ret = test_graph_storage_single(&store_bytes[1]);
        if (ret)
                return ret;
        ret = test_graph_storage_single(&store_bytes[2]);
        if (ret)
                return ret;
        ret = test_graph_storage_single(&store_bytes[3]);
        if (ret)
                return ret;
        ret = test_graph_storage_multi();
        if (ret)
                return ret;
        return 0;
}
#else
static inline int test_graph_storage(void) { return 0; }
#endif /* CONFIG_DYNAMIC_FTRACE */

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST     100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace,
                                      struct fgraph_ops *gops,
                                      struct ftrace_regs *fregs)
{
        /* This is harmlessly racy, we want to approximately detect a hang */
        if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
                ftrace_graph_stop();
                printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
                if (ftrace_dump_on_oops_enabled()) {
                        ftrace_dump(DUMP_ALL);
                        /* ftrace_dump() disables tracing */
                        tracing_on();
                }
                return 0;
        }

        return trace_graph_entry(trace, gops, fregs);
}

static struct fgraph_ops fgraph_ops __initdata  = {
        .entryfunc              = &trace_graph_entry_watchdog,
        .retfunc                = &trace_graph_return,
};

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
static struct ftrace_ops direct;
#endif

/*
 * Pretty much the same as for the function tracer, from which this
 * selftest has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
                                        struct trace_array *tr)
{
        int ret;
        unsigned long count;
        char *func_name __maybe_unused;

#ifdef CONFIG_DYNAMIC_FTRACE
        if (ftrace_filter_param) {
                printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
                return 0;
        }
#endif

        /*
         * Simulate the init() callback but we attach a watchdog callback
         * to detect and recover from possible hangs
         */
        tracing_reset_online_cpus(&tr->array_buffer);
        fgraph_ops.private = tr;
        ret = register_ftrace_graph(&fgraph_ops);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }
        tracing_start_cmdline_record();

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* Have we just recovered from a hang? */
        if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
                disable_tracing_selftest("recovering from a hang");
                ret = -1;
                goto out;
        }

        tracing_stop();

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);

        /* Need to also simulate the tr->reset to remove this fgraph_ops */
        tracing_stop_cmdline_record();
        unregister_ftrace_graph(&fgraph_ops);

        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
        /*
         * These tests can take some time to run. Make sure on non PREEMPT
         * kernels, we do not trigger the softlockup detector.
         */
        cond_resched();

        tracing_reset_online_cpus(&tr->array_buffer);
        fgraph_ops.private = tr;

        /*
         * Some archs *cough*PowerPC*cough* add characters to the
         * start of the function names. We simply put a '*' to
         * accommodate them.
         */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        ftrace_set_global_filter(func_name, strlen(func_name), 1);

        /*
         * Register direct function together with graph tracer
         * and make sure we get graph trace.
         */
        ftrace_set_filter_ip(&direct, (unsigned long)DYN_FTRACE_TEST_NAME, 0, 0);
        ret = register_ftrace_direct(&direct,
                                     (unsigned long)ftrace_stub_direct_tramp);
        if (ret)
                goto out;

        cond_resched();

        ret = register_ftrace_graph(&fgraph_ops);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        count = 0;

        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);

        unregister_ftrace_graph(&fgraph_ops);

        ret = unregister_ftrace_direct(&direct,
                                       (unsigned long)ftrace_stub_direct_tramp,
                                       true);
        if (ret)
                goto out;

        cond_resched();

        tracing_start();

        if (!ret && !count) {
                ret = -1;
                goto out;
        }

        /* Enable tracing on all functions again */
        ftrace_set_global_filter(NULL, 0, 1);
#endif

        ret = test_graph_storage();

        /* Don't test dynamic tracing, the function tracer already did */
out:
        /* Stop it if we failed */
        if (ret)
                ftrace_graph_stop();

        return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
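/*
 * Verify the irqsoff tracer: disable interrupts for ~100us and check
 * that a latency was recorded in the max buffer.
 */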
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tr->max_latency = 0;
        /* disable interrupts for a bit */
        local_irq_disable();
        udelay(100);
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max irqs-off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tr->max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
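/*
 * Verify the preemptoff tracer: disable preemption for ~100us and check
 * that a latency was recorded in the max buffer.
 */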
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tr->max_latency = 0;
        /* disable preemption for a bit */
        preempt_disable();
        udelay(100);
        preempt_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max preempt-off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tr->max_latency = save_max;

        return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
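/*
 * Verify the combined preempt/irqsoff tracer across two passes of nested
 * preempt/irq-off sections.
 */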
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out_no_start;
        }

        /* reset the max latency */
        tr->max_latency = 0;

        /* disable preemption and interrupts for a bit */
        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max irqs/preempt off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&tr->max_buffer, &count);
        if (ret)
                goto out;

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* do the test by disabling interrupts first this time */
        tr->max_latency = 0;
        tracing_start();
        trace->start(tr);

        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&tr->max_buffer, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

out:
        tracing_start();
out_no_start:
        trace->reset(tr);
        tr->max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
        /* What could possibly go wrong? */
        return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER

struct wakeup_test_data {
        struct completion       is_ready;
        int                     go;
};

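/*
 * Body of the test thread: switch to SCHED_DEADLINE, signal readiness,
 * then sleep until the test wakes us and finally until it stops us.
 */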
static int trace_wakeup_test_thread(void *data)
{
        /* Make this a -deadline thread */
        static const struct sched_attr attr = {
                .sched_policy = SCHED_DEADLINE,
                .sched_runtime = 100000ULL,
                .sched_deadline = 10000000ULL,
                .sched_period = 10000000ULL
        };
        struct wakeup_test_data *x = data;

        sched_setattr(current, &attr);

        /* Let the test know we are running with our new policy */
        complete(&x->is_ready);

        /* now go to sleep and let the test wake us up */
        set_current_state(TASK_INTERRUPTIBLE);
        while (!x->go) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }

        complete(&x->is_ready);

        set_current_state(TASK_INTERRUPTIBLE);

        /* we are awake, now wait to disappear */
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }

        __set_current_state(TASK_RUNNING);

        return 0;
}
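
/*
 * Verify the wakeup tracer: wake a sleeping SCHED_DEADLINE thread while
 * tracing is running and check that the wakeup latency was recorded.
 */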
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        struct task_struct *p;
        struct wakeup_test_data data;
        unsigned long count;
        int ret;

        memset(&data, 0, sizeof(data));

        init_completion(&data.is_ready);

        /* create a -deadline thread */
        p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
        if (IS_ERR(p)) {
                printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
                return -1;
        }

        /* make sure the thread is running at -deadline policy */
        wait_for_completion(&data.is_ready);

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tr->max_latency = 0;

        while (task_is_runnable(p)) {
                /*
                 * Sleep to make sure the -deadline thread is asleep too.
                 * On virtual machines we can't rely on timings,
                 * but we want to make sure this test still works.
                 */
                msleep(100);
        }

        init_completion(&data.is_ready);

        data.go = 1;
        /* memory barrier is in the wake_up_process() */

        wake_up_process(p);

        /* Wait for the task to wake up */
        wait_for_completion(&data.is_ready);

        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);

        trace->reset(tr);
        tracing_start();

        tr->max_latency = save_max;

        /* kill the thread */
        kthread_stop(p);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_BRANCH_TRACER
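/*
 * Verify the branch tracer: run it briefly and make sure branch events
 * actually land in the trace buffer.
 */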
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_BRANCH_TRACER */
