/*
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from the function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"
struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		ignore;
};

struct fgraph_data {
	struct fgraph_cpu_data	*cpu_data;

	/* Place to preserve the last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};
#define TRACE_GRAPH_INDENT	2

#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display overhead? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	{ } /* Empty entry */
};
static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or absolute time by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION,
	.opts = trace_opts
};
static struct trace_array *graph_array;
/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = index;

	return 0;
}
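
/*
 * Illustrative sketch (not part of this file): how an arch entry hook
 * typically uses the push helper, modeled loosely on x86's
 * prepare_ftrace_return(). Exact signatures and the return_to_handler
 * trampoline are per-architecture.
 *
 *	void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 *				   unsigned long frame_pointer)
 *	{
 *		struct ftrace_graph_ent trace;
 *		unsigned long old = *parent;
 *
 *		trace.func = self_addr;
 *		if (ftrace_push_return_trace(old, self_addr, &trace.depth,
 *					     frame_pointer) == -EBUSY)
 *			return;
 *
 *		if (!ftrace_graph_entry(&trace)) {
 *			current->curr_ret_stack--;	// entry rejected, undo push
 *			return;
 *		}
 *
 *		*parent = (unsigned long)&return_to_handler;	// hijack return
 *	}
 */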
/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}
/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);
	barrier();
	current->curr_ret_stack--;

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
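
/*
 * Call-flow note (illustrative): the patched return address points at the
 * arch trampoline (return_to_handler on x86), which calls
 * ftrace_return_to_handler() and then jumps to the address returned here,
 * so the task resumes exactly where the original caller expected.
 */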
static int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!filter_current_check_discard(buffer, call, entry, event))
		ring_buffer_unlock_commit(buffer, event);

	return 1;
}
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* trace it when it is nested in, or is, an enabled function */
	if (!(trace->depth || ftrace_graph_addr(trace->func)))
		return 0;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}
static void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!filter_current_check_discard(buffer, call, entry, event))
		ring_buffer_unlock_commit(buffer, event);
}
void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */
	smp_mb();
}
static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}
static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}
static int max_bytes_for_cpu;
static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int ret;

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email.
	 */
	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
#define TRACE_GRAPH_PROCINFO_LENGTH	14
static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	if (!trace_seq_putc(s, ' '))
		return TRACE_TYPE_PARTIAL_LINE;

	return trace_print_lat_fmt(s, entry);
}
/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;

	/*
	 * Context-switch trace line:
	 *
	 * ------------------------------------------
	 * | 1)  migration/0--1  =>  sshd-1755
	 * ------------------------------------------
	 */
	ret = trace_seq_printf(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {
		ring_iter = iter->buffer_iter[iter->cpu];

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
			event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
						 NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			data->ret = *next;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
/* Signal an overhead of time execution to the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
	/* If the duration column is disabled, we don't need anything */
	if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
		return 1;

	/* Non nested entry or return */
	if (duration == -1)
		return trace_seq_printf(s, "  ");

	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 usecs */
		if (duration > 100000ULL)
			return trace_seq_printf(s, "! ");

		/* Duration exceeded 10 usecs */
		if (duration > 10000ULL)
			return trace_seq_printf(s, "+ ");
	}

	return trace_seq_printf(s, "  ");
}
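
/*
 * Illustrative output (made-up durations): "+" flags a function that ran
 * longer than 10 usecs, "!" one that ran longer than 100 usecs:
 *
 *  1)   1.015 us   |        _spin_lock_irqsave();
 *  1) + 12.357 us  |      }
 *  1) ! 136.010 us |    }
 */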
static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}
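
/*
 * Worked example (hypothetical timestamp): for t = 5123456789 ns, do_div()
 * leaves t == 5 seconds and returns the remainder 123456789 ns, which
 * becomes 123456 after the division by 1000, so the column reads
 * "    5.123456 |  ".
 */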
static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	/* Absolute time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_printf(s, "==========>");
	else
		ret = trace_seq_printf(s, "<==========");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Don't close the duration column if there isn't one */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		trace_seq_printf(s, " |");
	ret = trace_seq_printf(s, "\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
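
/*
 * Illustrative output (made-up values): an interrupt entry and exit in
 * the irqentry text section get arrow markers instead of a duration:
 *
 *  1)   ==========> |
 *  1)               |  smp_apic_timer_interrupt() {
 *  ...
 *  1)   <========== |
 */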
enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_printf(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
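
/*
 * Worked example (hypothetical duration): for duration = 12345 ns,
 * do_div() leaves 12 and returns 345, so this prints "12.345 us "
 * padded to the 7-character column. Note the value printed is in usecs
 * even though the buffer is named msecs_str.
 */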
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	int ret;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_printf(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		int cpu = iter->cpu;
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		*depth = call->depth - 1;
	}

	/* Overhead */
	ret = print_graph_overhead(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = print_graph_duration(duration, s);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);

		*depth = call->depth;
	}

	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = trace_seq_printf(s, "            |  ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * We already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}
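
/*
 * Illustrative output (made-up values): a leaf call is folded onto one
 * line with its duration, while a nested call opens a bracket that
 * print_graph_return() later closes:
 *
 *  1)   0.633 us   |        rcu_bh_qs();
 *  1)              |        irq_enter() {
 *  1)   0.500 us   |          idle_cpu();
 *  1)   1.875 us   |        }
 */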
static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Absolute time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		ret = print_graph_lat_fmt(s, ent);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it. Because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int ret;
	int i;

	if (data) {
		int cpu = iter->cpu;
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level as the bracket.
		 */
		*depth = trace->depth - 1;
	}

	if (print_graph_prologue(iter, s, 0, 0))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead */
	ret = print_graph_overhead(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = print_graph_duration(duration, s);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "}\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overrun */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
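
/*
 * Illustrative output with funcgraph-overrun set (made-up values): the
 * overrun count follows the closing bracket on its own line:
 *
 *  1)   2.342 us   |  }
 *  (Overruns: 1)
 */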
static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0))
		return TRACE_TYPE_PARTIAL_LINE;

	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = trace_seq_printf(s, "            |  ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_printf(s, " ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_printf(s, "/* ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->trace(iter, sym_flags);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/* Strip ending newline */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_printf(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
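
/*
 * Illustrative output (made-up values): a trace_printk() issued inside a
 * traced function is rendered one indent level below its bracket, wrapped
 * in C comment markers (shown here as (* ... *) so this comment stays
 * legal):
 *
 *  1)               |      kfree() {
 *  1)               |        (* hit a rare path *)
 *  1)   4.116 us    |      }
 */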
enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved on the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter);
	}
	default:
		return print_graph_comment(s, entry, iter);
	}

	return TRACE_TYPE_HANDLED;
}
static void print_lat_header(struct seq_file *s)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "						/* 4 spaces */
		"                 ";				/* 17 spaces */
	int size = 0;

	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| / _-=> lock-depth      \n", size, spaces);
	seq_printf(s, "#%.*s|||| /                     \n", size, spaces);
}
static void print_graph_headers(struct seq_file *s)
{
	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s);

	/* 1st line */
	seq_printf(s, "#");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " CPU");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID       ");
	if (lat)
		seq_printf(s, "|||||");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "#");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " |  ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "   |    |        ");
	if (lat)
		seq_printf(s, "|||||");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}
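
/*
 * With the default flags (cpu, overhead, duration) the two header lines
 * come out roughly as:
 *
 * # CPU  DURATION                  FUNCTION CALLS
 * # |     |   |                     |   |   |   |
 */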
static void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	int cpu;

	iter->private = NULL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		*pid = -1;
		*depth = 0;
		*ignore = 0;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warning("function graph tracer: not enough memory\n");
}
static void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}
static struct tracer graph_trace __read_mostly = {
	.name		= "function_graph",
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.wait_pipe	= poll_wait_pipe,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};
static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	return register_tracer(&graph_trace);
}
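
/*
 * Worked example for the snprintf(NULL, 0, ...) idiom above: snprintf()
 * with a zero-sized buffer just reports the length it would have written,
 * so with nr_cpu_ids == 64 it returns 2 for "63" and print_graph_cpu()
 * pads every CPU number to two columns.
 */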
device_initcall(init_graph_trace);