ftrace: tracer for scheduler wakeup latency
author Steven Rostedt <srostedt@redhat.com>
Mon, 12 May 2008 19:20:42 +0000 (21:20 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
Fri, 23 May 2008 18:32:36 +0000 (20:32 +0200)
This patch adds the tracer that tracks the wakeup latency of the
highest priority waking task.

  "wakeup" is added to /debugfs/tracing/available_tracers

Also added to /debugfs/tracing

  tracing_max_latency
     holds the current max latency for the wakeup

  wakeup_thresh
     if set to anything other than zero, a trace will be
     recorded for every wakeup that takes longer than this
     number of microseconds (all values are in usecs);
     recording a new trace deletes the previous one
     (see the usage sketch below)
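
A minimal userspace sketch of driving these controls; hypothetical,
assuming debugfs is mounted at /debugfs as above, and using ftrace's
current_tracer file (not added by this patch) to select the tracer:

  #include <stdio.h>

  /* write a string to a tracing control file */
  static void write_str(const char *path, const char *val)
  {
          FILE *f = fopen(path, "w");

          if (f) {
                  fputs(val, f);
                  fclose(f);
          }
  }

  int main(void)
  {
          /* select the wakeup tracer */
          write_str("/debugfs/tracing/current_tracer", "wakeup");
          /* record a trace for any wakeup slower than 100 usecs */
          write_str("/debugfs/tracing/wakeup_thresh", "100");
          return 0;
  }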

Examples:

  (with ftrace_enabled = 0)

============
preemption latency trace v1.1.5 on 2.6.24-rc8
--------------------------------------------------------------------
 latency: 26 us, #2/2, CPU#1 | (M:rt VP:0, KP:0, SP:0 HP:0 #P:2)
    -----------------
    | task: migration/0-3 (uid:0 nice:-5 policy:1 rt_prio:99)
    -----------------

                 _------=> CPU#
                / _-----=> irqs-off
               | / _----=> need-resched
               || / _---=> hardirq/softirq
               ||| / _--=> preempt-depth
               |||| /
               |||||     delay
   cmd     pid ||||| time  |   caller
      \   /    |||||   \   |   /
   quilt-8551  0d..3    0us+: wake_up_process+0x15/0x17 <ffffffff80233e80> (sched_exec+0xc9/0x100 <ffffffff80235343>)
   quilt-8551  0d..4   26us : sched_switch_callback+0x73/0x81 <ffffffff80338d2f> (schedule+0x483/0x6d5 <ffffffff8048b3ee>)

vim:ft=help
============

  (with ftrace_enabled = 1)

============
preemption latency trace v1.1.5 on 2.6.24-rc8
--------------------------------------------------------------------
 latency: 36 us, #45/45, CPU#0 | (M:rt VP:0, KP:0, SP:0 HP:0 #P:2)
    -----------------
    | task: migration/1-5 (uid:0 nice:-5 policy:1 rt_prio:99)
    -----------------

                 _------=> CPU#
                / _-----=> irqs-off
               | / _----=> need-resched
               || / _---=> hardirq/softirq
               ||| / _--=> preempt-depth
               |||| /
               |||||     delay
   cmd     pid ||||| time  |   caller
      \   /    |||||   \   |   /
    bash-10653 1d..3    0us : wake_up_process+0x15/0x17 <ffffffff80233e80> (sched_exec+0xc9/0x100 <ffffffff80235343>)
    bash-10653 1d..3    1us : try_to_wake_up+0x271/0x2e7 <ffffffff80233dcf> (sub_preempt_count+0xc/0x7a <ffffffff8023309e>)
    bash-10653 1d..2    2us : try_to_wake_up+0x296/0x2e7 <ffffffff80233df4> (update_rq_clock+0x9/0x20 <ffffffff802303f3>)
    bash-10653 1d..2    2us : update_rq_clock+0x1e/0x20 <ffffffff80230408> (__update_rq_clock+0xc/0x90 <ffffffff80230366>)
    bash-10653 1d..2    3us : __update_rq_clock+0x1b/0x90 <ffffffff80230375> (sched_clock+0x9/0x29 <ffffffff80214529>)
    bash-10653 1d..2    4us : try_to_wake_up+0x2a6/0x2e7 <ffffffff80233e04> (activate_task+0xc/0x3f <ffffffff8022ffca>)
    bash-10653 1d..2    4us : activate_task+0x2d/0x3f <ffffffff8022ffeb> (enqueue_task+0xe/0x66 <ffffffff8022ff66>)
    bash-10653 1d..2    5us : enqueue_task+0x5b/0x66 <ffffffff8022ffb3> (enqueue_task_rt+0x9/0x3c <ffffffff80233351>)
    bash-10653 1d..2    6us : try_to_wake_up+0x2ba/0x2e7 <ffffffff80233e18> (check_preempt_wakeup+0x12/0x99 <ffffffff80234f84>)
[...]
    bash-10653 1d..5   33us : tracing_record_cmdline+0xcf/0xd4 <ffffffff80338aad> (_spin_unlock+0x9/0x33 <ffffffff8048d3ec>)
    bash-10653 1d..5   34us : _spin_unlock+0x19/0x33 <ffffffff8048d3fc> (sub_preempt_count+0xc/0x7a <ffffffff8023309e>)
    bash-10653 1d..4   35us : wakeup_sched_switch+0x65/0x2ff <ffffffff80339f66> (_spin_lock_irqsave+0xc/0xa9 <ffffffff8048d08b>)
    bash-10653 1d..4   35us : _spin_lock_irqsave+0x19/0xa9 <ffffffff8048d098> (add_preempt_count+0xe/0x77 <ffffffff8023311a>)
    bash-10653 1d..4   36us : sched_switch_callback+0x73/0x81 <ffffffff80338d2f> (schedule+0x483/0x6d5 <ffffffff8048b3ee>)

vim:ft=help
============

The [...] above replaces trace lines elided to keep this mail short.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/ftrace.h
kernel/trace/Kconfig
kernel/trace/Makefile
kernel/trace/trace_sched_wakeup.c [new file with mode: 0644]

index b96ef14c249a95638d77059bcfaa396be00622a6..db8a5e7abe4119f13ab5752f93b00f18ca48584f 100644
@@ -5,10 +5,6 @@
 
 #include <linux/linkage.h>
 
-#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-#define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
-#define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
-
 typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
 
 struct ftrace_ops {
@@ -35,4 +31,23 @@ extern void mcount(void);
 # define unregister_ftrace_function(ops) do { } while (0)
 # define clear_ftrace_function(ops) do { } while (0)
 #endif /* CONFIG_FTRACE */
+
+
+#ifdef CONFIG_FRAME_POINTER
+/* TODO: need to fix this for ARM */
+# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
+# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
+# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
+# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
+# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
+#else
+# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+# define CALLER_ADDR1 0UL
+# define CALLER_ADDR2 0UL
+# define CALLER_ADDR3 0UL
+# define CALLER_ADDR4 0UL
+# define CALLER_ADDR5 0UL
+#endif
+
 #endif /* _LINUX_FTRACE_H */
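
The CALLER_ADDRn macros move behind a CONFIG_FRAME_POINTER guard
because __builtin_return_address(n) with n > 0 is only reliable when
frame pointers are kept; without them the deeper levels are pinned to
0UL. A hypothetical consumer (record_callers is not part of this
patch) illustrates the resulting contract:

  /* assumes <linux/kernel.h> and <linux/ftrace.h> */
  static void record_callers(void)
  {
          unsigned long ip1 = CALLER_ADDR1;       /* caller's caller, or 0UL */
          unsigned long ip2 = CALLER_ADDR2;

          /* safe with or without frame pointers: just test for zero */
          if (ip1)
                  printk(KERN_DEBUG "caller %08lx <- %08lx\n", ip1, ip2);
  }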
index 5d6aa92866cd67bc4ecc226af668969d3963bc99..892ecc94a82b62f2282523bfbf989c4e6d8f170b 100644
@@ -4,6 +4,9 @@
 config HAVE_FTRACE
        bool
 
+config TRACER_MAX_TRACE
+       bool
+
 config TRACING
        bool
        select DEBUG_FS
@@ -23,6 +26,16 @@ config FTRACE
          (the bootup default), then the overhead of the instructions is very
          small and not measurable even in micro-benchmarks.
 
+config SCHED_TRACER
+       bool "Scheduling Latency Tracer"
+       depends on DEBUG_KERNEL
+       select TRACING
+       select CONTEXT_SWITCH_TRACER
+       select TRACER_MAX_TRACE
+       help
+         This tracer tracks the latency of the highest priority task
+         to be scheduled in, starting from the point it has woken up.
+
 config CONTEXT_SWITCH_TRACER
        bool "Trace process context switches"
        depends on DEBUG_KERNEL
index 6b54ceb7f16e89cc9e8a43dd4851662df8ef9679..5508cdb19aea7595c9792f0691046e3203eaf9bc 100644
@@ -3,5 +3,6 @@ obj-$(CONFIG_FTRACE) += libftrace.o
 obj-$(CONFIG_TRACING) += trace.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_FTRACE) += trace_functions.o
+obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
 
 libftrace-y := ftrace.o
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
new file mode 100644
index 0000000..7c3ccef
--- /dev/null
@@ -0,0 +1,310 @@
+/*
+ * trace task wakeup timings
+ *
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
+ *
+ * Based on code from the latency_tracer, that is:
+ *
+ *  Copyright (C) 2004-2006 Ingo Molnar
+ *  Copyright (C) 2004 William Lee Irwin III
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/kallsyms.h>
+#include <linux/uaccess.h>
+#include <linux/ftrace.h>
+
+#include "trace.h"
+
+static struct trace_array      *wakeup_trace;
+static int __read_mostly       tracer_enabled;
+
+static struct task_struct      *wakeup_task;
+static int                     wakeup_cpu;
+static unsigned                        wakeup_prio = -1;
+
+static DEFINE_SPINLOCK(wakeup_lock);
+
+static void notrace __wakeup_reset(struct trace_array *tr);
+
+/*
+ * Should this new latency be reported/recorded?
+ */
+static int notrace report_latency(cycle_t delta)
+{
+       if (tracing_thresh) {
+               if (delta < tracing_thresh)
+                       return 0;
+       } else {
+               if (delta <= tracing_max_latency)
+                       return 0;
+       }
+       return 1;
+}
+
+void notrace
+wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
+{
+       unsigned long latency = 0, t0 = 0, t1 = 0;
+       struct trace_array *tr = wakeup_trace;
+       struct trace_array_cpu *data;
+       cycle_t T0, T1, delta;
+       unsigned long flags;
+       long disabled;
+       int cpu;
+
+       if (unlikely(!tracer_enabled))
+               return;
+
+       /*
+        * When we start a new trace, we set wakeup_task to NULL
+        * and then set tracer_enabled = 1. We want to make sure
+        * that another CPU does not see the tracer_enabled = 1
+        * and the wakeup_task with an older task, that might
+        * actually be the same as next.
+        */
+       smp_rmb();
+
+       if (next != wakeup_task)
+               return;
+
+       /* The task we are waiting for is waking up */
+       data = tr->data[wakeup_cpu];
+
+       /* disable local data, not wakeup_cpu data */
+       cpu = raw_smp_processor_id();
+       disabled = atomic_inc_return(&tr->data[cpu]->disabled);
+       if (likely(disabled != 1))
+               goto out;
+
+       spin_lock_irqsave(&wakeup_lock, flags);
+
+       /* We could race with grabbing wakeup_lock */
+       if (unlikely(!tracer_enabled || next != wakeup_task))
+               goto out_unlock;
+
+       ftrace(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags);
+
+       /*
+        * usecs conversion is slow so we try to delay the conversion
+        * as long as possible:
+        */
+       T0 = data->preempt_timestamp;
+       T1 = now(cpu);
+       delta = T1 - T0;
+
+       if (!report_latency(delta))
+               goto out_unlock;
+
+       latency = nsecs_to_usecs(delta);
+
+       tracing_max_latency = delta;
+       t0 = nsecs_to_usecs(T0);
+       t1 = nsecs_to_usecs(T1);
+
+       update_max_tr(tr, wakeup_task, wakeup_cpu);
+
+       if (tracing_thresh) {
+               printk(KERN_INFO "(%16s-%-5d|#%d): %lu us wakeup latency "
+                      "violates %lu us threshold.\n"
+                      " => started at timestamp %lu: ",
+                               wakeup_task->comm, wakeup_task->pid,
+                               raw_smp_processor_id(),
+                               latency, nsecs_to_usecs(tracing_thresh), t0);
+       } else {
+               printk(KERN_INFO "(%16s-%-5d|#%d): new %lu us maximum "
+                      "wakeup latency.\n => started at timestamp %lu: ",
+                               wakeup_task->comm, wakeup_task->pid,
+                               cpu, latency, t0);
+       }
+
+       printk(KERN_CONT "   ended at timestamp %lu: ", t1);
+       dump_stack();
+       t1 = nsecs_to_usecs(now(cpu));
+       printk(KERN_CONT "   dump-end timestamp %lu\n\n", t1);
+
+out_unlock:
+       __wakeup_reset(tr);
+       spin_unlock_irqrestore(&wakeup_lock, flags);
+out:
+       atomic_dec(&tr->data[cpu]->disabled);
+}
+
+static void notrace __wakeup_reset(struct trace_array *tr)
+{
+       struct trace_array_cpu *data;
+       int cpu;
+
+       assert_spin_locked(&wakeup_lock);
+
+       for_each_possible_cpu(cpu) {
+               data = tr->data[cpu];
+               tracing_reset(data);
+       }
+
+       wakeup_cpu = -1;
+       wakeup_prio = -1;
+
+       if (wakeup_task)
+               put_task_struct(wakeup_task);
+
+       wakeup_task = NULL;
+}
+
+static void notrace wakeup_reset(struct trace_array *tr)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&wakeup_lock, flags);
+       __wakeup_reset(tr);
+       spin_unlock_irqrestore(&wakeup_lock, flags);
+}
+
+static notrace void
+wakeup_check_start(struct trace_array *tr, struct task_struct *p,
+                  struct task_struct *curr)
+{
+       int cpu = smp_processor_id();
+       unsigned long flags;
+       long disabled;
+
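+       /*
+        * Only care about an RT task waking up whose priority beats
+        * (is numerically lower than) both the task currently being
+        * traced and the task it would preempt.
+        */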
+       if (likely(!rt_task(p)) ||
+                       p->prio >= wakeup_prio ||
+                       p->prio >= curr->prio)
+               return;
+
+       disabled = atomic_inc_return(&tr->data[cpu]->disabled);
+       if (unlikely(disabled != 1))
+               goto out;
+
+       /* interrupts should be off from try_to_wake_up */
+       spin_lock(&wakeup_lock);
+
+       /* check for races. */
+       if (!tracer_enabled || p->prio >= wakeup_prio)
+               goto out_locked;
+
+       /* reset the trace */
+       __wakeup_reset(tr);
+
+       wakeup_cpu = task_cpu(p);
+       wakeup_prio = p->prio;
+
+       wakeup_task = p;
+       get_task_struct(wakeup_task);
+
+       local_save_flags(flags);
+
+       tr->data[wakeup_cpu]->preempt_timestamp = now(cpu);
+       ftrace(tr, tr->data[wakeup_cpu], CALLER_ADDR1, CALLER_ADDR2, flags);
+
+out_locked:
+       spin_unlock(&wakeup_lock);
+out:
+       atomic_dec(&tr->data[cpu]->disabled);
+}
+
+notrace void
+ftrace_wake_up_task(struct task_struct *wakee, struct task_struct *curr)
+{
+       if (likely(!tracer_enabled))
+               return;
+
+       wakeup_check_start(wakeup_trace, wakee, curr);
+}
+
+notrace void
+ftrace_wake_up_new_task(struct task_struct *wakee, struct task_struct *curr)
+{
+       if (likely(!tracer_enabled))
+               return;
+
+       wakeup_check_start(wakeup_trace, wakee, curr);
+}
+
+static notrace void start_wakeup_tracer(struct trace_array *tr)
+{
+       wakeup_reset(tr);
+
+       /*
+        * Don't let the tracer_enabled = 1 show up before
+        * the wakeup_task is reset. This may be overkill since
+        * wakeup_reset does a spin_unlock after setting the
+        * wakeup_task to NULL, but I want to be safe.
+        * This is a slow path anyway.
+        */
+       smp_wmb();
+
+       tracer_enabled = 1;
+
+       return;
+}
+
+static notrace void stop_wakeup_tracer(struct trace_array *tr)
+{
+       tracer_enabled = 0;
+}
+
+static notrace void wakeup_tracer_init(struct trace_array *tr)
+{
+       wakeup_trace = tr;
+
+       if (tr->ctrl)
+               start_wakeup_tracer(tr);
+}
+
+static notrace void wakeup_tracer_reset(struct trace_array *tr)
+{
+       if (tr->ctrl) {
+               stop_wakeup_tracer(tr);
+               /* make sure we put back any tasks we are tracing */
+               wakeup_reset(tr);
+       }
+}
+
+static void wakeup_tracer_ctrl_update(struct trace_array *tr)
+{
+       if (tr->ctrl)
+               start_wakeup_tracer(tr);
+       else
+               stop_wakeup_tracer(tr);
+}
+
+static void notrace wakeup_tracer_open(struct trace_iterator *iter)
+{
+       /* stop the trace while dumping */
+       if (iter->tr->ctrl)
+               stop_wakeup_tracer(iter->tr);
+}
+
+static void notrace wakeup_tracer_close(struct trace_iterator *iter)
+{
+       /* restart the trace; this resets and forgets the recorded task */
+       if (iter->tr->ctrl)
+               start_wakeup_tracer(iter->tr);
+}
+
+static struct tracer wakeup_tracer __read_mostly =
+{
+       .name           = "wakeup",
+       .init           = wakeup_tracer_init,
+       .reset          = wakeup_tracer_reset,
+       .open           = wakeup_tracer_open,
+       .close          = wakeup_tracer_close,
+       .ctrl_update    = wakeup_tracer_ctrl_update,
+       .print_max      = 1,
+};
+
+__init static int init_wakeup_tracer(void)
+{
+       int ret;
+
+       ret = register_tracer(&wakeup_tracer);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+device_initcall(init_wakeup_tracer);
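
The hooks above (ftrace_wake_up_task, ftrace_wake_up_new_task and
wakeup_sched_switch) are defined here but not wired up by this diff;
the scheduler-side call sites land separately. A hypothetical sketch
of where they are meant to be invoked (placement assumed):

  /* in try_to_wake_up(), once the wakee is on a runqueue */
  ftrace_wake_up_task(p, rq->curr);

  /* in wake_up_new_task(), likewise for freshly forked tasks */
  ftrace_wake_up_new_task(p, rq->curr);

  /* in the context-switch path, as prev hands the CPU to next */
  wakeup_sched_switch(prev, next);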