Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 13 Mar 2010 22:40:50 +0000 (14:40 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 13 Mar 2010 22:40:50 +0000 (14:40 -0800)
* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  tracing: Do not record user stack trace from NMI context
  tracing: Disable buffer switching when starting or stopping trace
  tracing: Use same local variable when resetting the ring buffer
  function-graph: Init curr_ret_stack with ret_stack
  ring-buffer: Move disabled check into preempt disable section
  function-graph: Add tracing_thresh support to function_graph tracer
  tracing: Update the comm field in the right variable in update_max_tr
  function-graph: Use comment notation for func names of dangling '}'
  function-graph: Fix unused reference to ftrace_set_func()
  tracing: Fix warning in s_next of trace file ops
  tracing: Include irqflags headers from trace clock
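The tracing_thresh items above add duration filtering to the function_graph tracer: graph_trace_init() registers the trace_graph_thresh_entry/trace_graph_thresh_return callbacks when tracing_thresh is non-zero, and the threshold is set either with the tracing_thresh= boot parameter or through the tracing_thresh debugfs file created in tracer_init_debugfs() (both take microseconds and are stored in nanoseconds). A minimal user-space sketch of the debugfs route, assuming debugfs is mounted at /sys/kernel/debug and root privileges:

/* Sketch only: set a 100 us function_graph duration threshold via debugfs.
 * tracing_thresh and current_tracer are real files in this tree; the mount
 * point and privileges are assumptions.
 */
#include <stdio.h>
#include <stdlib.h>

static void write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(EXIT_FAILURE);
	}
	fputs(val, f);
	fclose(f);
}

int main(void)
{
	/* tracing_thresh is written in microseconds; the kernel keeps it in ns */
	write_str("/sys/kernel/debug/tracing/tracing_thresh", "100");
	/* graph_trace_init() checks tracing_thresh when the tracer is selected,
	 * so set the threshold before switching to function_graph */
	write_str("/sys/kernel/debug/tracing/current_tracer", "function_graph");
	return 0;
}

With a non-zero threshold, only returns of functions that ran longer than the threshold are recorded (see trace_graph_thresh_return() in the trace_functions_graph.c hunks below); writing 0 before selecting the tracer restores the full call graph.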

1  2 
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_functions_graph.c

diff --combined kernel/trace/ring_buffer.c
index a2f0fe9518318fd7813f47e05dab8a5bacb2ed50,54191d6ed195961cc6a7e590dc20aabc83335cdc..05a9f83b8819c39dcc03c76f737eaaf5ae7bbfe7
@@@ -20,7 -20,6 +20,7 @@@
  #include <linux/cpu.h>
  #include <linux/fs.h>
  
 +#include <asm/local.h>
  #include "trace.h"
  
  /*
@@@ -2233,12 -2232,12 +2233,12 @@@ ring_buffer_lock_reserve(struct ring_bu
        if (ring_buffer_flags != RB_BUFFERS_ON)
                return NULL;
  
-       if (atomic_read(&buffer->record_disabled))
-               return NULL;
        /* If we are tracing schedule, we don't want to recurse */
        resched = ftrace_preempt_disable();
  
+       if (atomic_read(&buffer->record_disabled))
+               goto out_nocheck;
        if (trace_recursive_lock())
                goto out_nocheck;
  
@@@ -2470,11 -2469,11 +2470,11 @@@ int ring_buffer_write(struct ring_buffe
        if (ring_buffer_flags != RB_BUFFERS_ON)
                return -EBUSY;
  
-       if (atomic_read(&buffer->record_disabled))
-               return -EBUSY;
        resched = ftrace_preempt_disable();
  
+       if (atomic_read(&buffer->record_disabled))
+               goto out;
        cpu = raw_smp_processor_id();
  
        if (!cpumask_test_cpu(cpu, buffer->cpumask))
@@@ -2542,7 -2541,7 +2542,7 @@@ EXPORT_SYMBOL_GPL(ring_buffer_record_di
   * @buffer: The ring buffer to enable writes
   *
   * Note, multiple disables will need the same number of enables
 - * to truely enable the writing (much like preempt_disable).
 + * to truly enable the writing (much like preempt_disable).
   */
  void ring_buffer_record_enable(struct ring_buffer *buffer)
  {
@@@ -2578,7 -2577,7 +2578,7 @@@ EXPORT_SYMBOL_GPL(ring_buffer_record_di
   * @cpu: The CPU to enable.
   *
   * Note, multiple disables will need the same number of enables
 - * to truely enable the writing (much like preempt_disable).
 + * to truly enable the writing (much like preempt_disable).
   */
  void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
  {
diff --combined kernel/trace/trace.c
index ed01fdba4a559157c9a14687a29c5abfa89249ca,e52683f7c3b271b6936a102f1bedfbfde312f6ad..3ec2ee6f65602fde8cf701432a18b8c7b2fef11e
@@@ -92,12 -92,12 +92,12 @@@ DEFINE_PER_CPU(int, ftrace_cpu_disabled
  static inline void ftrace_disable_cpu(void)
  {
        preempt_disable();
 -      __this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
 +      __this_cpu_inc(ftrace_cpu_disabled);
  }
  
  static inline void ftrace_enable_cpu(void)
  {
 -      __this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
 +      __this_cpu_dec(ftrace_cpu_disabled);
        preempt_enable();
  }
  
@@@ -374,6 -374,21 +374,21 @@@ static int __init set_buf_size(char *st
  }
  __setup("trace_buf_size=", set_buf_size);
  
+ static int __init set_tracing_thresh(char *str)
+ {
+       unsigned long threshhold;
+       int ret;
+
+       if (!str)
+               return 0;
+       ret = strict_strtoul(str, 0, &threshhold);
+       if (ret < 0)
+               return 0;
+       tracing_thresh = threshhold * 1000;
+       return 1;
+ }
+ __setup("tracing_thresh=", set_tracing_thresh);
+
  unsigned long nsecs_to_usecs(unsigned long nsecs)
  {
        return nsecs / 1000;
@@@ -579,9 -594,10 +594,10 @@@ static ssize_t trace_seq_to_buffer(stru
  static arch_spinlock_t ftrace_max_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
  
+ unsigned long __read_mostly   tracing_thresh;
  #ifdef CONFIG_TRACER_MAX_TRACE
  unsigned long __read_mostly   tracing_max_latency;
- unsigned long __read_mostly   tracing_thresh;
  
  /*
   * Copy the new maximum trace into the separate maximum-trace
@@@ -592,7 -608,7 +608,7 @@@ static voi
  __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
  {
        struct trace_array_cpu *data = tr->data[cpu];
-       struct trace_array_cpu *max_data = tr->data[cpu];
+       struct trace_array_cpu *max_data;
  
        max_tr.cpu = cpu;
        max_tr.time_start = data->preempt_timestamp;
        max_data->critical_start = data->critical_start;
        max_data->critical_end = data->critical_end;
  
-       memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
+       memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
        max_data->pid = tsk->pid;
        max_data->uid = task_uid(tsk);
        max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
@@@ -824,10 -840,10 +840,10 @@@ out
        mutex_unlock(&trace_types_lock);
  }
  
- static void __tracing_reset(struct trace_array *tr, int cpu)
+ static void __tracing_reset(struct ring_buffer *buffer, int cpu)
  {
        ftrace_disable_cpu();
-       ring_buffer_reset_cpu(tr->buffer, cpu);
+       ring_buffer_reset_cpu(buffer, cpu);
        ftrace_enable_cpu();
  }
  
@@@ -839,7 -855,7 +855,7 @@@ void tracing_reset(struct trace_array *
  
        /* Make sure all commits have finished */
        synchronize_sched();
-       __tracing_reset(tr, cpu);
+       __tracing_reset(buffer, cpu);
  
        ring_buffer_record_enable(buffer);
  }
@@@ -857,7 -873,7 +873,7 @@@ void tracing_reset_online_cpus(struct t
        tr->time_start = ftrace_now(tr->cpu);
  
        for_each_online_cpu(cpu)
-               __tracing_reset(tr, cpu);
+               __tracing_reset(buffer, cpu);
  
        ring_buffer_record_enable(buffer);
  }
@@@ -934,6 -950,8 +950,8 @@@ void tracing_start(void
                goto out;
        }
  
+       /* Prevent the buffers from switching */
+       arch_spin_lock(&ftrace_max_lock);
  
        buffer = global_trace.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);
  
        buffer = max_tr.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);
  
+       arch_spin_unlock(&ftrace_max_lock);
        ftrace_start();
   out:
        spin_unlock_irqrestore(&tracing_start_lock, flags);
@@@ -964,6 -984,9 +984,9 @@@ void tracing_stop(void
        if (trace_stop_count++)
                goto out;
  
+       /* Prevent the buffers from switching */
+       arch_spin_lock(&ftrace_max_lock);
        buffer = global_trace.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);
  
        buffer = max_tr.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);
  
+       arch_spin_unlock(&ftrace_max_lock);
   out:
        spin_unlock_irqrestore(&tracing_start_lock, flags);
  }
@@@ -1166,7 -1191,7 +1191,7 @@@ trace_function(struct trace_array *tr
        struct ftrace_entry *entry;
  
        /* If we are reading the ring buffer, don't trace */
 -      if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
 +      if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
                return;
  
        event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
@@@ -1259,6 -1284,13 +1284,13 @@@ ftrace_trace_userstack(struct ring_buff
        if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
                return;
  
+       /*
+        * NMIs can not handle page faults, even with fix ups.
+        * The save user stack can (and often does) fault.
+        */
+       if (unlikely(in_nmi()))
+               return;
        event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
                                          sizeof(*entry), flags, pc);
        if (!event)
@@@ -1703,6 -1735,7 +1735,7 @@@ static void *s_start(struct seq_file *m
  
                ftrace_enable_cpu();
  
+               iter->leftover = 0;
                for (p = iter; p && l < *pos; p = s_next(m, p, &l))
                        ;
  
@@@ -4248,10 -4281,10 +4281,10 @@@ static __init int tracer_init_debugfs(v
  #ifdef CONFIG_TRACER_MAX_TRACE
        trace_create_file("tracing_max_latency", 0644, d_tracer,
                        &tracing_max_latency, &tracing_max_lat_fops);
+ #endif
  
        trace_create_file("tracing_thresh", 0644, d_tracer,
                        &tracing_thresh, &tracing_max_lat_fops);
- #endif
  
        trace_create_file("README", 0444, d_tracer,
                        NULL, &tracing_readme_fops);
diff --combined kernel/trace/trace.h
index 09b39112a5e2ecbf04942760719891f2db289fd5,1bc8cd1431d772e6a25d7cc26f55070b16836d23..2825ef2c0b155ae1968997e3efd5e4fd8fdd8570
@@@ -396,9 -396,10 +396,10 @@@ extern int process_new_ksym_entry(char 
  
  extern unsigned long nsecs_to_usecs(unsigned long nsecs);
  
+ extern unsigned long tracing_thresh;
  #ifdef CONFIG_TRACER_MAX_TRACE
  extern unsigned long tracing_max_latency;
- extern unsigned long tracing_thresh;
  
  void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
  void update_max_tr_single(struct trace_array *tr,
@@@ -550,7 -551,7 +551,7 @@@ static inline int ftrace_trace_task(str
   * struct trace_parser - servers for reading the user input separated by spaces
   * @cont: set if the input is not complete - no final space char was found
   * @buffer: holds the parsed user input
 - * @idx: user input lenght
 + * @idx: user input length
   * @size: buffer size
   */
  struct trace_parser {
diff --combined kernel/trace/trace_functions_graph.c
index 3fc2a575664fbc23f8fb40de0ecbd1ea0b7d2315,e9df04b60267678a932f1b1cbe6b145d5c989196..e6989d9b44dae8a23a60bcdb669f226329ef1025
@@@ -188,7 -188,7 +188,7 @@@ static int __trace_graph_entry(struct t
        struct ring_buffer *buffer = tr->buffer;
        struct ftrace_graph_ent_entry *entry;
  
 -      if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
 +      if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
                return 0;
  
        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@@ -237,6 -237,14 +237,14 @@@ int trace_graph_entry(struct ftrace_gra
        return ret;
  }
  
+ int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
+ {
+       if (tracing_thresh)
+               return 1;
+       else
+               return trace_graph_entry(trace);
+ }
  static void __trace_graph_return(struct trace_array *tr,
                                struct ftrace_graph_ret *trace,
                                unsigned long flags,
        struct ring_buffer *buffer = tr->buffer;
        struct ftrace_graph_ret_entry *entry;
  
 -      if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
 +      if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
                return;
  
        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
@@@ -290,13 -298,26 +298,26 @@@ void set_graph_array(struct trace_arra
        smp_mb();
  }
  
+ void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
+ {
+       if (tracing_thresh &&
+           (trace->rettime - trace->calltime < tracing_thresh))
+               return;
+       else
+               trace_graph_return(trace);
+ }
  static int graph_trace_init(struct trace_array *tr)
  {
        int ret;
  
        set_graph_array(tr);
-       ret = register_ftrace_graph(&trace_graph_return,
-                                   &trace_graph_entry);
+       if (tracing_thresh)
+               ret = register_ftrace_graph(&trace_graph_thresh_return,
+                                           &trace_graph_thresh_entry);
+       else
+               ret = register_ftrace_graph(&trace_graph_return,
+                                           &trace_graph_entry);
        if (ret)
                return ret;
        tracing_start_cmdline_record();
@@@ -920,7 -941,7 +941,7 @@@ print_graph_return(struct ftrace_graph_
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        } else {
-               ret = trace_seq_printf(s, "} (%ps)\n", (void *)trace->func);
+               ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }