Merge tag 'trace-v4.4' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux...
authorLinus Torvalds <torvalds@linux-foundation.org>
Fri, 6 Nov 2015 21:30:20 +0000 (13:30 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Fri, 6 Nov 2015 21:30:20 +0000 (13:30 -0800)
Pull tracing updates from Steven Rostedt:
 "Most of the changes are cleanups and small fixes.  Some of them have
  stable tags on them.  I searched through my INBOX just as the merge
  window opened and found lots of patches to pull.  I ran them through
  all my tests and they were in linux-next for a few days.

  Features added this release:
  ----------------------------

   - Module globbing.  You can now filter function tracing to all
     modules whose names match a glob, e.g.
     # echo '*:mod:*snd*' > set_ftrace_filter (Dmitry Safonov)

   - Tracer-specific options are now visible even when the tracer is
     not active.  It was rather annoying that you could only see and
     modify a tracer's options after enabling that tracer.  Now they
     appear in the options/ directory even when the tracer is not
     active, although they still show up in the trace_options file
     only while the tracer is active.

   - Trace options are now per instance (although some of the
     tracer-specific options remain global)

   - New tracefs file: set_event_pid.  If any pid is added to this
     file, then all events in the instance are filtered down to the
     listed pids; events from other tasks are ignored.  The
     sched_switch and sched_wakeup events also take the 'next' task
     and the wakee pids into account (a usage sketch follows right
     after this message)"
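
A minimal usage sketch of the new interfaces, assuming tracefs is
mounted at /sys/kernel/tracing (older setups expose the same files
under /sys/kernel/debug/tracing); pid 1234 is only a placeholder:

  # Limit function tracing to modules whose names match *snd*:
  echo '*:mod:*snd*' > /sys/kernel/tracing/set_ftrace_filter

  # Tracer-specific options now show up here even before the tracer
  # is enabled:
  ls /sys/kernel/tracing/options/

  # Record sched events only for pid 1234; sched_switch and
  # sched_wakeup still record switches to and wakeups of that pid:
  echo 1234 > /sys/kernel/tracing/set_event_pid
  echo 1 > /sys/kernel/tracing/events/sched/enable

The per-cpu ignore_pid bookkeeping behind set_event_pid is the block
added to kernel/trace/trace_events.c in the diff below.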

* tag 'trace-v4.4' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (68 commits)
  tracefs: Fix refcount imbalance in start_creating()
  tracing: Put back comma for empty fields in boot string parsing
  tracing: Apply tracer specific options from kernel command line.
  tracing: Add some documentation about set_event_pid
  ring_buffer: Remove unneeded smp_wmb() before wakeup of reader benchmark
  tracing: Allow dumping traces without tracking trace started cpus
  ring_buffer: Fix more races when terminating the producer in the benchmark
  ring_buffer: Do no not complete benchmark reader too early
  tracing: Remove redundant TP_ARGS redefining
  tracing: Rename max_stack_lock to stack_trace_max_lock
  tracing: Allow arch-specific stack tracer
  recordmcount: arm64: Replace the ignored mcount call into nop
  recordmcount: Fix endianness handling bug for nop_mcount
  tracepoints: Fix documentation of RCU lockdep checks
  tracing: ftrace_event_is_function() can return boolean
  tracing: is_legal_op() can return boolean
  ring-buffer: rb_event_is_commit() can return boolean
  ring-buffer: rb_per_cpu_empty() can return boolean
  ring_buffer: ring_buffer_empty{cpu}() can return boolean
  ring-buffer: rb_is_reader_page() can return boolean
  ...

kernel/trace/blktrace.c
kernel/trace/ftrace.c
kernel/trace/trace_events.c
kernel/trace/trace_sched_wakeup.c
kernel/trace/trace_stack.c

Simple merge
Simple merge
index 7ca09cdc20c2f920faa004e32eb248e3bc92bf61,bee1e153005278b63676622207b4f606e2c84f72..6bbc5f652355745d24f6252a93d7b437a0efea15
@@@ -446,6 -469,142 +469,142 @@@ static void ftrace_clear_events(struct 
        mutex_unlock(&event_mutex);
  }
  
 -event_filter_pid_sched_switch_probe_pre(void *data,
+ static int cmp_pid(const void *key, const void *elt)
+ {
+       const pid_t *search_pid = key;
+       const pid_t *pid = elt;
+       if (*search_pid == *pid)
+               return 0;
+       if (*search_pid < *pid)
+               return -1;
+       return 1;
+ }
+
+ static bool
+ check_ignore_pid(struct trace_pid_list *filtered_pids, struct task_struct *task)
+ {
+       pid_t search_pid;
+       pid_t *pid;
+       /*
+        * Return false, because if filtered_pids does not exist,
+        * all pids are good to trace.
+        */
+       if (!filtered_pids)
+               return false;
+       search_pid = task->pid;
+       pid = bsearch(&search_pid, filtered_pids->pids,
+                     filtered_pids->nr_pids, sizeof(pid_t),
+                     cmp_pid);
+       if (!pid)
+               return true;
+       return false;
+ }
+
+ static void
 -event_filter_pid_sched_switch_probe_post(void *data,
++event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
+                   struct task_struct *prev, struct task_struct *next)
+ {
+       struct trace_array *tr = data;
+       struct trace_pid_list *pid_list;
+       pid_list = rcu_dereference_sched(tr->filtered_pids);
+       this_cpu_write(tr->trace_buffer.data->ignore_pid,
+                      check_ignore_pid(pid_list, prev) &&
+                      check_ignore_pid(pid_list, next));
+ }
+
+ static void
++event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
+                   struct task_struct *prev, struct task_struct *next)
+ {
+       struct trace_array *tr = data;
+       struct trace_pid_list *pid_list;
+       pid_list = rcu_dereference_sched(tr->filtered_pids);
+       this_cpu_write(tr->trace_buffer.data->ignore_pid,
+                      check_ignore_pid(pid_list, next));
+ }
+
+ static void
+ event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
+ {
+       struct trace_array *tr = data;
+       struct trace_pid_list *pid_list;
+       /* Nothing to do if we are already tracing */
+       if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
+               return;
+       pid_list = rcu_dereference_sched(tr->filtered_pids);
+       this_cpu_write(tr->trace_buffer.data->ignore_pid,
+                      check_ignore_pid(pid_list, task));
+ }
+
+ static void
+ event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
+ {
+       struct trace_array *tr = data;
+       struct trace_pid_list *pid_list;
+       /* Nothing to do if we are not tracing */
+       if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
+               return;
+       pid_list = rcu_dereference_sched(tr->filtered_pids);
+       /* Set tracing if current is enabled */
+       this_cpu_write(tr->trace_buffer.data->ignore_pid,
+                      check_ignore_pid(pid_list, current));
+ }
+
+ static void __ftrace_clear_event_pids(struct trace_array *tr)
+ {
+       struct trace_pid_list *pid_list;
+       struct trace_event_file *file;
+       int cpu;
+       pid_list = rcu_dereference_protected(tr->filtered_pids,
+                                            lockdep_is_held(&event_mutex));
+       if (!pid_list)
+               return;
+       unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
+       unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);
+       unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
+       unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);
+       list_for_each_entry(file, &tr->events, list) {
+               clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
+       }
+       for_each_possible_cpu(cpu)
+               per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;
+       rcu_assign_pointer(tr->filtered_pids, NULL);
+       /* Wait till all users are no longer using pid filtering */
+       synchronize_sched();
+       free_pages((unsigned long)pid_list->pids, pid_list->order);
+       kfree(pid_list);
+ }
+
+ static void ftrace_clear_event_pids(struct trace_array *tr)
+ {
+       mutex_lock(&event_mutex);
+       __ftrace_clear_event_pids(tr);
+       mutex_unlock(&event_mutex);
+ }
  static void __put_system(struct event_subsystem *system)
  {
        struct event_filter *filter = system->filter;
Simple merge
index 8abf1ba18085742af78176dbc514095a47643c9c,0bd212af406c49fc64ad308112e9ad7fab1eb4ff..dda9e6742950305f36fbe920f9fe0c6f68d83fbf
@@@ -85,19 -91,9 +91,19 @@@ check_stack(unsigned long ip, unsigned 
        if (!object_is_on_stack(stack))
                return;
  
 +      /* Can't do this from NMI context (can cause deadlocks) */
 +      if (in_nmi())
 +              return;
 +
        local_irq_save(flags);
-       arch_spin_lock(&max_stack_lock);
+       arch_spin_lock(&stack_trace_max_lock);
  
 +      /*
 +       * RCU may not be watching, make it see us.
 +       * The stack trace code uses rcu_sched.
 +       */
 +      rcu_irq_enter();
 +
        /* In case another CPU set the tracer_frame on us */
        if (unlikely(!frame_size))
                this_size -= tracer_frame;
        }
  
   out:
-       arch_spin_unlock(&max_stack_lock);
 +      rcu_irq_exit();
+       arch_spin_unlock(&stack_trace_max_lock);
        local_irq_restore(flags);
  }