Merge tag 'trace-v4.4' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux...
authorLinus Torvalds <torvalds@linux-foundation.org>
Fri, 6 Nov 2015 21:30:20 +0000 (13:30 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Fri, 6 Nov 2015 21:30:20 +0000 (13:30 -0800)
Pull tracing updates from Steven Rostedt:
 "Most of the changes are clean ups and small fixes.  Some of them have
  stable tags to them.  I searched through my INBOX just as the merge
  window opened and found lots of patches to pull.  I ran them through
  all my tests and they were in linux-next for a few days.

  Features added this release:
  ----------------------------

   - Module globbing.  You can now filter function tracing to several
     modules.  # echo '*:mod:*snd*' > set_ftrace_filter (Dmitry Safonov)

   - Tracer-specific options are now visible even when the tracer is not
     active.  It was rather annoying that you could only see and modify
     tracer options after enabling the tracer.  Now they appear in the
     options/ directory even when the tracer is not active, although they
     still show up in the trace_options file only while the tracer is
     active.  (A usage sketch follows below.)

   - Trace options are now per instance (although some of the
     tracer-specific options are global)

   - New tracefs file: set_event_pid.  If any pid is added to this file,
     then all events in the instance will filter out events that do not
     belong to one of the listed pids.  The sched_switch and sched_wakeup
     events also check the next and wakee pids (see the sketch below)"

* tag 'trace-v4.4' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (68 commits)
  tracefs: Fix refcount imbalance in start_creating()
  tracing: Put back comma for empty fields in boot string parsing
  tracing: Apply tracer specific options from kernel command line.
  tracing: Add some documentation about set_event_pid
  ring_buffer: Remove unneeded smp_wmb() before wakeup of reader benchmark
  tracing: Allow dumping traces without tracking trace started cpus
  ring_buffer: Fix more races when terminating the producer in the benchmark
  ring_buffer: Do no not complete benchmark reader too early
  tracing: Remove redundant TP_ARGS redefining
  tracing: Rename max_stack_lock to stack_trace_max_lock
  tracing: Allow arch-specific stack tracer
  recordmcount: arm64: Replace the ignored mcount call into nop
  recordmcount: Fix endianness handling bug for nop_mcount
  tracepoints: Fix documentation of RCU lockdep checks
  tracing: ftrace_event_is_function() can return boolean
  tracing: is_legal_op() can return boolean
  ring-buffer: rb_event_is_commit() can return boolean
  ring-buffer: rb_per_cpu_empty() can return boolean
  ring_buffer: ring_buffer_empty{cpu}() can return boolean
  ring-buffer: rb_is_reader_page() can return boolean
  ...

kernel/trace/blktrace.c
kernel/trace/ftrace.c
kernel/trace/trace_events.c
kernel/trace/trace_sched_wakeup.c
kernel/trace/trace_stack.c

diff --combined kernel/trace/blktrace.c
index e3a26188b95e6fbc863a98e4d1df25df5b5ddaee,b2fcf472774e7d2487eb79f1b46013412eb6b434..a990824c86044779c089156daea170412cc84e8e
@@@ -103,7 -103,7 +103,7 @@@ record_it
                memcpy((void *) t + sizeof(*t), data, len);
  
                if (blk_tracer)
-                       trace_buffer_unlock_commit(buffer, event, 0, pc);
+                       trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
        }
  }
  
@@@ -278,7 -278,7 +278,7 @@@ record_it
                        memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
  
                if (blk_tracer) {
-                       trace_buffer_unlock_commit(buffer, event, 0, pc);
+                       trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
                        return;
                }
        }
@@@ -437,7 -437,7 +437,7 @@@ int do_blk_trace_setup(struct request_q
                       struct block_device *bdev,
                       struct blk_user_trace_setup *buts)
  {
 -      struct blk_trace *old_bt, *bt = NULL;
 +      struct blk_trace *bt = NULL;
        struct dentry *dir = NULL;
        int ret;
  
        bt->trace_state = Blktrace_setup;
  
        ret = -EBUSY;
 -      old_bt = xchg(&q->blk_trace, bt);
 -      if (old_bt) {
 -              (void) xchg(&q->blk_trace, old_bt);
 +      if (cmpxchg(&q->blk_trace, NULL, bt))
                goto err;
 -      }
  
        if (atomic_inc_return(&blk_probes_ref) == 1)
                blk_register_tracepoints();
@@@ -1340,6 -1343,7 +1340,7 @@@ static const struct 
  static enum print_line_t print_one_line(struct trace_iterator *iter,
                                        bool classic)
  {
+       struct trace_array *tr = iter->tr;
        struct trace_seq *s = &iter->seq;
        const struct blk_io_trace *t;
        u16 what;
  
        t          = te_blk_io_trace(iter->ent);
        what       = t->action & ((1 << BLK_TC_SHIFT) - 1);
-       long_act   = !!(trace_flags & TRACE_ITER_VERBOSE);
+       long_act   = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
        log_action = classic ? &blk_log_action_classic : &blk_log_action;
  
        if (t->action == BLK_TN_MESSAGE) {
@@@ -1410,9 -1414,9 +1411,9 @@@ blk_tracer_set_flag(struct trace_array 
        /* don't output context-info for blk_classic output */
        if (bit == TRACE_BLK_OPT_CLASSIC) {
                if (set)
-                       trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
+                       tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
                else
-                       trace_flags |= TRACE_ITER_CONTEXT_INFO;
+                       tr->trace_flags |= TRACE_ITER_CONTEXT_INFO;
        }
        return 0;
  }
@@@ -1478,7 -1482,7 +1479,7 @@@ static int blk_trace_remove_queue(struc
  static int blk_trace_setup_queue(struct request_queue *q,
                                 struct block_device *bdev)
  {
 -      struct blk_trace *old_bt, *bt = NULL;
 +      struct blk_trace *bt = NULL;
        int ret = -ENOMEM;
  
        bt = kzalloc(sizeof(*bt), GFP_KERNEL);
  
        blk_trace_setup_lba(bt, bdev);
  
 -      old_bt = xchg(&q->blk_trace, bt);
 -      if (old_bt != NULL) {
 -              (void)xchg(&q->blk_trace, old_bt);
 -              ret = -EBUSY;
 +      ret = -EBUSY;
 +      if (cmpxchg(&q->blk_trace, NULL, bt))
                goto free_bt;
 -      }
  
        if (atomic_inc_return(&blk_probes_ref) == 1)
                blk_register_tracepoints();
diff --combined kernel/trace/ftrace.c
index 00611e95a8ee00bb91e7ccbd41c3ebcbc580e4f8,ea2725053771036575b52c82366323e80bc36024..3f743b147247034e6a794f7cf47176a2c6a44ece
@@@ -243,6 -243,11 +243,11 @@@ static void ftrace_sync_ipi(void *data
  
  #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  static void update_function_graph_func(void);
+ /* Both enabled by default (can be cleared by function_graph tracer flags */
+ static bool fgraph_sleep_time = true;
+ static bool fgraph_graph_time = true;
  #else
  static inline void update_function_graph_func(void) { }
  #endif
@@@ -917,7 -922,7 +922,7 @@@ static void profile_graph_return(struc
  
        calltime = trace->rettime - trace->calltime;
  
-       if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
+       if (!fgraph_graph_time) {
                int index;
  
                index = trace->depth;
@@@ -3420,27 -3425,35 +3425,35 @@@ ftrace_notrace_open(struct inode *inode
                                 inode, file);
  }
  
- static int ftrace_match(char *str, char *regex, int len, int type)
+ /* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */
+ struct ftrace_glob {
+       char *search;
+       unsigned len;
+       int type;
+ };
+ static int ftrace_match(char *str, struct ftrace_glob *g)
  {
        int matched = 0;
        int slen;
  
-       switch (type) {
+       switch (g->type) {
        case MATCH_FULL:
-               if (strcmp(str, regex) == 0)
+               if (strcmp(str, g->search) == 0)
                        matched = 1;
                break;
        case MATCH_FRONT_ONLY:
-               if (strncmp(str, regex, len) == 0)
+               if (strncmp(str, g->search, g->len) == 0)
                        matched = 1;
                break;
        case MATCH_MIDDLE_ONLY:
-               if (strstr(str, regex))
+               if (strstr(str, g->search))
                        matched = 1;
                break;
        case MATCH_END_ONLY:
                slen = strlen(str);
-               if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
+               if (slen >= g->len &&
+                   memcmp(str + slen - g->len, g->search, g->len) == 0)
                        matched = 1;
                break;
        }
  }
  
  static int
- enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
+ enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
  {
        struct ftrace_func_entry *entry;
        int ret = 0;
  
        entry = ftrace_lookup_ip(hash, rec->ip);
-       if (not) {
+       if (clear_filter) {
                /* Do nothing if it doesn't exist */
                if (!entry)
                        return 0;
  }
  
  static int
- ftrace_match_record(struct dyn_ftrace *rec, char *mod,
-                   char *regex, int len, int type)
+ ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
+               struct ftrace_glob *mod_g, int exclude_mod)
  {
        char str[KSYM_SYMBOL_LEN];
        char *modname;
  
        kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
  
-       if (mod) {
-               /* module lookup requires matching the module */
-               if (!modname || strcmp(modname, mod))
+       if (mod_g) {
+               int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
+               /* blank module name to match all modules */
+               if (!mod_g->len) {
+                       /* blank module globbing: modname xor exclude_mod */
+                       if ((!exclude_mod) != (!modname))
+                               goto func_match;
+                       return 0;
+               }
+               /* not matching the module */
+               if (!modname || !mod_matches) {
+                       if (exclude_mod)
+                               goto func_match;
+                       else
+                               return 0;
+               }
+               if (mod_matches && exclude_mod)
                        return 0;
  
+ func_match:
                /* blank search means to match all funcs in the mod */
-               if (!len)
+               if (!func_g->len)
                        return 1;
        }
  
-       return ftrace_match(str, regex, len, type);
+       return ftrace_match(str, func_g);
  }
  
  static int
- match_records(struct ftrace_hash *hash, char *buff,
-             int len, char *mod, int not)
+ match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
  {
-       unsigned search_len = 0;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
-       int type = MATCH_FULL;
-       char *search = buff;
+       struct ftrace_glob func_g = { .type = MATCH_FULL };
+       struct ftrace_glob mod_g = { .type = MATCH_FULL };
+       struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
+       int exclude_mod = 0;
        int found = 0;
        int ret;
+       int clear_filter;
+       if (func) {
+               func_g.type = filter_parse_regex(func, len, &func_g.search,
+                                                &clear_filter);
+               func_g.len = strlen(func_g.search);
+       }
  
-       if (len) {
-               type = filter_parse_regex(buff, len, &search, &not);
-               search_len = strlen(search);
+       if (mod) {
+               mod_g.type = filter_parse_regex(mod, strlen(mod),
+                               &mod_g.search, &exclude_mod);
+               mod_g.len = strlen(mod_g.search);
        }
  
        mutex_lock(&ftrace_lock);
                goto out_unlock;
  
        do_for_each_ftrace_rec(pg, rec) {
-               if (ftrace_match_record(rec, mod, search, search_len, type)) {
-                       ret = enter_record(hash, rec, not);
+               if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
+                       ret = enter_record(hash, rec, clear_filter);
                        if (ret < 0) {
                                found = ret;
                                goto out_unlock;
  static int
  ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
  {
-       return match_records(hash, buff, len, NULL, 0);
+       return match_records(hash, buff, len, NULL);
  }
  
- static int
- ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
- {
-       int not = 0;
-       /* blank or '*' mean the same */
-       if (strcmp(buff, "*") == 0)
-               buff[0] = 0;
-       /* handle the case of 'dont filter this module' */
-       if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
-               buff[0] = 0;
-               not = 1;
-       }
-       return match_records(hash, buff, strlen(buff), mod, not);
- }
  
  /*
   * We register the module command as a template to show others how
  
  static int
  ftrace_mod_callback(struct ftrace_hash *hash,
-                   char *func, char *cmd, char *param, int enable)
+                   char *func, char *cmd, char *module, int enable)
  {
-       char *mod;
-       int ret = -EINVAL;
+       int ret;
  
        /*
         * cmd == 'mod' because we only registered this func
         * you can tell which command was used by the cmd
         * parameter.
         */
-       /* we must have a module name */
-       if (!param)
-               return ret;
-       mod = strsep(&param, ":");
-       if (!strlen(mod))
-               return ret;
-       ret = ftrace_match_module_records(hash, func, mod);
+       ret = match_records(hash, func, strlen(func), module);
        if (!ret)
-               ret = -EINVAL;
+               return -EINVAL;
        if (ret < 0)
                return ret;
        return 0;
  }
  
@@@ -3699,19 -3710,20 +3710,20 @@@ register_ftrace_function_probe(char *gl
  {
        struct ftrace_ops_hash old_hash_ops;
        struct ftrace_func_probe *entry;
+       struct ftrace_glob func_g;
        struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
        struct ftrace_hash *old_hash = *orig_hash;
        struct ftrace_hash *hash;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
-       int type, len, not;
+       int not;
        unsigned long key;
        int count = 0;
-       char *search;
        int ret;
  
-       type = filter_parse_regex(glob, strlen(glob), &search, &not);
-       len = strlen(search);
+       func_g.type = filter_parse_regex(glob, strlen(glob),
+                       &func_g.search, &not);
+       func_g.len = strlen(func_g.search);
  
        /* we do not support '!' for function probes */
        if (WARN_ON(not))
  
        do_for_each_ftrace_rec(pg, rec) {
  
-               if (!ftrace_match_record(rec, NULL, search, len, type))
+               if (!ftrace_match_record(rec, &func_g, NULL, 0))
                        continue;
  
                entry = kmalloc(sizeof(*entry), GFP_KERNEL);
@@@ -3811,24 -3823,24 +3823,24 @@@ __unregister_ftrace_function_probe(cha
        struct ftrace_func_entry *rec_entry;
        struct ftrace_func_probe *entry;
        struct ftrace_func_probe *p;
+       struct ftrace_glob func_g;
        struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
        struct ftrace_hash *old_hash = *orig_hash;
        struct list_head free_list;
        struct ftrace_hash *hash;
        struct hlist_node *tmp;
        char str[KSYM_SYMBOL_LEN];
-       int type = MATCH_FULL;
-       int i, len = 0;
-       char *search;
-       int ret;
+       int i, ret;
  
        if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
-               glob = NULL;
+               func_g.search = NULL;
        else if (glob) {
                int not;
  
-               type = filter_parse_regex(glob, strlen(glob), &search, &not);
-               len = strlen(search);
+               func_g.type = filter_parse_regex(glob, strlen(glob),
+                                                &func_g.search, &not);
+               func_g.len = strlen(func_g.search);
+               func_g.search = glob;
  
                /* we do not support '!' for function probes */
                if (WARN_ON(not))
                                continue;
  
                        /* do this last, since it is the most expensive */
-                       if (glob) {
+                       if (func_g.search) {
                                kallsyms_lookup(entry->ip, NULL, NULL,
                                                NULL, str);
-                               if (!ftrace_match(str, glob, len, type))
+                               if (!ftrace_match(str, &func_g))
                                        continue;
                        }
  
                ftrace_free_entry(entry);
        }
        mutex_unlock(&ftrace_lock);
-               
   out_unlock:
        mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
        free_ftrace_hash(hash);
@@@ -4605,21 -4617,21 +4617,21 @@@ ftrace_graph_release(struct inode *inod
  static int
  ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
  {
+       struct ftrace_glob func_g;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
-       int search_len;
        int fail = 1;
-       int type, not;
-       char *search;
+       int not;
        bool exists;
        int i;
  
        /* decode regex */
-       type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
+       func_g.type = filter_parse_regex(buffer, strlen(buffer),
+                                        &func_g.search, &not);
        if (!not && *idx >= size)
                return -EBUSY;
  
-       search_len = strlen(search);
+       func_g.len = strlen(func_g.search);
  
        mutex_lock(&ftrace_lock);
  
  
        do_for_each_ftrace_rec(pg, rec) {
  
-               if (ftrace_match_record(rec, NULL, search, search_len, type)) {
+               if (ftrace_match_record(rec, &func_g, NULL, 0)) {
                        /* if it is in the array */
                        exists = false;
                        for (i = 0; i < *idx; i++) {
@@@ -4783,17 -4795,6 +4795,6 @@@ static int ftrace_cmp_ips(const void *a
        return 0;
  }
  
- static void ftrace_swap_ips(void *a, void *b, int size)
- {
-       unsigned long *ipa = a;
-       unsigned long *ipb = b;
-       unsigned long t;
-       t = *ipa;
-       *ipa = *ipb;
-       *ipb = t;
- }
  static int ftrace_process_locs(struct module *mod,
                               unsigned long *start,
                               unsigned long *end)
                return 0;
  
        sort(start, count, sizeof(*start),
-            ftrace_cmp_ips, ftrace_swap_ips);
+            ftrace_cmp_ips, NULL);
  
        start_pg = ftrace_allocate_pages(count);
        if (!start_pg)
@@@ -5639,6 -5640,16 +5640,16 @@@ static struct ftrace_ops graph_ops = 
        ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
  };
  
+ void ftrace_graph_sleep_time_control(bool enable)
+ {
+       fgraph_sleep_time = enable;
+ }
+ void ftrace_graph_graph_time_control(bool enable)
+ {
+       fgraph_graph_time = enable;
+ }
  int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
  {
        return 0;
@@@ -5697,7 -5708,7 +5708,7 @@@ free
  }
  
  static void
 -ftrace_graph_probe_sched_switch(void *ignore,
 +ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
                        struct task_struct *prev, struct task_struct *next)
  {
        unsigned long long timestamp;
         * Does the user want to count the time a function was asleep.
         * If so, do not update the time stamps.
         */
-       if (trace_flags & TRACE_ITER_SLEEP_TIME)
+       if (fgraph_sleep_time)
                return;
  
        timestamp = trace_clock_local();
index 7ca09cdc20c2f920faa004e32eb248e3bc92bf61,bee1e153005278b63676622207b4f606e2c84f72..6bbc5f652355745d24f6252a93d7b437a0efea15
  #include <linux/kthread.h>
  #include <linux/tracefs.h>
  #include <linux/uaccess.h>
+ #include <linux/bsearch.h>
  #include <linux/module.h>
  #include <linux/ctype.h>
+ #include <linux/sort.h>
  #include <linux/slab.h>
  #include <linux/delay.h>
  
+ #include <trace/events/sched.h>
  #include <asm/setup.h>
  
  #include "trace_output.h"
@@@ -38,21 -42,19 +42,19 @@@ static LIST_HEAD(ftrace_common_fields)
  static struct kmem_cache *field_cachep;
  static struct kmem_cache *file_cachep;
  
- #define SYSTEM_FL_FREE_NAME           (1 << 31)
  static inline int system_refcount(struct event_subsystem *system)
  {
-       return system->ref_count & ~SYSTEM_FL_FREE_NAME;
+       return system->ref_count;
  }
  
  static int system_refcount_inc(struct event_subsystem *system)
  {
-       return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME;
+       return system->ref_count++;
  }
  
  static int system_refcount_dec(struct event_subsystem *system)
  {
-       return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME;
+       return --system->ref_count;
  }
  
  /* Double loops, do not use break, only goto's work */
@@@ -212,12 -214,32 +214,32 @@@ int trace_event_raw_init(struct trace_e
  }
  EXPORT_SYMBOL_GPL(trace_event_raw_init);
  
+ bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
+ {
+       struct trace_array *tr = trace_file->tr;
+       struct trace_array_cpu *data;
+       struct trace_pid_list *pid_list;
+       pid_list = rcu_dereference_sched(tr->filtered_pids);
+       if (!pid_list)
+               return false;
+       data = this_cpu_ptr(tr->trace_buffer.data);
+       return data->ignore_pid;
+ }
+ EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);
  void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
                                 struct trace_event_file *trace_file,
                                 unsigned long len)
  {
        struct trace_event_call *event_call = trace_file->event_call;
  
+       if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
+           trace_event_ignore_this_pid(trace_file))
+               return NULL;
        local_save_flags(fbuffer->flags);
        fbuffer->pc = preempt_count();
        fbuffer->trace_file = trace_file;
@@@ -338,6 -360,7 +360,7 @@@ static int __ftrace_event_enable_disabl
                                         int enable, int soft_disable)
  {
        struct trace_event_call *call = file->event_call;
+       struct trace_array *tr = file->tr;
        int ret = 0;
        int disable;
  
                        if (soft_disable)
                                set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
  
-                       if (trace_flags & TRACE_ITER_RECORD_CMD) {
+                       if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
                                tracing_start_cmdline_record();
                                set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
                        }
@@@ -446,6 -469,142 +469,142 @@@ static void ftrace_clear_events(struct 
        mutex_unlock(&event_mutex);
  }
  
 -event_filter_pid_sched_switch_probe_pre(void *data,
+ static int cmp_pid(const void *key, const void *elt)
+ {
+       const pid_t *search_pid = key;
+       const pid_t *pid = elt;
+       if (*search_pid == *pid)
+               return 0;
+       if (*search_pid < *pid)
+               return -1;
+       return 1;
+ }
+ static bool
+ check_ignore_pid(struct trace_pid_list *filtered_pids, struct task_struct *task)
+ {
+       pid_t search_pid;
+       pid_t *pid;
+       /*
+        * Return false, because if filtered_pids does not exist,
+        * all pids are good to trace.
+        */
+       if (!filtered_pids)
+               return false;
+       search_pid = task->pid;
+       pid = bsearch(&search_pid, filtered_pids->pids,
+                     filtered_pids->nr_pids, sizeof(pid_t),
+                     cmp_pid);
+       if (!pid)
+               return true;
+       return false;
+ }
+ static void
 -event_filter_pid_sched_switch_probe_post(void *data,
++event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
+                   struct task_struct *prev, struct task_struct *next)
+ {
+       struct trace_array *tr = data;
+       struct trace_pid_list *pid_list;
+       pid_list = rcu_dereference_sched(tr->filtered_pids);
+       this_cpu_write(tr->trace_buffer.data->ignore_pid,
+                      check_ignore_pid(pid_list, prev) &&
+                      check_ignore_pid(pid_list, next));
+ }
+ static void
++event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
+                   struct task_struct *prev, struct task_struct *next)
+ {
+       struct trace_array *tr = data;
+       struct trace_pid_list *pid_list;
+       pid_list = rcu_dereference_sched(tr->filtered_pids);
+       this_cpu_write(tr->trace_buffer.data->ignore_pid,
+                      check_ignore_pid(pid_list, next));
+ }
+ static void
+ event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
+ {
+       struct trace_array *tr = data;
+       struct trace_pid_list *pid_list;
+       /* Nothing to do if we are already tracing */
+       if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
+               return;
+       pid_list = rcu_dereference_sched(tr->filtered_pids);
+       this_cpu_write(tr->trace_buffer.data->ignore_pid,
+                      check_ignore_pid(pid_list, task));
+ }
+ static void
+ event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
+ {
+       struct trace_array *tr = data;
+       struct trace_pid_list *pid_list;
+       /* Nothing to do if we are not tracing */
+       if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
+               return;
+       pid_list = rcu_dereference_sched(tr->filtered_pids);
+       /* Set tracing if current is enabled */
+       this_cpu_write(tr->trace_buffer.data->ignore_pid,
+                      check_ignore_pid(pid_list, current));
+ }
+ static void __ftrace_clear_event_pids(struct trace_array *tr)
+ {
+       struct trace_pid_list *pid_list;
+       struct trace_event_file *file;
+       int cpu;
+       pid_list = rcu_dereference_protected(tr->filtered_pids,
+                                            lockdep_is_held(&event_mutex));
+       if (!pid_list)
+               return;
+       unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
+       unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);
+       unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
+       unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);
+       list_for_each_entry(file, &tr->events, list) {
+               clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
+       }
+       for_each_possible_cpu(cpu)
+               per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;
+       rcu_assign_pointer(tr->filtered_pids, NULL);
+       /* Wait till all users are no longer using pid filtering */
+       synchronize_sched();
+       free_pages((unsigned long)pid_list->pids, pid_list->order);
+       kfree(pid_list);
+ }
+ static void ftrace_clear_event_pids(struct trace_array *tr)
+ {
+       mutex_lock(&event_mutex);
+       __ftrace_clear_event_pids(tr);
+       mutex_unlock(&event_mutex);
+ }
  static void __put_system(struct event_subsystem *system)
  {
        struct event_filter *filter = system->filter;
                kfree(filter->filter_string);
                kfree(filter);
        }
-       if (system->ref_count & SYSTEM_FL_FREE_NAME)
-               kfree(system->name);
+       kfree_const(system->name);
        kfree(system);
  }
  
@@@ -779,6 -937,58 +937,58 @@@ static void t_stop(struct seq_file *m, 
        mutex_unlock(&event_mutex);
  }
  
+ static void *p_start(struct seq_file *m, loff_t *pos)
+       __acquires(RCU)
+ {
+       struct trace_pid_list *pid_list;
+       struct trace_array *tr = m->private;
+       /*
+        * Grab the mutex, to keep calls to p_next() having the same
+        * tr->filtered_pids as p_start() has.
+        * If we just passed the tr->filtered_pids around, then RCU would
+        * have been enough, but doing that makes things more complex.
+        */
+       mutex_lock(&event_mutex);
+       rcu_read_lock_sched();
+       pid_list = rcu_dereference_sched(tr->filtered_pids);
+       if (!pid_list || *pos >= pid_list->nr_pids)
+               return NULL;
+       return (void *)&pid_list->pids[*pos];
+ }
+ static void p_stop(struct seq_file *m, void *p)
+       __releases(RCU)
+ {
+       rcu_read_unlock_sched();
+       mutex_unlock(&event_mutex);
+ }
+ static void *
+ p_next(struct seq_file *m, void *v, loff_t *pos)
+ {
+       struct trace_array *tr = m->private;
+       struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);
+       (*pos)++;
+       if (*pos >= pid_list->nr_pids)
+               return NULL;
+       return (void *)&pid_list->pids[*pos];
+ }
+ static int p_show(struct seq_file *m, void *v)
+ {
+       pid_t *pid = v;
+       seq_printf(m, "%d\n", *pid);
+       return 0;
+ }
  static ssize_t
  event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
@@@ -1336,8 -1546,209 +1546,209 @@@ show_header(struct file *filp, char __u
        return r;
  }
  
+ static int max_pids(struct trace_pid_list *pid_list)
+ {
+       return (PAGE_SIZE << pid_list->order) / sizeof(pid_t);
+ }
+ static void ignore_task_cpu(void *data)
+ {
+       struct trace_array *tr = data;
+       struct trace_pid_list *pid_list;
+       /*
+        * This function is called by on_each_cpu() while the
+        * event_mutex is held.
+        */
+       pid_list = rcu_dereference_protected(tr->filtered_pids,
+                                            mutex_is_locked(&event_mutex));
+       this_cpu_write(tr->trace_buffer.data->ignore_pid,
+                      check_ignore_pid(pid_list, current));
+ }
+ static ssize_t
+ ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
+                      size_t cnt, loff_t *ppos)
+ {
+       struct seq_file *m = filp->private_data;
+       struct trace_array *tr = m->private;
+       struct trace_pid_list *filtered_pids = NULL;
+       struct trace_pid_list *pid_list = NULL;
+       struct trace_event_file *file;
+       struct trace_parser parser;
+       unsigned long val;
+       loff_t this_pos;
+       ssize_t read = 0;
+       ssize_t ret = 0;
+       pid_t pid;
+       int i;
+       if (!cnt)
+               return 0;
+       ret = tracing_update_buffers();
+       if (ret < 0)
+               return ret;
+       if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
+               return -ENOMEM;
+       mutex_lock(&event_mutex);
+       /*
+        * Load as many pids into the array before doing a
+        * swap from the tr->filtered_pids to the new list.
+        */
+       while (cnt > 0) {
+               this_pos = 0;
+               ret = trace_get_user(&parser, ubuf, cnt, &this_pos);
+               if (ret < 0 || !trace_parser_loaded(&parser))
+                       break;
+               read += ret;
+               ubuf += ret;
+               cnt -= ret;
+               parser.buffer[parser.idx] = 0;
+               ret = -EINVAL;
+               if (kstrtoul(parser.buffer, 0, &val))
+                       break;
+               if (val > INT_MAX)
+                       break;
+               pid = (pid_t)val;
+               ret = -ENOMEM;
+               if (!pid_list) {
+                       pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
+                       if (!pid_list)
+                               break;
+                       filtered_pids = rcu_dereference_protected(tr->filtered_pids,
+                                                       lockdep_is_held(&event_mutex));
+                       if (filtered_pids)
+                               pid_list->order = filtered_pids->order;
+                       else
+                               pid_list->order = 0;
+                       pid_list->pids = (void *)__get_free_pages(GFP_KERNEL,
+                                                                 pid_list->order);
+                       if (!pid_list->pids)
+                               break;
+                       if (filtered_pids) {
+                               pid_list->nr_pids = filtered_pids->nr_pids;
+                               memcpy(pid_list->pids, filtered_pids->pids,
+                                      pid_list->nr_pids * sizeof(pid_t));
+                       } else
+                               pid_list->nr_pids = 0;
+               }
+               if (pid_list->nr_pids >= max_pids(pid_list)) {
+                       pid_t *pid_page;
+                       pid_page = (void *)__get_free_pages(GFP_KERNEL,
+                                                           pid_list->order + 1);
+                       if (!pid_page)
+                               break;
+                       memcpy(pid_page, pid_list->pids,
+                              pid_list->nr_pids * sizeof(pid_t));
+                       free_pages((unsigned long)pid_list->pids, pid_list->order);
+                       pid_list->order++;
+                       pid_list->pids = pid_page;
+               }
+               pid_list->pids[pid_list->nr_pids++] = pid;
+               trace_parser_clear(&parser);
+               ret = 0;
+       }
+       trace_parser_put(&parser);
+       if (ret < 0) {
+               if (pid_list)
+                       free_pages((unsigned long)pid_list->pids, pid_list->order);
+               kfree(pid_list);
+               mutex_unlock(&event_mutex);
+               return ret;
+       }
+       if (!pid_list) {
+               mutex_unlock(&event_mutex);
+               return ret;
+       }
+       sort(pid_list->pids, pid_list->nr_pids, sizeof(pid_t), cmp_pid, NULL);
+       /* Remove duplicates */
+       for (i = 1; i < pid_list->nr_pids; i++) {
+               int start = i;
+               while (i < pid_list->nr_pids &&
+                      pid_list->pids[i - 1] == pid_list->pids[i])
+                       i++;
+               if (start != i) {
+                       if (i < pid_list->nr_pids) {
+                               memmove(&pid_list->pids[start], &pid_list->pids[i],
+                                       (pid_list->nr_pids - i) * sizeof(pid_t));
+                               pid_list->nr_pids -= i - start;
+                               i = start;
+                       } else
+                               pid_list->nr_pids = start;
+               }
+       }
+       rcu_assign_pointer(tr->filtered_pids, pid_list);
+       list_for_each_entry(file, &tr->events, list) {
+               set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
+       }
+       if (filtered_pids) {
+               synchronize_sched();
+               free_pages((unsigned long)filtered_pids->pids, filtered_pids->order);
+               kfree(filtered_pids);
+       } else {
+               /*
+                * Register a probe that is called before all other probes
+                * to set ignore_pid if next or prev do not match.
+                * Register a probe this is called after all other probes
+                * to only keep ignore_pid set if next pid matches.
+                */
+               register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
+                                                tr, INT_MAX);
+               register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
+                                                tr, 0);
+               register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
+                                                tr, INT_MAX);
+               register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
+                                                tr, 0);
+       }
+       /*
+        * Ignoring of pids is done at task switch. But we have to
+        * check for those tasks that are currently running.
+        * Always do this in case a pid was appended or removed.
+        */
+       on_each_cpu(ignore_task_cpu, tr, 1);
+       mutex_unlock(&event_mutex);
+       ret = read;
+       *ppos += read;
+       return ret;
+ }
  static int ftrace_event_avail_open(struct inode *inode, struct file *file);
  static int ftrace_event_set_open(struct inode *inode, struct file *file);
+ static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
  static int ftrace_event_release(struct inode *inode, struct file *file);
  
  static const struct seq_operations show_event_seq_ops = {
@@@ -1354,6 -1765,13 +1765,13 @@@ static const struct seq_operations show
        .stop = t_stop,
  };
  
+ static const struct seq_operations show_set_pid_seq_ops = {
+       .start = p_start,
+       .next = p_next,
+       .show = p_show,
+       .stop = p_stop,
+ };
  static const struct file_operations ftrace_avail_fops = {
        .open = ftrace_event_avail_open,
        .read = seq_read,
@@@ -1369,6 -1787,14 +1787,14 @@@ static const struct file_operations ftr
        .release = ftrace_event_release,
  };
  
+ static const struct file_operations ftrace_set_event_pid_fops = {
+       .open = ftrace_event_set_pid_open,
+       .read = seq_read,
+       .write = ftrace_event_pid_write,
+       .llseek = seq_lseek,
+       .release = ftrace_event_release,
+ };
  static const struct file_operations ftrace_enable_fops = {
        .open = tracing_open_generic,
        .read = event_enable_read,
@@@ -1479,6 -1905,26 +1905,26 @@@ ftrace_event_set_open(struct inode *ino
        return ret;
  }
  
+ static int
+ ftrace_event_set_pid_open(struct inode *inode, struct file *file)
+ {
+       const struct seq_operations *seq_ops = &show_set_pid_seq_ops;
+       struct trace_array *tr = inode->i_private;
+       int ret;
+       if (trace_array_get(tr) < 0)
+               return -ENODEV;
+       if ((file->f_mode & FMODE_WRITE) &&
+           (file->f_flags & O_TRUNC))
+               ftrace_clear_event_pids(tr);
+       ret = ftrace_event_open(inode, file, seq_ops);
+       if (ret < 0)
+               trace_array_put(tr);
+       return ret;
+ }
  static struct event_subsystem *
  create_new_subsystem(const char *name)
  {
        system->ref_count = 1;
  
        /* Only allocate if dynamic (kprobes and modules) */
-       if (!core_kernel_data((unsigned long)name)) {
-               system->ref_count |= SYSTEM_FL_FREE_NAME;
-               system->name = kstrdup(name, GFP_KERNEL);
-               if (!system->name)
-                       goto out_free;
-       } else
-               system->name = name;
+       system->name = kstrdup_const(name, GFP_KERNEL);
+       if (!system->name)
+               goto out_free;
  
        system->filter = NULL;
  
        return system;
  
   out_free:
-       if (system->ref_count & SYSTEM_FL_FREE_NAME)
-               kfree(system->name);
+       kfree_const(system->name);
        kfree(system);
        return NULL;
  }
@@@ -2478,6 -2919,9 +2919,9 @@@ create_event_toplevel_files(struct dent
                return -ENOMEM;
        }
  
+       entry = tracefs_create_file("set_event_pid", 0644, parent,
+                                   tr, &ftrace_set_event_pid_fops);
        /* ring buffer internal formats */
        trace_create_file("header_page", 0444, d_events,
                          ring_buffer_print_page_header,
@@@ -2558,6 -3002,9 +3002,9 @@@ int event_trace_del_tracer(struct trace
        /* Disable any event triggers and associated soft-disabled events */
        clear_event_triggers(tr);
  
+       /* Clear the pid list */
+       __ftrace_clear_event_pids(tr);
        /* Disable any running events */
        __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
  
@@@ -2595,16 -3042,16 +3042,16 @@@ early_enable_events(struct trace_array 
  
                if (!token)
                        break;
-               if (!*token)
-                       continue;
  
-               /* Restarting syscalls requires that we stop them first */
-               if (disable_first)
-                       ftrace_set_clr_event(tr, token, 0);
+               if (*token) {
+                       /* Restarting syscalls requires that we stop them first */
+                       if (disable_first)
+                               ftrace_set_clr_event(tr, token, 0);
  
-               ret = ftrace_set_clr_event(tr, token, 1);
-               if (ret)
-                       pr_warn("Failed to enable trace event: %s\n", token);
+                       ret = ftrace_set_clr_event(tr, token, 1);
+                       if (ret)
+                               pr_warn("Failed to enable trace event: %s\n", token);
+               }
  
                /* Put back the comma to allow this to be called again */
                if (buf)
@@@ -2891,7 -3338,9 +3338,9 @@@ static __init void event_trace_self_tes
  
  static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
  
- static void
+ static struct trace_array *event_tr;
+ static void __init
  function_test_events_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs)
  {
        entry->ip                       = ip;
        entry->parent_ip                = parent_ip;
  
-       trace_buffer_unlock_commit(buffer, event, flags, pc);
+       trace_buffer_unlock_commit(event_tr, buffer, event, flags, pc);
  
   out:
        atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
@@@ -2938,6 -3387,9 +3387,9 @@@ static struct ftrace_ops trace_ops __in
  static __init void event_trace_self_test_with_function(void)
  {
        int ret;
+       event_tr = top_trace_array();
+       if (WARN_ON(!event_tr))
+               return;
        ret = register_ftrace_function(&trace_ops);
        if (WARN_ON(ret < 0)) {
                pr_info("Failed to enable function tracer for event tests\n");
index 4bcfbac289ff9e9e6ab4d39772ad2dffd89509a8,855c2c7612e8beff14253726645923f22e44a802..9d4399b553a3c1e05387b55ec861214e86efa7ed
@@@ -34,31 -34,28 +34,28 @@@ static arch_spinlock_t wakeup_lock 
  
  static void wakeup_reset(struct trace_array *tr);
  static void __wakeup_reset(struct trace_array *tr);
- static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
- static void wakeup_graph_return(struct ftrace_graph_ret *trace);
  
  static int save_flags;
- static bool function_enabled;
- #define TRACE_DISPLAY_GRAPH     1
  
- static struct tracer_opt trace_opts[] = {
  #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       /* display latency trace as call graph */
-       { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
+ static int wakeup_display_graph(struct trace_array *tr, int set);
+ # define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
+ #else
+ static inline int wakeup_display_graph(struct trace_array *tr, int set)
+ {
+       return 0;
+ }
+ # define is_graph(tr) false
  #endif
-       { } /* Empty entry */
- };
- static struct tracer_flags tracer_flags = {
-       .val  = 0,
-       .opts = trace_opts,
- };
  
- #define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)
  
  #ifdef CONFIG_FUNCTION_TRACER
  
+ static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
+ static void wakeup_graph_return(struct ftrace_graph_ret *trace);
+ static bool function_enabled;
  /*
   * Prologue for the wakeup function tracers.
   *
@@@ -128,14 -125,13 +125,13 @@@ wakeup_tracer_call(unsigned long ip, un
        atomic_dec(&data->disabled);
        preempt_enable_notrace();
  }
- #endif /* CONFIG_FUNCTION_TRACER */
  
  static int register_wakeup_function(struct trace_array *tr, int graph, int set)
  {
        int ret;
  
        /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
-       if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION)))
+       if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
                return 0;
  
        if (graph)
@@@ -163,20 -159,40 +159,40 @@@ static void unregister_wakeup_function(
        function_enabled = false;
  }
  
- static void wakeup_function_set(struct trace_array *tr, int set)
+ static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
  {
+       if (!(mask & TRACE_ITER_FUNCTION))
+               return 0;
        if (set)
-               register_wakeup_function(tr, is_graph(), 1);
+               register_wakeup_function(tr, is_graph(tr), 1);
        else
-               unregister_wakeup_function(tr, is_graph());
+               unregister_wakeup_function(tr, is_graph(tr));
+       return 1;
+ }
+ #else
+ static int register_wakeup_function(struct trace_array *tr, int graph, int set)
+ {
+       return 0;
+ }
+ static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
+ static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
+ {
+       return 0;
  }
+ #endif /* CONFIG_FUNCTION_TRACER */
  
  static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
  {
        struct tracer *tracer = tr->current_trace;
  
-       if (mask & TRACE_ITER_FUNCTION)
-               wakeup_function_set(tr, set);
+       if (wakeup_function_set(tr, mask, set))
+               return 0;
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       if (mask & TRACE_ITER_DISPLAY_GRAPH)
+               return wakeup_display_graph(tr, set);
+ #endif
  
        return trace_keep_overwrite(tracer, mask, set);
  }
@@@ -203,14 -219,9 +219,9 @@@ static void stop_func_tracer(struct tra
  }
  
  #ifdef CONFIG_FUNCTION_GRAPH_TRACER
- static int
- wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
+ static int wakeup_display_graph(struct trace_array *tr, int set)
  {
-       if (!(bit & TRACE_DISPLAY_GRAPH))
-               return -EINVAL;
-       if (!(is_graph() ^ set))
+       if (!(is_graph(tr) ^ set))
                return 0;
  
        stop_func_tracer(tr, !set);
@@@ -259,7 -270,7 +270,7 @@@ static void wakeup_graph_return(struct 
  
  static void wakeup_trace_open(struct trace_iterator *iter)
  {
-       if (is_graph())
+       if (is_graph(iter->tr))
                graph_trace_open(iter);
  }
  
@@@ -279,7 -290,7 +290,7 @@@ static enum print_line_t wakeup_print_l
         * In graph mode call the graph tracer output function,
         * otherwise go with the TRACE_FN event handler
         */
-       if (is_graph())
+       if (is_graph(iter->tr))
                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
  
        return TRACE_TYPE_UNHANDLED;
  
  static void wakeup_print_header(struct seq_file *s)
  {
-       if (is_graph())
+       if (is_graph(wakeup_trace))
                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
        else
                trace_default_header(s);
@@@ -298,7 -309,7 +309,7 @@@ __trace_function(struct trace_array *tr
                 unsigned long ip, unsigned long parent_ip,
                 unsigned long flags, int pc)
  {
-       if (is_graph())
+       if (is_graph(tr))
                trace_graph_function(tr, ip, parent_ip, flags, pc);
        else
                trace_function(tr, ip, parent_ip, flags, pc);
  #else
  #define __trace_function trace_function
  
- static int
- wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
- {
-       return -EINVAL;
- }
- static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
- {
-       return -1;
- }
  static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
  {
        return TRACE_TYPE_UNHANDLED;
  }
  
- static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
  static void wakeup_trace_open(struct trace_iterator *iter) { }
  static void wakeup_trace_close(struct trace_iterator *iter) { }
  
  #ifdef CONFIG_FUNCTION_TRACER
+ static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
+ {
+       return -1;
+ }
+ static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
  static void wakeup_print_header(struct seq_file *s)
  {
        trace_default_header(s);
@@@ -342,16 -346,16 +346,16 @@@ static void wakeup_print_header(struct 
  /*
   * Should this new latency be reported/recorded?
   */
- static int report_latency(struct trace_array *tr, cycle_t delta)
+ static bool report_latency(struct trace_array *tr, cycle_t delta)
  {
        if (tracing_thresh) {
                if (delta < tracing_thresh)
-                       return 0;
+                       return false;
        } else {
                if (delta <= tr->max_latency)
-                       return 0;
+                       return false;
        }
-       return 1;
+       return true;
  }
  
  static void
@@@ -388,7 -392,7 +392,7 @@@ tracing_sched_switch_trace(struct trace
        entry->next_cpu = task_cpu(next);
  
        if (!call_filter_check_discard(call, entry, buffer, event))
-               trace_buffer_unlock_commit(buffer, event, flags, pc);
+               trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
  }
  
  static void
@@@ -416,11 -420,11 +420,11 @@@ tracing_sched_wakeup_trace(struct trace
        entry->next_cpu                 = task_cpu(wakee);
  
        if (!call_filter_check_discard(call, entry, buffer, event))
-               trace_buffer_unlock_commit(buffer, event, flags, pc);
+               trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
  }
  
  static void notrace
 -probe_wakeup_sched_switch(void *ignore,
 +probe_wakeup_sched_switch(void *ignore, bool preempt,
                          struct task_struct *prev, struct task_struct *next)
  {
        struct trace_array_cpu *data;
@@@ -635,7 -639,7 +639,7 @@@ static void start_wakeup_tracer(struct 
         */
        smp_wmb();
  
-       if (start_func_tracer(tr, is_graph()))
+       if (start_func_tracer(tr, is_graph(tr)))
                printk(KERN_ERR "failed to start wakeup tracer\n");
  
        return;
@@@ -648,7 -652,7 +652,7 @@@ fail_deprobe
  static void stop_wakeup_tracer(struct trace_array *tr)
  {
        tracer_enabled = 0;
-       stop_func_tracer(tr, is_graph());
+       stop_func_tracer(tr, is_graph(tr));
        unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
        unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
        unregister_trace_sched_wakeup(probe_wakeup, NULL);
@@@ -659,7 -663,7 +663,7 @@@ static bool wakeup_busy
  
  static int __wakeup_tracer_init(struct trace_array *tr)
  {
-       save_flags = trace_flags;
+       save_flags = tr->trace_flags;
  
        /* non overwrite screws up the latency tracers */
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
@@@ -740,8 -744,6 +744,6 @@@ static struct tracer wakeup_tracer __re
        .print_max      = true,
        .print_header   = wakeup_print_header,
        .print_line     = wakeup_print_line,
-       .flags          = &tracer_flags,
-       .set_flag       = wakeup_set_flag,
        .flag_changed   = wakeup_flag_changed,
  #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_wakeup,
@@@ -762,8 -764,6 +764,6 @@@ static struct tracer wakeup_rt_tracer _
        .print_max      = true,
        .print_header   = wakeup_print_header,
        .print_line     = wakeup_print_line,
-       .flags          = &tracer_flags,
-       .set_flag       = wakeup_set_flag,
        .flag_changed   = wakeup_flag_changed,
  #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_wakeup,
@@@ -784,8 -784,6 +784,6 @@@ static struct tracer wakeup_dl_tracer _
        .print_max      = true,
        .print_header   = wakeup_print_header,
        .print_line     = wakeup_print_line,
-       .flags          = &tracer_flags,
-       .set_flag       = wakeup_set_flag,
        .flag_changed   = wakeup_flag_changed,
  #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_wakeup,
index 8abf1ba18085742af78176dbc514095a47643c9c,0bd212af406c49fc64ad308112e9ad7fab1eb4ff..dda9e6742950305f36fbe920f9fe0c6f68d83fbf
  
  #include "trace.h"
  
- #define STACK_TRACE_ENTRIES 500
  static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
         { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
- static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
+ unsigned stack_trace_index[STACK_TRACE_ENTRIES];
  
  /*
   * Reserve one entry for the passed in ip. This will allow
   * us to remove most or all of the stack size overhead
   * added by the stack tracer itself.
   */
- static struct stack_trace max_stack_trace = {
+ struct stack_trace stack_trace_max = {
        .max_entries            = STACK_TRACE_ENTRIES - 1,
        .entries                = &stack_dump_trace[0],
  };
  
- static unsigned long max_stack_size;
- static arch_spinlock_t max_stack_lock =
+ unsigned long stack_trace_max_size;
+ arch_spinlock_t stack_trace_max_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
  
  static DEFINE_PER_CPU(int, trace_active);
@@@ -42,30 -40,38 +40,38 @@@ static DEFINE_MUTEX(stack_sysctl_mutex)
  int stack_tracer_enabled;
  static int last_stack_tracer_enabled;
  
- static inline void print_max_stack(void)
+ void stack_trace_print(void)
  {
        long i;
        int size;
  
        pr_emerg("        Depth    Size   Location    (%d entries)\n"
                           "        -----    ----   --------\n",
-                          max_stack_trace.nr_entries);
+                          stack_trace_max.nr_entries);
  
-       for (i = 0; i < max_stack_trace.nr_entries; i++) {
+       for (i = 0; i < stack_trace_max.nr_entries; i++) {
                if (stack_dump_trace[i] == ULONG_MAX)
                        break;
-               if (i+1 == max_stack_trace.nr_entries ||
+               if (i+1 == stack_trace_max.nr_entries ||
                                stack_dump_trace[i+1] == ULONG_MAX)
-                       size = stack_dump_index[i];
+                       size = stack_trace_index[i];
                else
-                       size = stack_dump_index[i] - stack_dump_index[i+1];
+                       size = stack_trace_index[i] - stack_trace_index[i+1];
  
-               pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_dump_index[i],
+               pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
                                size, (void *)stack_dump_trace[i]);
        }
  }
  
- static inline void
+ /*
+  * When arch-specific code overides this function, the following
+  * data should be filled up, assuming stack_trace_max_lock is held to
+  * prevent concurrent updates.
+  *     stack_trace_index[]
+  *     stack_trace_max
+  *     stack_trace_max_size
+  */
+ void __weak
  check_stack(unsigned long ip, unsigned long *stack)
  {
        unsigned long this_size, flags; unsigned long *p, *top, *start;
        /* Remove the frame of the tracer */
        this_size -= frame_size;
  
-       if (this_size <= max_stack_size)
+       if (this_size <= stack_trace_max_size)
                return;
  
        /* we do not handle interrupt stacks yet */
        if (!object_is_on_stack(stack))
                return;
  
 +      /* Can't do this from NMI context (can cause deadlocks) */
 +      if (in_nmi())
 +              return;
 +
        local_irq_save(flags);
-       arch_spin_lock(&max_stack_lock);
+       arch_spin_lock(&stack_trace_max_lock);
  
 +      /*
 +       * RCU may not be watching, make it see us.
 +       * The stack trace code uses rcu_sched.
 +       */
 +      rcu_irq_enter();
 +
        /* In case another CPU set the tracer_frame on us */
        if (unlikely(!frame_size))
                this_size -= tracer_frame;
  
        /* a race could have already updated it */
-       if (this_size <= max_stack_size)
+       if (this_size <= stack_trace_max_size)
                goto out;
  
-       max_stack_size = this_size;
+       stack_trace_max_size = this_size;
  
-       max_stack_trace.nr_entries = 0;
-       max_stack_trace.skip = 3;
+       stack_trace_max.nr_entries = 0;
+       stack_trace_max.skip = 3;
  
-       save_stack_trace(&max_stack_trace);
+       save_stack_trace(&stack_trace_max);
  
        /* Skip over the overhead of the stack tracer itself */
-       for (i = 0; i < max_stack_trace.nr_entries; i++) {
+       for (i = 0; i < stack_trace_max.nr_entries; i++) {
                if (stack_dump_trace[i] == ip)
                        break;
        }
         * loop will only happen once. This code only takes place
         * on a new max, so it is far from a fast path.
         */
-       while (i < max_stack_trace.nr_entries) {
+       while (i < stack_trace_max.nr_entries) {
                int found = 0;
  
-               stack_dump_index[x] = this_size;
+               stack_trace_index[x] = this_size;
                p = start;
  
-               for (; p < top && i < max_stack_trace.nr_entries; p++) {
+               for (; p < top && i < stack_trace_max.nr_entries; p++) {
                        if (stack_dump_trace[i] == ULONG_MAX)
                                break;
                        if (*p == stack_dump_trace[i]) {
                                stack_dump_trace[x] = stack_dump_trace[i++];
-                               this_size = stack_dump_index[x++] =
+                               this_size = stack_trace_index[x++] =
                                        (top - p) * sizeof(unsigned long);
                                found = 1;
                                /* Start the search from here */
                                if (unlikely(!tracer_frame)) {
                                        tracer_frame = (p - stack) *
                                                sizeof(unsigned long);
-                                       max_stack_size -= tracer_frame;
+                                       stack_trace_max_size -= tracer_frame;
                                }
                        }
                }
                        i++;
        }
  
-       max_stack_trace.nr_entries = x;
+       stack_trace_max.nr_entries = x;
        for (; x < i; x++)
                stack_dump_trace[x] = ULONG_MAX;
  
        if (task_stack_end_corrupted(current)) {
-               print_max_stack();
+               stack_trace_print();
                BUG();
        }
  
   out:
-       arch_spin_unlock(&max_stack_lock);
 +      rcu_irq_exit();
+       arch_spin_unlock(&stack_trace_max_lock);
        local_irq_restore(flags);
  }
  
@@@ -251,9 -246,9 +257,9 @@@ stack_max_size_write(struct file *filp
        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)++;
  
-       arch_spin_lock(&max_stack_lock);
+       arch_spin_lock(&stack_trace_max_lock);
        *ptr = val;
-       arch_spin_unlock(&max_stack_lock);
+       arch_spin_unlock(&stack_trace_max_lock);
  
        per_cpu(trace_active, cpu)--;
        local_irq_restore(flags);
@@@ -273,7 -268,7 +279,7 @@@ __next(struct seq_file *m, loff_t *pos
  {
        long n = *pos - 1;
  
-       if (n > max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
+       if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
                return NULL;
  
        m->private = (void *)n;
@@@ -296,7 -291,7 +302,7 @@@ static void *t_start(struct seq_file *m
        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)++;
  
-       arch_spin_lock(&max_stack_lock);
+       arch_spin_lock(&stack_trace_max_lock);
  
        if (*pos == 0)
                return SEQ_START_TOKEN;
@@@ -308,7 -303,7 +314,7 @@@ static void t_stop(struct seq_file *m, 
  {
        int cpu;
  
-       arch_spin_unlock(&max_stack_lock);
+       arch_spin_unlock(&stack_trace_max_lock);
  
        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)--;
@@@ -343,9 -338,9 +349,9 @@@ static int t_show(struct seq_file *m, v
                seq_printf(m, "        Depth    Size   Location"
                           "    (%d entries)\n"
                           "        -----    ----   --------\n",
-                          max_stack_trace.nr_entries);
+                          stack_trace_max.nr_entries);
  
-               if (!stack_tracer_enabled && !max_stack_size)
+               if (!stack_tracer_enabled && !stack_trace_max_size)
                        print_disabled(m);
  
                return 0;
  
        i = *(long *)v;
  
-       if (i >= max_stack_trace.nr_entries ||
+       if (i >= stack_trace_max.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
                return 0;
  
-       if (i+1 == max_stack_trace.nr_entries ||
+       if (i+1 == stack_trace_max.nr_entries ||
            stack_dump_trace[i+1] == ULONG_MAX)
-               size = stack_dump_index[i];
+               size = stack_trace_index[i];
        else
-               size = stack_dump_index[i] - stack_dump_index[i+1];
+               size = stack_trace_index[i] - stack_trace_index[i+1];
  
-       seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);
+       seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);
  
        trace_lookup_stack(m, i);
  
@@@ -453,7 -448,7 +459,7 @@@ static __init int stack_trace_init(void
                return 0;
  
        trace_create_file("stack_max_size", 0644, d_tracer,
-                       &max_stack_size, &stack_max_size_fops);
+                       &stack_trace_max_size, &stack_max_size_fops);
  
        trace_create_file("stack_trace", 0444, d_tracer,
                        NULL, &stack_trace_fops);