Merge tag 'trace-v4.15' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 17 Nov 2017 22:58:01 +0000 (14:58 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 17 Nov 2017 22:58:01 +0000 (14:58 -0800)
Pull tracing updates from Steven Rostedt:

 - allow module init functions to be traced

 - clean up events that are unused, or not used by the current config (saves space)

 - clean up the trace histogram code

 - add support for preempt and interrupt enable/disable events

 - various other clean ups

* tag 'trace-v4.15' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (30 commits)
  tracing, thermal: Hide cpu cooling trace events when not in use
  tracing, thermal: Hide devfreq trace events when not in use
  ftrace: Kill FTRACE_OPS_FL_PER_CPU
  perf/ftrace: Small cleanup
  perf/ftrace: Fix function trace events
  perf/ftrace: Revert ("perf/ftrace: Fix double traces of perf on ftrace:function")
  tracing, dma-buf: Remove unused trace event dma_fence_annotate_wait_on
  tracing, memcg, vmscan: Hide trace events when not in use
  tracing/xen: Hide events that are not used when X86_PAE is not defined
  tracing: mark trace_test_buffer as __maybe_unused
  printk: Remove superfluous memory barriers from printk_safe
  ftrace: Clear hashes of stale ips of init memory
  tracing: Add support for preempt and irq enable/disable events
  tracing: Prepare to add preempt and irq trace events
  ftrace/kallsyms: Have /proc/kallsyms show saved mod init functions
  ftrace: Add freeing algorithm to free ftrace_mod_maps
  ftrace: Save module init functions kallsyms symbols for tracing
  ftrace: Allow module init functions to be traced
  ftrace: Add a ftrace_free_mem() function for modules to use
  tracing: Reimplement log2
  ...

32 files changed:
drivers/dma-buf/dma-fence.c
include/linux/ftrace.h
include/linux/init.h
include/linux/perf_event.h
include/linux/trace_events.h
include/trace/events/dma_fence.h
include/trace/events/preemptirq.h [new file with mode: 0644]
include/trace/events/thermal.h
include/trace/events/vmscan.h
include/trace/events/xen.h
kernel/events/core.c
kernel/kallsyms.c
kernel/module.c
kernel/printk/printk_safe.c
kernel/trace/Kconfig
kernel/trace/Makefile
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_event_perf.c
kernel/trace/trace_events.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_irqsoff.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_probe.c
kernel/trace/trace_probe.h
kernel/trace/trace_selftest.c
kernel/trace/trace_syscalls.c
kernel/trace/trace_uprobe.c
kernel/trace/tracing_map.c
kernel/trace/tracing_map.h

index 9a302799040e4529bc5d08ce7c17343421ea19b6..5d101c4053e05f2fddb8c656d543afb62fecd05b 100644 (file)
@@ -27,7 +27,6 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/dma_fence.h>
 
-EXPORT_TRACEPOINT_SYMBOL(dma_fence_annotate_wait_on);
 EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
 EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
 
index e54d257983f28c4e395d9a7bf871652e7f89c3de..2bab81951ced732fb832f3b92cd2b71e840f9b78 100644 (file)
@@ -52,6 +52,30 @@ static inline void early_trace_init(void) { }
 struct module;
 struct ftrace_hash;
 
+#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
+       defined(CONFIG_DYNAMIC_FTRACE)
+const char *
+ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
+                  unsigned long *off, char **modname, char *sym);
+int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
+                          char *type, char *name,
+                          char *module_name, int *exported);
+#else
+static inline const char *
+ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
+                  unsigned long *off, char **modname, char *sym)
+{
+       return NULL;
+}
+static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
+                                        char *type, char *name,
+                                        char *module_name, int *exported)
+{
+       return -1;
+}
+#endif
+
+
 #ifdef CONFIG_FUNCTION_TRACER
 
 extern int ftrace_enabled;
@@ -79,10 +103,6 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
  * ENABLED - set/unset when ftrace_ops is registered/unregistered
  * DYNAMIC - set when ftrace_ops is registered to denote dynamically
  *           allocated ftrace_ops which need special care
- * PER_CPU - set manualy by ftrace_ops user to denote the ftrace_ops
- *           could be controlled by following calls:
- *             ftrace_function_local_enable
- *             ftrace_function_local_disable
  * SAVE_REGS - The ftrace_ops wants regs saved at each function called
  *            and passed to the callback. If this flag is set, but the
  *            architecture does not support passing regs
@@ -126,21 +146,20 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
 enum {
        FTRACE_OPS_FL_ENABLED                   = 1 << 0,
        FTRACE_OPS_FL_DYNAMIC                   = 1 << 1,
-       FTRACE_OPS_FL_PER_CPU                   = 1 << 2,
-       FTRACE_OPS_FL_SAVE_REGS                 = 1 << 3,
-       FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED    = 1 << 4,
-       FTRACE_OPS_FL_RECURSION_SAFE            = 1 << 5,
-       FTRACE_OPS_FL_STUB                      = 1 << 6,
-       FTRACE_OPS_FL_INITIALIZED               = 1 << 7,
-       FTRACE_OPS_FL_DELETED                   = 1 << 8,
-       FTRACE_OPS_FL_ADDING                    = 1 << 9,
-       FTRACE_OPS_FL_REMOVING                  = 1 << 10,
-       FTRACE_OPS_FL_MODIFYING                 = 1 << 11,
-       FTRACE_OPS_FL_ALLOC_TRAMP               = 1 << 12,
-       FTRACE_OPS_FL_IPMODIFY                  = 1 << 13,
-       FTRACE_OPS_FL_PID                       = 1 << 14,
-       FTRACE_OPS_FL_RCU                       = 1 << 15,
-       FTRACE_OPS_FL_TRACE_ARRAY               = 1 << 16,
+       FTRACE_OPS_FL_SAVE_REGS                 = 1 << 2,
+       FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED    = 1 << 3,
+       FTRACE_OPS_FL_RECURSION_SAFE            = 1 << 4,
+       FTRACE_OPS_FL_STUB                      = 1 << 5,
+       FTRACE_OPS_FL_INITIALIZED               = 1 << 6,
+       FTRACE_OPS_FL_DELETED                   = 1 << 7,
+       FTRACE_OPS_FL_ADDING                    = 1 << 8,
+       FTRACE_OPS_FL_REMOVING                  = 1 << 9,
+       FTRACE_OPS_FL_MODIFYING                 = 1 << 10,
+       FTRACE_OPS_FL_ALLOC_TRAMP               = 1 << 11,
+       FTRACE_OPS_FL_IPMODIFY                  = 1 << 12,
+       FTRACE_OPS_FL_PID                       = 1 << 13,
+       FTRACE_OPS_FL_RCU                       = 1 << 14,
+       FTRACE_OPS_FL_TRACE_ARRAY               = 1 << 15,
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -152,8 +171,10 @@ struct ftrace_ops_hash {
 };
 
 void ftrace_free_init_mem(void);
+void ftrace_free_mem(struct module *mod, void *start, void *end);
 #else
 static inline void ftrace_free_init_mem(void) { }
+static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
 #endif
 
 /*
@@ -173,7 +194,6 @@ struct ftrace_ops {
        unsigned long                   flags;
        void                            *private;
        ftrace_func_t                   saved_func;
-       int __percpu                    *disabled;
 #ifdef CONFIG_DYNAMIC_FTRACE
        struct ftrace_ops_hash          local_hash;
        struct ftrace_ops_hash          *func_hash;
@@ -205,55 +225,6 @@ int register_ftrace_function(struct ftrace_ops *ops);
 int unregister_ftrace_function(struct ftrace_ops *ops);
 void clear_ftrace_function(void);
 
-/**
- * ftrace_function_local_enable - enable ftrace_ops on current cpu
- *
- * This function enables tracing on current cpu by decreasing
- * the per cpu control variable.
- * It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
- * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
- */
-static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
-{
-       if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
-               return;
-
-       (*this_cpu_ptr(ops->disabled))--;
-}
-
-/**
- * ftrace_function_local_disable - disable ftrace_ops on current cpu
- *
- * This function disables tracing on current cpu by increasing
- * the per cpu control variable.
- * It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
- * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
- */
-static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
-{
-       if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
-               return;
-
-       (*this_cpu_ptr(ops->disabled))++;
-}
-
-/**
- * ftrace_function_local_disabled - returns ftrace_ops disabled value
- *                                  on current cpu
- *
- * This function returns value of ftrace_ops::disabled on current cpu.
- * It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
- * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
- */
-static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
-{
-       WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU));
-       return *this_cpu_ptr(ops->disabled);
-}
-
 extern void ftrace_stub(unsigned long a0, unsigned long a1,
                        struct ftrace_ops *op, struct pt_regs *regs);
 
@@ -271,6 +242,7 @@ static inline int ftrace_nr_registered_ops(void)
 static inline void clear_ftrace_function(void) { }
 static inline void ftrace_kill(void) { }
 static inline void ftrace_free_init_mem(void) { }
+static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_STACK_TRACER
@@ -743,7 +715,8 @@ static inline unsigned long get_lock_parent_ip(void)
   static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
 #endif
 
-#ifdef CONFIG_PREEMPT_TRACER
+#if defined(CONFIG_PREEMPT_TRACER) || \
+       (defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
   extern void trace_preempt_on(unsigned long a0, unsigned long a1);
   extern void trace_preempt_off(unsigned long a0, unsigned long a1);
 #else
index f38b993edacb7c30fd40f3eb9623373aab529397..ea1b31101d9e32fad9a134d6f1444d6f68bd6b73 100644 (file)
@@ -40,7 +40,7 @@
 
 /* These are for everybody (although not all archs will actually
    discard it in modules) */
-#define __init         __section(.init.text) __cold __inittrace __latent_entropy
+#define __init         __section(.init.text) __cold  __latent_entropy
 #define __initdata     __section(.init.data)
 #define __initconst    __section(.init.rodata)
 #define __exitdata     __section(.exit.data)
 
 #ifdef MODULE
 #define __exitused
-#define __inittrace notrace
 #else
 #define __exitused  __used
-#define __inittrace
 #endif
 
 #define __exit          __section(.exit.text) __exitused __cold notrace
index 874b71a700586340439e34ea73a0dc5d14695bba..2c9c87d8a0c18e5f5c1cf2a8e148504e4f3ad3a9 100644 (file)
@@ -1169,7 +1169,7 @@ extern void perf_event_init(void);
 extern void perf_tp_event(u16 event_type, u64 count, void *record,
                          int entry_size, struct pt_regs *regs,
                          struct hlist_head *head, int rctx,
-                         struct task_struct *task, struct perf_event *event);
+                         struct task_struct *task);
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
index 84014ecfa67ff284fc6b657e565e57ea12d9c89e..af44e7c2d577e8f0bd7c50f12ca70deeb4714dcc 100644 (file)
@@ -174,6 +174,11 @@ enum trace_reg {
        TRACE_REG_PERF_UNREGISTER,
        TRACE_REG_PERF_OPEN,
        TRACE_REG_PERF_CLOSE,
+       /*
+        * These (ADD/DEL) use a 'boolean' return value, where 1 (true) means a
+        * custom action was taken and the default action is not to be
+        * performed.
+        */
        TRACE_REG_PERF_ADD,
        TRACE_REG_PERF_DEL,
 #endif
@@ -542,9 +547,9 @@ void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
 static inline void
 perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
                       u64 count, struct pt_regs *regs, void *head,
-                      struct task_struct *task, struct perf_event *event)
+                      struct task_struct *task)
 {
-       perf_tp_event(type, count, raw_data, size, regs, head, rctx, task, event);
+       perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
 }
 
 #endif
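
The ADD/DEL return-value convention documented above is easiest to see from
the callback side. Below is a minimal sketch of a reg() implementation that
takes over ADD/DEL handling; the callback name is hypothetical, and only the
return-value convention comes from the comment in the hunk above
(perf_ftrace_event_register, later in this merge, is the in-tree user):

#include <linux/trace_events.h>

/*
 * Hypothetical reg() callback: return 1 from ADD/DEL to signal that a
 * custom action was taken and the core should skip the default per-cpu
 * hlist enqueue/dequeue; return 0 to request the default action.
 */
static int my_event_register(struct trace_event_call *call,
			     enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_PERF_ADD:
		/* custom per-event bookkeeping instead of the hlist add */
		return 1;
	case TRACE_REG_PERF_DEL:
		/* custom per-event bookkeeping instead of the hlist del */
		return 1;
	default:
		return 0;	/* take the default action */
	}
}
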
index d61bfddcc621f16ac4ca4a85348c483bf3f30d44..2212adda8f77f7d8cb44b0bdb0b22445b1fcb87d 100644 (file)
@@ -9,46 +9,6 @@
 
 struct dma_fence;
 
-TRACE_EVENT(dma_fence_annotate_wait_on,
-
-       /* fence: the fence waiting on f1, f1: the fence to be waited on. */
-       TP_PROTO(struct dma_fence *fence, struct dma_fence *f1),
-
-       TP_ARGS(fence, f1),
-
-       TP_STRUCT__entry(
-               __string(driver, fence->ops->get_driver_name(fence))
-               __string(timeline, fence->ops->get_timeline_name(fence))
-               __field(unsigned int, context)
-               __field(unsigned int, seqno)
-
-               __string(waiting_driver, f1->ops->get_driver_name(f1))
-               __string(waiting_timeline, f1->ops->get_timeline_name(f1))
-               __field(unsigned int, waiting_context)
-               __field(unsigned int, waiting_seqno)
-       ),
-
-       TP_fast_assign(
-               __assign_str(driver, fence->ops->get_driver_name(fence))
-               __assign_str(timeline, fence->ops->get_timeline_name(fence))
-               __entry->context = fence->context;
-               __entry->seqno = fence->seqno;
-
-               __assign_str(waiting_driver, f1->ops->get_driver_name(f1))
-               __assign_str(waiting_timeline, f1->ops->get_timeline_name(f1))
-               __entry->waiting_context = f1->context;
-               __entry->waiting_seqno = f1->seqno;
-
-       ),
-
-       TP_printk("driver=%s timeline=%s context=%u seqno=%u "  \
-                 "waits on driver=%s timeline=%s context=%u seqno=%u",
-                 __get_str(driver), __get_str(timeline), __entry->context,
-                 __entry->seqno,
-                 __get_str(waiting_driver), __get_str(waiting_timeline),
-                 __entry->waiting_context, __entry->waiting_seqno)
-);
-
 DECLARE_EVENT_CLASS(dma_fence,
 
        TP_PROTO(struct dma_fence *fence),
diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h
new file mode 100644 (file)
index 0000000..f5024c5
--- /dev/null
@@ -0,0 +1,70 @@
+#ifdef CONFIG_PREEMPTIRQ_EVENTS
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM preemptirq
+
+#if !defined(_TRACE_PREEMPTIRQ_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PREEMPTIRQ_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+#include <linux/string.h>
+#include <asm/sections.h>
+
+DECLARE_EVENT_CLASS(preemptirq_template,
+
+       TP_PROTO(unsigned long ip, unsigned long parent_ip),
+
+       TP_ARGS(ip, parent_ip),
+
+       TP_STRUCT__entry(
+               __field(u32, caller_offs)
+               __field(u32, parent_offs)
+       ),
+
+       TP_fast_assign(
+               __entry->caller_offs = (u32)(ip - (unsigned long)_stext);
+               __entry->parent_offs = (u32)(parent_ip - (unsigned long)_stext);
+       ),
+
+       TP_printk("caller=%pF parent=%pF",
+                 (void *)((unsigned long)(_stext) + __entry->caller_offs),
+                 (void *)((unsigned long)(_stext) + __entry->parent_offs))
+);
+
+#ifndef CONFIG_PROVE_LOCKING
+DEFINE_EVENT(preemptirq_template, irq_disable,
+            TP_PROTO(unsigned long ip, unsigned long parent_ip),
+            TP_ARGS(ip, parent_ip));
+
+DEFINE_EVENT(preemptirq_template, irq_enable,
+            TP_PROTO(unsigned long ip, unsigned long parent_ip),
+            TP_ARGS(ip, parent_ip));
+#endif
+
+#ifdef CONFIG_DEBUG_PREEMPT
+DEFINE_EVENT(preemptirq_template, preempt_disable,
+            TP_PROTO(unsigned long ip, unsigned long parent_ip),
+            TP_ARGS(ip, parent_ip));
+
+DEFINE_EVENT(preemptirq_template, preempt_enable,
+            TP_PROTO(unsigned long ip, unsigned long parent_ip),
+            TP_ARGS(ip, parent_ip));
+#endif
+
+#endif /* _TRACE_PREEMPTIRQ_H */
+
+#include <trace/define_trace.h>
+
+#else /* !CONFIG_PREEMPTIRQ_EVENTS */
+
+#define trace_irq_enable(...)
+#define trace_irq_disable(...)
+#define trace_preempt_enable(...)
+#define trace_preempt_disable(...)
+#define trace_irq_enable_rcuidle(...)
+#define trace_irq_disable_rcuidle(...)
+#define trace_preempt_enable_rcuidle(...)
+#define trace_preempt_disable_rcuidle(...)
+
+#endif
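
These definitions only declare the tracepoints; they are fired from the
irq/preempt hooks in kernel/trace/trace_irqsoff.c (built via the new Makefile
rule below). The following is a simplified sketch of the call-site pattern;
the per-cpu guard and the tracer_hardirqs_off() stub are modeled on that file
rather than taken from this diff:

#include <linux/percpu.h>
#include <linux/ftrace.h>
#include <trace/events/preemptirq.h>

/* Stand-in for the irqsoff latency tracer hook. */
static void tracer_hardirqs_off(void) { }

/* One-shot guard so nested hardirq-off notifications fire only once. */
static DEFINE_PER_CPU(int, tracing_irq_cpu);

void trace_hardirqs_off(void)
{
	if (this_cpu_read(tracing_irq_cpu))
		return;

	this_cpu_write(tracing_irq_cpu, 1);
	/* _rcuidle variant: this path can run where RCU is not watching */
	trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
	tracer_hardirqs_off();
}

Note that the event payload is deliberately small: the template above stores
caller and parent as u32 offsets from _stext rather than two full pointers,
and reconstructs the addresses in TP_printk().
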
index 466c09d882ad3e928447f687b39bebb139f0821f..78946640fe03962c7207dec66844a895c0481f1d 100644 (file)
@@ -91,6 +91,7 @@ TRACE_EVENT(thermal_zone_trip,
                show_tzt_type(__entry->trip_type))
 );
 
+#ifdef CONFIG_CPU_THERMAL
 TRACE_EVENT(thermal_power_cpu_get_power,
        TP_PROTO(const struct cpumask *cpus, unsigned long freq, u32 *load,
                size_t load_len, u32 dynamic_power, u32 static_power),
@@ -148,7 +149,9 @@ TRACE_EVENT(thermal_power_cpu_limit,
                __get_bitmask(cpumask), __entry->freq, __entry->cdev_state,
                __entry->power)
 );
+#endif /* CONFIG_CPU_THERMAL */
 
+#ifdef CONFIG_DEVFREQ_THERMAL
 TRACE_EVENT(thermal_power_devfreq_get_power,
        TP_PROTO(struct thermal_cooling_device *cdev,
                 struct devfreq_dev_status *status, unsigned long freq,
@@ -204,6 +207,7 @@ TRACE_EVENT(thermal_power_devfreq_limit,
                __get_str(type), __entry->freq, __entry->cdev_state,
                __entry->power)
 );
+#endif /* CONFIG_DEVFREQ_THERMAL */
 #endif /* _TRACE_THERMAL_H */
 
 /* This part must be outside protection */
index dc23cf03240348f54df97966bf9372d2684bc249..d70b53e65f4323cdb51dc41c18b890f2ae7e6f25 100644 (file)
@@ -134,6 +134,7 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_direct_reclaim_b
        TP_ARGS(order, may_writepage, gfp_flags, classzone_idx)
 );
 
+#ifdef CONFIG_MEMCG
 DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_reclaim_begin,
 
        TP_PROTO(int order, int may_writepage, gfp_t gfp_flags, int classzone_idx),
@@ -147,6 +148,7 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_softlimit_
 
        TP_ARGS(order, may_writepage, gfp_flags, classzone_idx)
 );
+#endif /* CONFIG_MEMCG */
 
 DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_end_template,
 
@@ -172,6 +174,7 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_direct_reclaim_end
        TP_ARGS(nr_reclaimed)
 );
 
+#ifdef CONFIG_MEMCG
 DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_reclaim_end,
 
        TP_PROTO(unsigned long nr_reclaimed),
@@ -185,6 +188,7 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_softlimit_re
 
        TP_ARGS(nr_reclaimed)
 );
+#endif /* CONFIG_MEMCG */
 
 TRACE_EVENT(mm_shrink_slab_start,
        TP_PROTO(struct shrinker *shr, struct shrink_control *sc,
index a7c8b452aab9c7fefaa3908a31d00214b24d5717..b8adf05c534e725d1e0e3b614181b008e2488f65 100644 (file)
@@ -148,7 +148,6 @@ DECLARE_EVENT_CLASS(xen_mmu__set_pte,
                     TP_ARGS(ptep, pteval))
 
 DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte);
-DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte_atomic);
 
 TRACE_EVENT(xen_mmu_set_pte_at,
            TP_PROTO(struct mm_struct *mm, unsigned long addr,
@@ -170,21 +169,6 @@ TRACE_EVENT(xen_mmu_set_pte_at,
                      (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
        );
 
-TRACE_EVENT(xen_mmu_pte_clear,
-           TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep),
-           TP_ARGS(mm, addr, ptep),
-           TP_STRUCT__entry(
-                   __field(struct mm_struct *, mm)
-                   __field(unsigned long, addr)
-                   __field(pte_t *, ptep)
-                   ),
-           TP_fast_assign(__entry->mm = mm;
-                          __entry->addr = addr;
-                          __entry->ptep = ptep),
-           TP_printk("mm %p addr %lx ptep %p",
-                     __entry->mm, __entry->addr, __entry->ptep)
-       );
-
 TRACE_DEFINE_SIZEOF(pmdval_t);
 
 TRACE_EVENT(xen_mmu_set_pmd,
@@ -202,6 +186,24 @@ TRACE_EVENT(xen_mmu_set_pmd,
                      (int)sizeof(pmdval_t) * 2, (unsigned long long)__entry->pmdval)
        );
 
+#ifdef CONFIG_X86_PAE
+DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte_atomic);
+
+TRACE_EVENT(xen_mmu_pte_clear,
+           TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep),
+           TP_ARGS(mm, addr, ptep),
+           TP_STRUCT__entry(
+                   __field(struct mm_struct *, mm)
+                   __field(unsigned long, addr)
+                   __field(pte_t *, ptep)
+                   ),
+           TP_fast_assign(__entry->mm = mm;
+                          __entry->addr = addr;
+                          __entry->ptep = ptep),
+           TP_printk("mm %p addr %lx ptep %p",
+                     __entry->mm, __entry->addr, __entry->ptep)
+       );
+
 TRACE_EVENT(xen_mmu_pmd_clear,
            TP_PROTO(pmd_t *pmdp),
            TP_ARGS(pmdp),
@@ -211,6 +213,7 @@ TRACE_EVENT(xen_mmu_pmd_clear,
            TP_fast_assign(__entry->pmdp = pmdp),
            TP_printk("pmdp %p", __entry->pmdp)
        );
+#endif
 
 #if CONFIG_PGTABLE_LEVELS >= 4
 
index 3939a4674e0ae48395f290edc20d61f4c307c89b..9404c631bd3f9be1d0f501acc353d181c1fdbd40 100644 (file)
@@ -7874,15 +7874,16 @@ void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
                }
        }
        perf_tp_event(call->event.type, count, raw_data, size, regs, head,
-                     rctx, task, NULL);
+                     rctx, task);
 }
 EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
 
 void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
                   struct pt_regs *regs, struct hlist_head *head, int rctx,
-                  struct task_struct *task, struct perf_event *event)
+                  struct task_struct *task)
 {
        struct perf_sample_data data;
+       struct perf_event *event;
 
        struct perf_raw_record raw = {
                .frag = {
@@ -7896,15 +7897,9 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
 
        perf_trace_buf_update(record, event_type);
 
-       /* Use the given event instead of the hlist */
-       if (event) {
+       hlist_for_each_entry_rcu(event, head, hlist_entry) {
                if (perf_tp_event_match(event, &data, regs))
                        perf_swevent_event(event, count, &data, regs);
-       } else {
-               hlist_for_each_entry_rcu(event, head, hlist_entry) {
-                       if (perf_tp_event_match(event, &data, regs))
-                               perf_swevent_event(event, count, &data, regs);
-               }
        }
 
        /*
index 1e6ae66c6244329fa21f6190b1ab9d088493bfd5..531ffa984bc262716b3412f5cfc2476c448d379f 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/ctype.h>
 #include <linux/slab.h>
 #include <linux/filter.h>
+#include <linux/ftrace.h>
 #include <linux/compiler.h>
 
 #include <asm/sections.h>
@@ -337,6 +338,10 @@ const char *kallsyms_lookup(unsigned long addr,
        if (!ret)
                ret = bpf_address_lookup(addr, symbolsize,
                                         offset, modname, namebuf);
+
+       if (!ret)
+               ret = ftrace_mod_address_lookup(addr, symbolsize,
+                                               offset, modname, namebuf);
        return ret;
 }
 
@@ -474,6 +479,7 @@ EXPORT_SYMBOL(__print_symbol);
 struct kallsym_iter {
        loff_t pos;
        loff_t pos_mod_end;
+       loff_t pos_ftrace_mod_end;
        unsigned long value;
        unsigned int nameoff; /* If iterating in core kernel symbols. */
        char type;
@@ -497,11 +503,25 @@ static int get_ksymbol_mod(struct kallsym_iter *iter)
        return 1;
 }
 
+static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter)
+{
+       int ret = ftrace_mod_get_kallsym(iter->pos - iter->pos_mod_end,
+                                        &iter->value, &iter->type,
+                                        iter->name, iter->module_name,
+                                        &iter->exported);
+       if (ret < 0) {
+               iter->pos_ftrace_mod_end = iter->pos;
+               return 0;
+       }
+
+       return 1;
+}
+
 static int get_ksymbol_bpf(struct kallsym_iter *iter)
 {
        iter->module_name[0] = '\0';
        iter->exported = 0;
-       return bpf_get_kallsym(iter->pos - iter->pos_mod_end,
+       return bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end,
                               &iter->value, &iter->type,
                               iter->name) < 0 ? 0 : 1;
 }
@@ -526,20 +546,31 @@ static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
        iter->name[0] = '\0';
        iter->nameoff = get_symbol_offset(new_pos);
        iter->pos = new_pos;
-       if (new_pos == 0)
+       if (new_pos == 0) {
                iter->pos_mod_end = 0;
+               iter->pos_ftrace_mod_end = 0;
+       }
 }
 
 static int update_iter_mod(struct kallsym_iter *iter, loff_t pos)
 {
        iter->pos = pos;
 
-       if (iter->pos_mod_end > 0 &&
-           iter->pos_mod_end < iter->pos)
+       if (iter->pos_ftrace_mod_end > 0 &&
+           iter->pos_ftrace_mod_end < iter->pos)
                return get_ksymbol_bpf(iter);
 
-       if (!get_ksymbol_mod(iter))
-               return get_ksymbol_bpf(iter);
+       if (iter->pos_mod_end > 0 &&
+           iter->pos_mod_end < iter->pos) {
+               if (!get_ksymbol_ftrace_mod(iter))
+                       return get_ksymbol_bpf(iter);
+               return 1;
+       }
+
+       if (!get_ksymbol_mod(iter)) {
+               if (!get_ksymbol_ftrace_mod(iter))
+                       return get_ksymbol_bpf(iter);
+       }
 
        return 1;
 }
index 222aba4aa960a947488afa52e21c11a3d47d457a..f0411a27176552a2a172c534a84c37e10c76705d 100644 (file)
@@ -3481,6 +3481,8 @@ static noinline int do_init_module(struct module *mod)
        if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC))
                async_synchronize_full();
 
+       ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base +
+                       mod->init_layout.size);
        mutex_lock(&module_mutex);
        /* Drop initial reference. */
        module_put(mod);
index 3cdaeaef9ce1a63bfde3bc631946e40ed731e035..724d9292d4b9614e62b78e7e0c27e0e8fb0dd49c 100644 (file)
@@ -39,7 +39,7 @@
  * There are situations when we want to make sure that all buffers
  * were handled or when IRQs are blocked.
  */
-static int printk_safe_irq_ready;
+static int printk_safe_irq_ready __read_mostly;
 
 #define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) -    \
                                sizeof(atomic_t) -                      \
@@ -63,11 +63,8 @@ static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
 /* Get flushed in a more safe context. */
 static void queue_flush_work(struct printk_safe_seq_buf *s)
 {
-       if (printk_safe_irq_ready) {
-               /* Make sure that IRQ work is really initialized. */
-               smp_rmb();
+       if (printk_safe_irq_ready)
                irq_work_queue(&s->work);
-       }
 }
 
 /*
@@ -398,8 +395,12 @@ void __init printk_safe_init(void)
 #endif
        }
 
-       /* Make sure that IRQ works are initialized before enabling. */
-       smp_wmb();
+       /*
+        * In the highly unlikely event that an NMI triggers at this
+        * moment, make sure the IRQ work is set up before this
+        * variable is set.
+        */
+       barrier();
        printk_safe_irq_ready = 1;
 
        /* Flush pending messages that did not have scheduled IRQ works. */
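
Replacing smp_wmb()/smp_rmb() with a plain barrier() follows from the
comment's reasoning: at this point in boot, the only observer that can race
with the store is an NMI on the local CPU, which sees stores in program
order, so only compiler reordering needs to be prevented. A standalone sketch
of the pattern, with illustrative names:

#include <linux/irq_work.h>

static void flush_fn(struct irq_work *work) { }

static struct irq_work flush_work;
static int flush_ready;

static void my_safe_init(void)
{
	init_irq_work(&flush_work, flush_fn);	/* set up the work first */
	barrier();		/* forbid compiler reordering only */
	flush_ready = 1;	/* a local NMI now sees valid work */
}
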
index f54b7b6b4a4bcbe50b207a2a40d8d64bf5acd12c..af7dad126c13cecbe73f5d797778f005b5838377 100644 (file)
@@ -160,6 +160,17 @@ config FUNCTION_GRAPH_TRACER
          address on the current task structure into a stack of calls.
 
 
+config PREEMPTIRQ_EVENTS
+       bool "Enable trace events for preempt and irq disable/enable"
+       select TRACE_IRQFLAGS
+       depends on DEBUG_PREEMPT || !PROVE_LOCKING
+       default n
+       help
+         Enable tracing of disable and enable events for preemption and irqs.
+         For tracing preempt disable/enable events, DEBUG_PREEMPT must be
+         enabled. For tracing irq disable/enable events, PROVE_LOCKING must
+         be disabled.
+
 config IRQSOFF_TRACER
        bool "Interrupts-off Latency Tracer"
        default n
index 19a15b2f119010f5dd38d2dfe99ff7d2eb81a5f0..e2538c7638d44d635542382a11df6d803d6af1c5 100644 (file)
@@ -35,6 +35,7 @@ obj-$(CONFIG_TRACING) += trace_printk.o
 obj-$(CONFIG_TRACING_MAP) += tracing_map.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
+obj-$(CONFIG_PREEMPTIRQ_EVENTS) += trace_irqsoff.o
 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
index 8319e09e15b945f14f9046edeb885e173ef26652..ccdf3664e4a9a7f6a79423dec807cd9f0c3ecd6f 100644 (file)
@@ -203,30 +203,6 @@ void clear_ftrace_function(void)
        ftrace_trace_function = ftrace_stub;
 }
 
-static void per_cpu_ops_disable_all(struct ftrace_ops *ops)
-{
-       int cpu;
-
-       for_each_possible_cpu(cpu)
-               *per_cpu_ptr(ops->disabled, cpu) = 1;
-}
-
-static int per_cpu_ops_alloc(struct ftrace_ops *ops)
-{
-       int __percpu *disabled;
-
-       if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
-               return -EINVAL;
-
-       disabled = alloc_percpu(int);
-       if (!disabled)
-               return -ENOMEM;
-
-       ops->disabled = disabled;
-       per_cpu_ops_disable_all(ops);
-       return 0;
-}
-
 static void ftrace_sync(struct work_struct *work)
 {
        /*
@@ -262,8 +238,8 @@ static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
         * If this is a dynamic, RCU, or per CPU ops, or we force list func,
         * then it needs to call the list anyway.
         */
-       if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU |
-                         FTRACE_OPS_FL_RCU) || FTRACE_FORCE_LIST_FUNC)
+       if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
+           FTRACE_FORCE_LIST_FUNC)
                return ftrace_ops_list_func;
 
        return ftrace_ops_get_func(ops);
@@ -422,11 +398,6 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
        if (!core_kernel_data((unsigned long)ops))
                ops->flags |= FTRACE_OPS_FL_DYNAMIC;
 
-       if (ops->flags & FTRACE_OPS_FL_PER_CPU) {
-               if (per_cpu_ops_alloc(ops))
-                       return -ENOMEM;
-       }
-
        add_ftrace_ops(&ftrace_ops_list, ops);
 
        /* Always save the function, and reset at unregistering */
@@ -2727,11 +2698,6 @@ void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
 {
 }
 
-static void per_cpu_ops_free(struct ftrace_ops *ops)
-{
-       free_percpu(ops->disabled);
-}
-
 static void ftrace_startup_enable(int command)
 {
        if (saved_ftrace_func != ftrace_trace_function) {
@@ -2833,7 +2799,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
                 * not currently active, we can just free them
                 * without synchronizing all CPUs.
                 */
-               if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU))
+               if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
                        goto free_ops;
 
                return 0;
@@ -2880,7 +2846,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
         * The same goes for freeing the per_cpu data of the per_cpu
         * ops.
         */
-       if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU)) {
+       if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
                /*
                 * We need to do a hard force of sched synchronization.
                 * This is because we use preempt_disable() to do RCU, but
@@ -2903,9 +2869,6 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 
  free_ops:
                arch_ftrace_trampoline_free(ops);
-
-               if (ops->flags & FTRACE_OPS_FL_PER_CPU)
-                       per_cpu_ops_free(ops);
        }
 
        return 0;
@@ -5672,10 +5635,29 @@ static int ftrace_process_locs(struct module *mod,
        return ret;
 }
 
+struct ftrace_mod_func {
+       struct list_head        list;
+       char                    *name;
+       unsigned long           ip;
+       unsigned int            size;
+};
+
+struct ftrace_mod_map {
+       struct rcu_head         rcu;
+       struct list_head        list;
+       struct module           *mod;
+       unsigned long           start_addr;
+       unsigned long           end_addr;
+       struct list_head        funcs;
+       unsigned int            num_funcs;
+};
+
 #ifdef CONFIG_MODULES
 
 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
 
+static LIST_HEAD(ftrace_mod_maps);
+
 static int referenced_filters(struct dyn_ftrace *rec)
 {
        struct ftrace_ops *ops;
@@ -5729,8 +5711,26 @@ static void clear_mod_from_hashes(struct ftrace_page *pg)
        mutex_unlock(&trace_types_lock);
 }
 
+static void ftrace_free_mod_map(struct rcu_head *rcu)
+{
+       struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
+       struct ftrace_mod_func *mod_func;
+       struct ftrace_mod_func *n;
+
+       /* All the contents of mod_map are no longer visible to readers */
+       list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
+               kfree(mod_func->name);
+               list_del(&mod_func->list);
+               kfree(mod_func);
+       }
+
+       kfree(mod_map);
+}
+
 void ftrace_release_mod(struct module *mod)
 {
+       struct ftrace_mod_map *mod_map;
+       struct ftrace_mod_map *n;
        struct dyn_ftrace *rec;
        struct ftrace_page **last_pg;
        struct ftrace_page *tmp_page = NULL;
@@ -5742,6 +5742,14 @@ void ftrace_release_mod(struct module *mod)
        if (ftrace_disabled)
                goto out_unlock;
 
+       list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
+               if (mod_map->mod == mod) {
+                       list_del_rcu(&mod_map->list);
+                       call_rcu_sched(&mod_map->rcu, ftrace_free_mod_map);
+                       break;
+               }
+       }
+
        /*
         * Each module has its own ftrace_pages, remove
         * them from the list.
@@ -5749,7 +5757,8 @@ void ftrace_release_mod(struct module *mod)
        last_pg = &ftrace_pages_start;
        for (pg = ftrace_pages_start; pg; pg = *last_pg) {
                rec = &pg->records[0];
-               if (within_module_core(rec->ip, mod)) {
+               if (within_module_core(rec->ip, mod) ||
+                   within_module_init(rec->ip, mod)) {
                        /*
                         * As core pages are first, the first
                         * page should never be a module page.
@@ -5818,7 +5827,8 @@ void ftrace_module_enable(struct module *mod)
                 * not part of this module, then skip this pg,
                 * which the "break" will do.
                 */
-               if (!within_module_core(rec->ip, mod))
+               if (!within_module_core(rec->ip, mod) &&
+                   !within_module_init(rec->ip, mod))
                        break;
 
                cnt = 0;
@@ -5863,23 +5873,245 @@ void ftrace_module_init(struct module *mod)
        ftrace_process_locs(mod, mod->ftrace_callsites,
                            mod->ftrace_callsites + mod->num_ftrace_callsites);
 }
+
+static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
+                               struct dyn_ftrace *rec)
+{
+       struct ftrace_mod_func *mod_func;
+       unsigned long symsize;
+       unsigned long offset;
+       char str[KSYM_SYMBOL_LEN];
+       char *modname;
+       const char *ret;
+
+       ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
+       if (!ret)
+               return;
+
+       mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
+       if (!mod_func)
+               return;
+
+       mod_func->name = kstrdup(str, GFP_KERNEL);
+       if (!mod_func->name) {
+               kfree(mod_func);
+               return;
+       }
+
+       mod_func->ip = rec->ip - offset;
+       mod_func->size = symsize;
+
+       mod_map->num_funcs++;
+
+       list_add_rcu(&mod_func->list, &mod_map->funcs);
+}
+
+static struct ftrace_mod_map *
+allocate_ftrace_mod_map(struct module *mod,
+                       unsigned long start, unsigned long end)
+{
+       struct ftrace_mod_map *mod_map;
+
+       mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
+       if (!mod_map)
+               return NULL;
+
+       mod_map->mod = mod;
+       mod_map->start_addr = start;
+       mod_map->end_addr = end;
+       mod_map->num_funcs = 0;
+
+       INIT_LIST_HEAD_RCU(&mod_map->funcs);
+
+       list_add_rcu(&mod_map->list, &ftrace_mod_maps);
+
+       return mod_map;
+}
+
+static const char *
+ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
+                          unsigned long addr, unsigned long *size,
+                          unsigned long *off, char *sym)
+{
+       struct ftrace_mod_func *found_func =  NULL;
+       struct ftrace_mod_func *mod_func;
+
+       list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
+               if (addr >= mod_func->ip &&
+                   addr < mod_func->ip + mod_func->size) {
+                       found_func = mod_func;
+                       break;
+               }
+       }
+
+       if (found_func) {
+               if (size)
+                       *size = found_func->size;
+               if (off)
+                       *off = addr - found_func->ip;
+               if (sym)
+                       strlcpy(sym, found_func->name, KSYM_NAME_LEN);
+
+               return found_func->name;
+       }
+
+       return NULL;
+}
+
+const char *
+ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
+                  unsigned long *off, char **modname, char *sym)
+{
+       struct ftrace_mod_map *mod_map;
+       const char *ret = NULL;
+
+       /* mod_map is freed via call_rcu_sched() */
+       preempt_disable();
+       list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
+               ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
+               if (ret) {
+                       if (modname)
+                               *modname = mod_map->mod->name;
+                       break;
+               }
+       }
+       preempt_enable();
+
+       return ret;
+}
+
+int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
+                          char *type, char *name,
+                          char *module_name, int *exported)
+{
+       struct ftrace_mod_map *mod_map;
+       struct ftrace_mod_func *mod_func;
+
+       preempt_disable();
+       list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
+
+               if (symnum >= mod_map->num_funcs) {
+                       symnum -= mod_map->num_funcs;
+                       continue;
+               }
+
+               list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
+                       if (symnum > 1) {
+                               symnum--;
+                               continue;
+                       }
+
+                       *value = mod_func->ip;
+                       *type = 'T';
+                       strlcpy(name, mod_func->name, KSYM_NAME_LEN);
+                       strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
+                       *exported = 1;
+                       preempt_enable();
+                       return 0;
+               }
+               WARN_ON(1);
+               break;
+       }
+       preempt_enable();
+       return -ERANGE;
+}
+
+#else
+static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
+                               struct dyn_ftrace *rec) { }
+static inline struct ftrace_mod_map *
+allocate_ftrace_mod_map(struct module *mod,
+                       unsigned long start, unsigned long end)
+{
+       return NULL;
+}
 #endif /* CONFIG_MODULES */
 
-void __init ftrace_free_init_mem(void)
+struct ftrace_init_func {
+       struct list_head list;
+       unsigned long ip;
+};
+
+/* Clear any init ips from hashes */
+static void
+clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
+{
+       struct ftrace_func_entry *entry;
+
+       if (ftrace_hash_empty(hash))
+               return;
+
+       entry = __ftrace_lookup_ip(hash, func->ip);
+
+       /*
+        * Do not allow this rec to match again.
+        * Yeah, it may waste some memory, but will be removed
+        * if/when the hash is modified again.
+        */
+       if (entry)
+               entry->ip = 0;
+}
+
+static void
+clear_func_from_hashes(struct ftrace_init_func *func)
+{
+       struct trace_array *tr;
+
+       mutex_lock(&trace_types_lock);
+       list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+               if (!tr->ops || !tr->ops->func_hash)
+                       continue;
+               mutex_lock(&tr->ops->func_hash->regex_lock);
+               clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
+               clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
+               mutex_unlock(&tr->ops->func_hash->regex_lock);
+       }
+       mutex_unlock(&trace_types_lock);
+}
+
+static void add_to_clear_hash_list(struct list_head *clear_list,
+                                  struct dyn_ftrace *rec)
+{
+       struct ftrace_init_func *func;
+
+       func = kmalloc(sizeof(*func), GFP_KERNEL);
+       if (!func) {
+               WARN_ONCE(1, "alloc failure, ftrace filter could be stale\n");
+               return;
+       }
+
+       func->ip = rec->ip;
+       list_add(&func->list, clear_list);
+}
+
+void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
 {
-       unsigned long start = (unsigned long)(&__init_begin);
-       unsigned long end = (unsigned long)(&__init_end);
+       unsigned long start = (unsigned long)(start_ptr);
+       unsigned long end = (unsigned long)(end_ptr);
        struct ftrace_page **last_pg = &ftrace_pages_start;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        struct dyn_ftrace key;
+       struct ftrace_mod_map *mod_map = NULL;
+       struct ftrace_init_func *func, *func_next;
+       struct list_head clear_hash;
        int order;
 
+       INIT_LIST_HEAD(&clear_hash);
+
        key.ip = start;
        key.flags = end;        /* overload flags, as it is unsigned long */
 
        mutex_lock(&ftrace_lock);
 
+       /*
+        * If we are freeing module init memory, then check if
+        * any tracer is active. If so, we need to save a mapping of
+        * the module functions being freed with the address.
+        */
+       if (mod && ftrace_ops_list != &ftrace_list_end)
+               mod_map = allocate_ftrace_mod_map(mod, start, end);
+
        for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
                if (end < pg->records[0].ip ||
                    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
@@ -5890,6 +6122,13 @@ void __init ftrace_free_init_mem(void)
                              ftrace_cmp_recs);
                if (!rec)
                        continue;
+
+               /* rec will be cleared from hashes after ftrace_lock unlock */
+               add_to_clear_hash_list(&clear_hash, rec);
+
+               if (mod_map)
+                       save_ftrace_mod_rec(mod_map, rec);
+
                pg->index--;
                ftrace_update_tot_cnt--;
                if (!pg->index) {
@@ -5908,6 +6147,19 @@ void __init ftrace_free_init_mem(void)
                goto again;
        }
        mutex_unlock(&ftrace_lock);
+
+       list_for_each_entry_safe(func, func_next, &clear_hash, list) {
+               clear_func_from_hashes(func);
+               kfree(func);
+       }
+}
+
+void __init ftrace_free_init_mem(void)
+{
+       void *start = (void *)(&__init_begin);
+       void *end = (void *)(&__init_end);
+
+       ftrace_free_mem(NULL, start, end);
 }
 
 void __init ftrace_init(void)
@@ -6063,10 +6315,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                 * If any of the above fails then the op->func() is not executed.
                 */
                if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
-                   (!(op->flags & FTRACE_OPS_FL_PER_CPU) ||
-                    !ftrace_function_local_disabled(op)) &&
                    ftrace_ops_test(op, ip, regs)) {
-                   
                        if (FTRACE_WARN_ON(!op->func)) {
                                pr_warn("op=%p %pS\n", op, op);
                                goto out;
@@ -6124,10 +6373,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
 
        preempt_disable_notrace();
 
-       if (!(op->flags & FTRACE_OPS_FL_PER_CPU) ||
-           !ftrace_function_local_disabled(op)) {
-               op->func(ip, parent_ip, op, regs);
-       }
+       op->func(ip, parent_ip, op, regs);
 
        preempt_enable_notrace();
        trace_clear_recursion(bit);
@@ -6151,7 +6397,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
         * or does per cpu logic, then we need to call the assist handler.
         */
        if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) ||
-           ops->flags & (FTRACE_OPS_FL_RCU | FTRACE_OPS_FL_PER_CPU))
+           ops->flags & FTRACE_OPS_FL_RCU)
                return ftrace_ops_assist_func;
 
        return ops->func;
index d57fede84b3803c15bfa4eb0324bbea50f4bdd10..91874a95060de5de11aa47d3fbddb8c4980a0da8 100644 (file)
@@ -2536,61 +2536,29 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
  * The lock and unlock are done within a preempt disable section.
  * The current_context per_cpu variable can only be modified
  * by the current task between lock and unlock. But it can
- * be modified more than once via an interrupt. To pass this
- * information from the lock to the unlock without having to
- * access the 'in_interrupt()' functions again (which do show
- * a bit of overhead in something as critical as function tracing,
- * we use a bitmask trick.
+ * be modified more than once via an interrupt. There are four
+ * different contexts that we need to consider.
  *
- *  bit 0 =  NMI context
- *  bit 1 =  IRQ context
- *  bit 2 =  SoftIRQ context
- *  bit 3 =  normal context.
+ *  Normal context
+ *  SoftIRQ context
+ *  IRQ context
+ *  NMI context
  *
- * This works because this is the order of contexts that can
- * preempt other contexts. A SoftIRQ never preempts an IRQ
- * context.
- *
- * When the context is determined, the corresponding bit is
- * checked and set (if it was set, then a recursion of that context
- * happened).
- *
- * On unlock, we need to clear this bit. To do so, just subtract
- * 1 from the current_context and AND it to itself.
- *
- * (binary)
- *  101 - 1 = 100
- *  101 & 100 = 100 (clearing bit zero)
- *
- *  1010 - 1 = 1001
- *  1010 & 1001 = 1000 (clearing bit 1)
- *
- * The least significant bit can be cleared this way, and it
- * just so happens that it is the same bit corresponding to
- * the current context.
+ * If for some reason the ring buffer starts to recurse, we
+ * only allow that to happen at most 4 times (one for each
+ * context). If it happens 5 times, then we consider this a
+ * recursive loop and do not let it go further.
  */
 
 static __always_inline int
 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 {
-       unsigned int val = cpu_buffer->current_context;
-       int bit;
-
-       if (in_interrupt()) {
-               if (in_nmi())
-                       bit = RB_CTX_NMI;
-               else if (in_irq())
-                       bit = RB_CTX_IRQ;
-               else
-                       bit = RB_CTX_SOFTIRQ;
-       } else
-               bit = RB_CTX_NORMAL;
-
-       if (unlikely(val & (1 << bit)))
+       if (cpu_buffer->current_context >= 4)
                return 1;
 
-       val |= (1 << bit);
-       cpu_buffer->current_context = val;
+       cpu_buffer->current_context++;
+       /* Interrupts must see this update */
+       barrier();
 
        return 0;
 }
@@ -2598,7 +2566,9 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 static __always_inline void
 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
 {
-       cpu_buffer->current_context &= cpu_buffer->current_context - 1;
+       /* Don't let the dec leak out */
+       barrier();
+       cpu_buffer->current_context--;
 }
 
 /**
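
Taken together, these two hunks replace the per-context bitmask with a plain
nesting counter: each of the four contexts (normal, softirq, irq, NMI) can
hold at most one live writer per CPU, so a fifth nested attempt must be
recursion. A sketch of how the guard brackets a write, as if written inside
ring_buffer.c; it is loosely modeled on ring_buffer_lock_reserve() and
illustrative only:

static int my_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	int ret = -EBUSY;

	preempt_disable_notrace();
	if (trace_recursive_lock(cpu_buffer))
		goto out;	/* fifth nested writer on this CPU */

	/* reserve, write and commit the event here */

	trace_recursive_unlock(cpu_buffer);
	ret = 0;
out:
	preempt_enable_notrace();
	return ret;
}
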
index 752e5daf0896fc529876f8801a95e23118713338..73e67b68c53b47d5b422970cd0dee1d0bec27002 100644 (file)
@@ -7687,6 +7687,7 @@ static int instance_mkdir(const char *name)
        struct trace_array *tr;
        int ret;
 
+       mutex_lock(&event_mutex);
        mutex_lock(&trace_types_lock);
 
        ret = -EEXIST;
@@ -7742,6 +7743,7 @@ static int instance_mkdir(const char *name)
        list_add(&tr->list, &ftrace_trace_arrays);
 
        mutex_unlock(&trace_types_lock);
+       mutex_unlock(&event_mutex);
 
        return 0;
 
@@ -7753,6 +7755,7 @@ static int instance_mkdir(const char *name)
 
  out_unlock:
        mutex_unlock(&trace_types_lock);
+       mutex_unlock(&event_mutex);
 
        return ret;
 
@@ -7765,6 +7768,7 @@ static int instance_rmdir(const char *name)
        int ret;
        int i;
 
+       mutex_lock(&event_mutex);
        mutex_lock(&trace_types_lock);
 
        ret = -ENODEV;
@@ -7810,6 +7814,7 @@ static int instance_rmdir(const char *name)
 
  out_unlock:
        mutex_unlock(&trace_types_lock);
+       mutex_unlock(&event_mutex);
 
        return ret;
 }
@@ -8276,6 +8281,92 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 }
 EXPORT_SYMBOL_GPL(ftrace_dump);
 
+int trace_run_command(const char *buf, int (*createfn)(int, char **))
+{
+       char **argv;
+       int argc, ret;
+
+       argc = 0;
+       ret = 0;
+       argv = argv_split(GFP_KERNEL, buf, &argc);
+       if (!argv)
+               return -ENOMEM;
+
+       if (argc)
+               ret = createfn(argc, argv);
+
+       argv_free(argv);
+
+       return ret;
+}
+
+#define WRITE_BUFSIZE  4096
+
+ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
+                               size_t count, loff_t *ppos,
+                               int (*createfn)(int, char **))
+{
+       char *kbuf, *buf, *tmp;
+       int ret = 0;
+       size_t done = 0;
+       size_t size;
+
+       kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
+       if (!kbuf)
+               return -ENOMEM;
+
+       while (done < count) {
+               size = count - done;
+
+               if (size >= WRITE_BUFSIZE)
+                       size = WRITE_BUFSIZE - 1;
+
+               if (copy_from_user(kbuf, buffer + done, size)) {
+                       ret = -EFAULT;
+                       goto out;
+               }
+               kbuf[size] = '\0';
+               buf = kbuf;
+               do {
+                       tmp = strchr(buf, '\n');
+                       if (tmp) {
+                               *tmp = '\0';
+                               size = tmp - buf + 1;
+                       } else {
+                               size = strlen(buf);
+                               if (done + size < count) {
+                                       if (buf != kbuf)
+                                               break;
+                                       /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
+                                       pr_warn("Line length is too long: Should be less than %d\n",
+                                               WRITE_BUFSIZE - 2);
+                                       ret = -EINVAL;
+                                       goto out;
+                               }
+                       }
+                       done += size;
+
+                       /* Remove comments */
+                       tmp = strchr(buf, '#');
+
+                       if (tmp)
+                               *tmp = '\0';
+
+                       ret = trace_run_command(buf, createfn);
+                       if (ret)
+                               goto out;
+                       buf += size;
+
+               } while (done < count);
+       }
+       ret = done;
+
+out:
+       kfree(kbuf);
+
+       return ret;
+}
+
 __init static int tracer_alloc_buffers(void)
 {
        int ring_buf_size;
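
trace_run_command() and trace_parse_run_command() factor line-oriented
command parsing out so other trace file handlers (within kernel/trace/, where
trace.h declares them) can reuse it. A hedged sketch of a write handler built
on them; the function names are hypothetical, modeled on the kprobe-events
usage this series refactors:

/* Each newline-terminated, '#'-commented line becomes one command. */
static int create_my_event(int argc, char **argv)
{
	/* argv[0] would be a command such as "p:myprobe do_sys_open" */
	return 0;
}

static ssize_t my_events_write(struct file *file,
			       const char __user *buffer,
			       size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_my_event);
}
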
index 6b0b343a36a278be32a89e91e95066e84020c620..2a6d0325a76181a0a8b309eaa2b69f91b350d98a 100644 (file)
@@ -739,8 +739,6 @@ extern int trace_selftest_startup_wakeup(struct tracer *trace,
                                         struct trace_array *tr);
 extern int trace_selftest_startup_nop(struct tracer *trace,
                                         struct trace_array *tr);
-extern int trace_selftest_startup_sched_switch(struct tracer *trace,
-                                              struct trace_array *tr);
 extern int trace_selftest_startup_branch(struct tracer *trace,
                                         struct trace_array *tr);
 /*
@@ -1755,6 +1753,13 @@ void trace_printk_start_comm(void);
 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
 
+#define MAX_EVENT_NAME_LEN     64
+
+extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
+extern ssize_t trace_parse_run_command(struct file *file,
+               const char __user *buffer, size_t count, loff_t *ppos,
+               int (*createfn)(int, char**));
+
 /*
  * Normal trace_printk() and friends allocates special buffers
  * to do the manipulation, as well as saves the print formats
index 13ba2d3f6a91a147c8377041ee387ff457ab21c2..55d6dff37dafad5732da6adf85cddc38a4bb43bc 100644 (file)
@@ -240,27 +240,41 @@ void perf_trace_destroy(struct perf_event *p_event)
 int perf_trace_add(struct perf_event *p_event, int flags)
 {
        struct trace_event_call *tp_event = p_event->tp_event;
-       struct hlist_head __percpu *pcpu_list;
-       struct hlist_head *list;
-
-       pcpu_list = tp_event->perf_events;
-       if (WARN_ON_ONCE(!pcpu_list))
-               return -EINVAL;
 
        if (!(flags & PERF_EF_START))
                p_event->hw.state = PERF_HES_STOPPED;
 
-       list = this_cpu_ptr(pcpu_list);
-       hlist_add_head_rcu(&p_event->hlist_entry, list);
+       /*
+        * If TRACE_REG_PERF_ADD returns false; no custom action was performed
+        * and we need to take the default action of enqueueing our event on
+        * the right per-cpu hlist.
+        */
+       if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event)) {
+               struct hlist_head __percpu *pcpu_list;
+               struct hlist_head *list;
+
+               pcpu_list = tp_event->perf_events;
+               if (WARN_ON_ONCE(!pcpu_list))
+                       return -EINVAL;
+
+               list = this_cpu_ptr(pcpu_list);
+               hlist_add_head_rcu(&p_event->hlist_entry, list);
+       }
 
-       return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
+       return 0;
 }
 
 void perf_trace_del(struct perf_event *p_event, int flags)
 {
        struct trace_event_call *tp_event = p_event->tp_event;
-       hlist_del_rcu(&p_event->hlist_entry);
-       tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
+
+       /*
+        * If TRACE_REG_PERF_DEL returns false; no custom action was performed
+        * and we need to take the default action of dequeueing our event from
+        * the right per-cpu hlist.
+        */
+       if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event))
+               hlist_del_rcu(&p_event->hlist_entry);
 }
 
 void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
@@ -306,16 +320,25 @@ static void
 perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *ops, struct pt_regs *pt_regs)
 {
-       struct perf_event *event;
        struct ftrace_entry *entry;
-       struct hlist_head *head;
+       struct perf_event *event;
+       struct hlist_head head;
        struct pt_regs regs;
        int rctx;
 
-       head = this_cpu_ptr(event_function.perf_events);
-       if (hlist_empty(head))
+       if ((unsigned long)ops->private != smp_processor_id())
                return;
 
+       event = container_of(ops, struct perf_event, ftrace_ops);
+
+       /*
+        * @event->hlist_entry is NULL (per INIT_HLIST_NODE), and all
+        * the perf code does is hlist_for_each_entry_rcu(), so we can
+        * get away with simply setting the @head.first pointer in order
+        * to create a single-entry list.
+        */
+       head.first = &event->hlist_entry;
+
 #define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
                    sizeof(u64)) - sizeof(u32))
 
@@ -330,9 +353,8 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
 
        entry->ip = ip;
        entry->parent_ip = parent_ip;
-       event = container_of(ops, struct perf_event, ftrace_ops);
        perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
-                             1, &regs, head, NULL, event);
+                             1, &regs, &head, NULL);
 
 #undef ENTRY_SIZE
 }
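The head.first assignment above works because INIT_HLIST_NODE leaves the entry's next pointer NULL and the perf side only ever walks the list forward; a stack-local head pointing at one node is indistinguishable from a one-element list. A standalone sketch with pared-down copies of the hlist types, for illustration only:

#include <stddef.h>
#include <stdio.h>

struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

struct event {
        int cpu;
        struct hlist_node hlist_entry;  /* next stays NULL, as after INIT_HLIST_NODE */
};

int main(void)
{
        struct event ev = { .cpu = 3 };
        struct hlist_head head;
        struct hlist_node *n;

        head.first = &ev.hlist_entry;   /* the whole trick */

        /* Forward walk, as hlist_for_each_entry_rcu() would do. */
        for (n = head.first; n; n = n->next) {
                struct event *e = (struct event *)
                        ((char *)n - offsetof(struct event, hlist_entry));
                printf("visited event for cpu %d\n", e->cpu);
        }
        return 0;
}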
@@ -341,8 +363,10 @@ static int perf_ftrace_function_register(struct perf_event *event)
 {
        struct ftrace_ops *ops = &event->ftrace_ops;
 
-       ops->flags |= FTRACE_OPS_FL_PER_CPU | FTRACE_OPS_FL_RCU;
-       ops->func = perf_ftrace_function_call;
+       ops->flags   = FTRACE_OPS_FL_RCU;
+       ops->func    = perf_ftrace_function_call;
+       ops->private = (void *)(unsigned long)nr_cpu_ids;
+
        return register_ftrace_function(ops);
 }
 
@@ -354,19 +378,11 @@ static int perf_ftrace_function_unregister(struct perf_event *event)
        return ret;
 }
 
-static void perf_ftrace_function_enable(struct perf_event *event)
-{
-       ftrace_function_local_enable(&event->ftrace_ops);
-}
-
-static void perf_ftrace_function_disable(struct perf_event *event)
-{
-       ftrace_function_local_disable(&event->ftrace_ops);
-}
-
 int perf_ftrace_event_register(struct trace_event_call *call,
                               enum trace_reg type, void *data)
 {
+       struct perf_event *event = data;
+
        switch (type) {
        case TRACE_REG_REGISTER:
        case TRACE_REG_UNREGISTER:
@@ -379,11 +395,11 @@ int perf_ftrace_event_register(struct trace_event_call *call,
        case TRACE_REG_PERF_CLOSE:
                return perf_ftrace_function_unregister(data);
        case TRACE_REG_PERF_ADD:
-               perf_ftrace_function_enable(data);
-               return 0;
+               event->ftrace_ops.private = (void *)(unsigned long)smp_processor_id();
+               return 1;
        case TRACE_REG_PERF_DEL:
-               perf_ftrace_function_disable(data);
-               return 0;
+               event->ftrace_ops.private = (void *)(unsigned long)nr_cpu_ids;
+               return 1;
        }
 
        return -EINVAL;
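Storing a CPU number in ops->private is what replaces the old FTRACE_OPS_FL_PER_CPU machinery: the event is registered parked on the impossible id nr_cpu_ids, pointed at the current CPU by TRACE_REG_PERF_ADD, and the function callback bails out unless it fires on that CPU. A toy model of the filter, where plain integers stand in for the void * casts and the four-CPU count is an assumption:

#include <stdio.h>

#define NR_CPU_IDS 4UL                  /* assumed CPU count; one past any valid id */

struct ops { unsigned long private; };

static void callback(struct ops *ops, unsigned long this_cpu)
{
        if (ops->private != this_cpu)
                return;                 /* event not armed for this CPU */
        printf("handled on cpu %lu\n", this_cpu);
}

int main(void)
{
        struct ops ops = { .private = NR_CPU_IDS };     /* registered, parked */

        callback(&ops, 2);              /* filtered out */
        ops.private = 2;                /* TRACE_REG_PERF_ADD on CPU 2 */
        callback(&ops, 2);              /* handled */
        ops.private = NR_CPU_IDS;       /* TRACE_REG_PERF_DEL */
        callback(&ops, 2);              /* filtered out again */
        return 0;
}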
index 87468398b9ed6206722db4126cc6c6318580e357..ec0f9aa4e1516bd7fe2ee17b6b12ff27a81d1482 100644 (file)
@@ -1406,8 +1406,8 @@ static int subsystem_open(struct inode *inode, struct file *filp)
                return -ENODEV;
 
        /* Make sure the system still exists */
-       mutex_lock(&trace_types_lock);
        mutex_lock(&event_mutex);
+       mutex_lock(&trace_types_lock);
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                list_for_each_entry(dir, &tr->systems, list) {
                        if (dir == inode->i_private) {
@@ -1421,8 +1421,8 @@ static int subsystem_open(struct inode *inode, struct file *filp)
                }
        }
  exit_loop:
-       mutex_unlock(&event_mutex);
        mutex_unlock(&trace_types_lock);
+       mutex_unlock(&event_mutex);
 
        if (!system)
                return -ENODEV;
@@ -2294,15 +2294,15 @@ static void __add_event_to_tracers(struct trace_event_call *call);
 int trace_add_event_call(struct trace_event_call *call)
 {
        int ret;
-       mutex_lock(&trace_types_lock);
        mutex_lock(&event_mutex);
+       mutex_lock(&trace_types_lock);
 
        ret = __register_event(call, NULL);
        if (ret >= 0)
                __add_event_to_tracers(call);
 
-       mutex_unlock(&event_mutex);
        mutex_unlock(&trace_types_lock);
+       mutex_unlock(&event_mutex);
        return ret;
 }
 
@@ -2356,13 +2356,13 @@ int trace_remove_event_call(struct trace_event_call *call)
 {
        int ret;
 
-       mutex_lock(&trace_types_lock);
        mutex_lock(&event_mutex);
+       mutex_lock(&trace_types_lock);
        down_write(&trace_event_sem);
        ret = probe_remove_event_call(call);
        up_write(&trace_event_sem);
-       mutex_unlock(&event_mutex);
        mutex_unlock(&trace_types_lock);
+       mutex_unlock(&event_mutex);
 
        return ret;
 }
@@ -2424,8 +2424,8 @@ static int trace_module_notify(struct notifier_block *self,
 {
        struct module *mod = data;
 
-       mutex_lock(&trace_types_lock);
        mutex_lock(&event_mutex);
+       mutex_lock(&trace_types_lock);
        switch (val) {
        case MODULE_STATE_COMING:
                trace_module_add_events(mod);
@@ -2434,8 +2434,8 @@ static int trace_module_notify(struct notifier_block *self,
                trace_module_remove_events(mod);
                break;
        }
-       mutex_unlock(&event_mutex);
        mutex_unlock(&trace_types_lock);
+       mutex_unlock(&event_mutex);
 
        return 0;
 }
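The relocking hunks above all flip the same pair: event_mutex is now taken before trace_types_lock on every path. A single consistent acquisition order is the invariant that rules out ABBA deadlocks between these call sites. A toy pthread illustration of the rule, not kernel code:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t trace_types_lock = PTHREAD_MUTEX_INITIALIZER;

static void *worker(void *name)
{
        /* Every thread honors the same order, so none can hold one
         * lock while waiting on a thread that holds the other. */
        pthread_mutex_lock(&event_mutex);
        pthread_mutex_lock(&trace_types_lock);
        printf("%s: holding both locks\n", (const char *)name);
        pthread_mutex_unlock(&trace_types_lock);
        pthread_mutex_unlock(&event_mutex);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, worker, "A");
        pthread_create(&b, NULL, worker, "B");
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}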
@@ -2950,24 +2950,24 @@ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
  * creates the event hierarchy in the @parent/events directory.
  *
  * Returns 0 on success.
+ *
+ * Must be called with event_mutex held.
  */
 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
 {
        int ret;
 
-       mutex_lock(&event_mutex);
+       lockdep_assert_held(&event_mutex);
 
        ret = create_event_toplevel_files(parent, tr);
        if (ret)
-               goto out_unlock;
+               goto out;
 
        down_write(&trace_event_sem);
        __trace_add_event_dirs(tr);
        up_write(&trace_event_sem);
 
- out_unlock:
-       mutex_unlock(&event_mutex);
-
+ out:
        return ret;
 }
 
@@ -2996,9 +2996,10 @@ early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
        return ret;
 }
 
+/* Must be called with event_mutex held */
 int event_trace_del_tracer(struct trace_array *tr)
 {
-       mutex_lock(&event_mutex);
+       lockdep_assert_held(&event_mutex);
 
        /* Disable any event triggers and associated soft-disabled events */
        clear_event_triggers(tr);
@@ -3019,8 +3020,6 @@ int event_trace_del_tracer(struct trace_array *tr)
 
        tr->event_dir = NULL;
 
-       mutex_unlock(&event_mutex);
-
        return 0;
 }
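With the locking lifted into the callers, event_trace_add_tracer() and event_trace_del_tracer() assert their precondition instead of taking the mutex themselves; under lockdep, lockdep_assert_held() turns the "must be called with event_mutex held" comment into a runtime check. A userspace analogue of that conversion, with owner tracking hand-rolled purely for the sketch:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_t event_mutex_owner;     /* 0 when unowned; glibc-style integer pthread_t assumed */

static void lock_event_mutex(void)
{
        pthread_mutex_lock(&event_mutex);
        event_mutex_owner = pthread_self();
}

static void unlock_event_mutex(void)
{
        event_mutex_owner = 0;
        pthread_mutex_unlock(&event_mutex);
}

/* Must be called with event_mutex held. */
static void add_tracer(void)
{
        assert(pthread_equal(event_mutex_owner, pthread_self()));
        printf("adding tracer with event_mutex held\n");
}

int main(void)
{
        lock_event_mutex();
        add_tracer();
        unlock_event_mutex();
        return 0;
}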
 
index 1c21d0e2a145a6e180116fc1fd0705ae7e3fdb17..1e1558c99d56090eea6a321c11d7790334fb0e24 100644 (file)
@@ -28,12 +28,16 @@ struct hist_field;
 
 typedef u64 (*hist_field_fn_t) (struct hist_field *field, void *event);
 
+#define HIST_FIELD_OPERANDS_MAX        2
+
 struct hist_field {
        struct ftrace_event_field       *field;
        unsigned long                   flags;
        hist_field_fn_t                 fn;
        unsigned int                    size;
        unsigned int                    offset;
+       unsigned int                    is_signed;
+       struct hist_field               *operands[HIST_FIELD_OPERANDS_MAX];
 };
 
 static u64 hist_field_none(struct hist_field *field, void *event)
@@ -71,7 +75,9 @@ static u64 hist_field_pstring(struct hist_field *hist_field, void *event)
 
 static u64 hist_field_log2(struct hist_field *hist_field, void *event)
 {
-       u64 val = *(u64 *)(event + hist_field->field->offset);
+       struct hist_field *operand = hist_field->operands[0];
+
+       u64 val = operand->fn(operand, event);
 
        return (u64) ilog2(roundup_pow_of_two(val));
 }
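hist_field_log2() now evaluates its operand first and buckets the result; ilog2(roundup_pow_of_two(val)) is effectively a ceil(log2(val)) bucket index. A standalone sketch of the arithmetic, with plain-C stand-ins for the kernel helpers:

#include <stdint.h>
#include <stdio.h>

static uint64_t roundup_pow_of_two(uint64_t v)
{
        uint64_t p = 1;

        while (p < v)
                p <<= 1;
        return p;
}

static unsigned int ilog2(uint64_t v)
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        uint64_t samples[] = { 1, 5, 64, 100, 1000 };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("val=%llu -> bucket 2^%u\n",
                       (unsigned long long)samples[i],
                       ilog2(roundup_pow_of_two(samples[i])));
        return 0;
}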
@@ -110,16 +116,16 @@ DEFINE_HIST_FIELD_FN(u8);
 #define HIST_KEY_SIZE_MAX      (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
 
 enum hist_field_flags {
-       HIST_FIELD_FL_HITCOUNT          = 1,
-       HIST_FIELD_FL_KEY               = 2,
-       HIST_FIELD_FL_STRING            = 4,
-       HIST_FIELD_FL_HEX               = 8,
-       HIST_FIELD_FL_SYM               = 16,
-       HIST_FIELD_FL_SYM_OFFSET        = 32,
-       HIST_FIELD_FL_EXECNAME          = 64,
-       HIST_FIELD_FL_SYSCALL           = 128,
-       HIST_FIELD_FL_STACKTRACE        = 256,
-       HIST_FIELD_FL_LOG2              = 512,
+       HIST_FIELD_FL_HITCOUNT          = 1 << 0,
+       HIST_FIELD_FL_KEY               = 1 << 1,
+       HIST_FIELD_FL_STRING            = 1 << 2,
+       HIST_FIELD_FL_HEX               = 1 << 3,
+       HIST_FIELD_FL_SYM               = 1 << 4,
+       HIST_FIELD_FL_SYM_OFFSET        = 1 << 5,
+       HIST_FIELD_FL_EXECNAME          = 1 << 6,
+       HIST_FIELD_FL_SYSCALL           = 1 << 7,
+       HIST_FIELD_FL_STACKTRACE        = 1 << 8,
+       HIST_FIELD_FL_LOG2              = 1 << 9,
 };
 
 struct hist_trigger_attrs {
@@ -146,6 +152,25 @@ struct hist_trigger_data {
        struct tracing_map              *map;
 };
 
+static const char *hist_field_name(struct hist_field *field,
+                                  unsigned int level)
+{
+       const char *field_name = "";
+
+       if (level > 1)
+               return field_name;
+
+       if (field->field)
+               field_name = field->field->name;
+       else if (field->flags & HIST_FIELD_FL_LOG2)
+               field_name = hist_field_name(field->operands[0], ++level);
+
+       if (field_name == NULL)
+               field_name = "";
+
+       return field_name;
+}
+
 static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
 {
        hist_field_fn_t fn = NULL;
@@ -340,8 +365,20 @@ static const struct tracing_map_ops hist_trigger_elt_comm_ops = {
        .elt_init       = hist_trigger_elt_comm_init,
 };
 
-static void destroy_hist_field(struct hist_field *hist_field)
+static void destroy_hist_field(struct hist_field *hist_field,
+                              unsigned int level)
 {
+       unsigned int i;
+
+       if (level > 2)
+               return;
+
+       if (!hist_field)
+               return;
+
+       for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
+               destroy_hist_field(hist_field->operands[i], level + 1);
+
        kfree(hist_field);
 }
 
@@ -368,7 +405,10 @@ static struct hist_field *create_hist_field(struct ftrace_event_field *field,
        }
 
        if (flags & HIST_FIELD_FL_LOG2) {
+               unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
                hist_field->fn = hist_field_log2;
+               hist_field->operands[0] = create_hist_field(field, fl);
+               hist_field->size = hist_field->operands[0]->size;
                goto out;
        }
 
@@ -388,7 +428,7 @@ static struct hist_field *create_hist_field(struct ftrace_event_field *field,
                hist_field->fn = select_value_fn(field->size,
                                                 field->is_signed);
                if (!hist_field->fn) {
-                       destroy_hist_field(hist_field);
+                       destroy_hist_field(hist_field, 0);
                        return NULL;
                }
        }
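A LOG2 field is therefore a small operand tree: the outer node carries the bucketing function while operands[0] carries the plain value function, and creation, name lookup, and teardown all walk operands under a depth guard. A toy model of that tree with simplified stand-in structures, not the kernel's:

#include <stdio.h>
#include <stdlib.h>

#define OPERANDS_MAX 2

struct field;
typedef unsigned long long (*field_fn)(struct field *f, unsigned long long v);

struct field {
        field_fn fn;
        struct field *operands[OPERANDS_MAX];
};

static unsigned long long fn_raw(struct field *f, unsigned long long v)
{
        return v;                       /* leaf: the event's raw value */
}

static unsigned long long fn_log2(struct field *f, unsigned long long v)
{
        unsigned long long val = f->operands[0]->fn(f->operands[0], v);
        unsigned long long r = 0;

        while (val >>= 1)
                r++;
        return r;
}

static void destroy_field(struct field *f, unsigned int level)
{
        if (level > 2 || !f)
                return;                 /* same depth guard as destroy_hist_field() */
        for (int i = 0; i < OPERANDS_MAX; i++)
                destroy_field(f->operands[i], level + 1);
        free(f);
}

int main(void)
{
        struct field *leaf = calloc(1, sizeof(*leaf));
        struct field *log2 = calloc(1, sizeof(*log2));

        leaf->fn = fn_raw;
        log2->fn = fn_log2;
        log2->operands[0] = leaf;

        printf("log2 bucket of 4096: %llu\n", log2->fn(log2, 4096));
        destroy_field(log2, 0);         /* frees the child, then the node itself */
        return 0;
}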
@@ -405,7 +445,7 @@ static void destroy_hist_fields(struct hist_trigger_data *hist_data)
 
        for (i = 0; i < TRACING_MAP_FIELDS_MAX; i++) {
                if (hist_data->fields[i]) {
-                       destroy_hist_field(hist_data->fields[i]);
+                       destroy_hist_field(hist_data->fields[i], 0);
                        hist_data->fields[i] = NULL;
                }
        }
@@ -450,7 +490,7 @@ static int create_val_field(struct hist_trigger_data *hist_data,
        }
 
        field = trace_find_event_field(file->event_call, field_name);
-       if (!field) {
+       if (!field || !field->size) {
                ret = -EINVAL;
                goto out;
        }
@@ -548,7 +588,7 @@ static int create_key_field(struct hist_trigger_data *hist_data,
                }
 
                field = trace_find_event_field(file->event_call, field_name);
-               if (!field) {
+               if (!field || !field->size) {
                        ret = -EINVAL;
                        goto out;
                }
@@ -653,7 +693,6 @@ static int is_descending(const char *str)
 static int create_sort_keys(struct hist_trigger_data *hist_data)
 {
        char *fields_str = hist_data->attrs->sort_key_str;
-       struct ftrace_event_field *field = NULL;
        struct tracing_map_sort_key *sort_key;
        int descending, ret = 0;
        unsigned int i, j;
@@ -670,7 +709,9 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
        }
 
        for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
+               struct hist_field *hist_field;
                char *field_str, *field_name;
+               const char *test_name;
 
                sort_key = &hist_data->sort_keys[i];
 
@@ -703,8 +744,10 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
                }
 
                for (j = 1; j < hist_data->n_fields; j++) {
-                       field = hist_data->fields[j]->field;
-                       if (field && (strcmp(field_name, field->name) == 0)) {
+                       hist_field = hist_data->fields[j];
+                       test_name = hist_field_name(hist_field, 0);
+
+                       if (strcmp(field_name, test_name) == 0) {
                                sort_key->field_idx = j;
                                descending = is_descending(field_str);
                                if (descending < 0) {
@@ -952,6 +995,7 @@ hist_trigger_entry_print(struct seq_file *m,
        struct hist_field *key_field;
        char str[KSYM_SYMBOL_LEN];
        bool multiline = false;
+       const char *field_name;
        unsigned int i;
        u64 uval;
 
@@ -963,26 +1007,27 @@ hist_trigger_entry_print(struct seq_file *m,
                if (i > hist_data->n_vals)
                        seq_puts(m, ", ");
 
+               field_name = hist_field_name(key_field, 0);
+
                if (key_field->flags & HIST_FIELD_FL_HEX) {
                        uval = *(u64 *)(key + key_field->offset);
-                       seq_printf(m, "%s: %llx",
-                                  key_field->field->name, uval);
+                       seq_printf(m, "%s: %llx", field_name, uval);
                } else if (key_field->flags & HIST_FIELD_FL_SYM) {
                        uval = *(u64 *)(key + key_field->offset);
                        sprint_symbol_no_offset(str, uval);
-                       seq_printf(m, "%s: [%llx] %-45s",
-                                  key_field->field->name, uval, str);
+                       seq_printf(m, "%s: [%llx] %-45s", field_name,
+                                  uval, str);
                } else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
                        uval = *(u64 *)(key + key_field->offset);
                        sprint_symbol(str, uval);
-                       seq_printf(m, "%s: [%llx] %-55s",
-                                  key_field->field->name, uval, str);
+                       seq_printf(m, "%s: [%llx] %-55s", field_name,
+                                  uval, str);
                } else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
                        char *comm = elt->private_data;
 
                        uval = *(u64 *)(key + key_field->offset);
-                       seq_printf(m, "%s: %-16s[%10llu]",
-                                  key_field->field->name, comm, uval);
+                       seq_printf(m, "%s: %-16s[%10llu]", field_name,
+                                  comm, uval);
                } else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
                        const char *syscall_name;
 
@@ -991,8 +1036,8 @@ hist_trigger_entry_print(struct seq_file *m,
                        if (!syscall_name)
                                syscall_name = "unknown_syscall";
 
-                       seq_printf(m, "%s: %-30s[%3llu]",
-                                  key_field->field->name, syscall_name, uval);
+                       seq_printf(m, "%s: %-30s[%3llu]", field_name,
+                                  syscall_name, uval);
                } else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
                        seq_puts(m, "stacktrace:\n");
                        hist_trigger_stacktrace_print(m,
@@ -1000,15 +1045,14 @@ hist_trigger_entry_print(struct seq_file *m,
                                                      HIST_STACKTRACE_DEPTH);
                        multiline = true;
                } else if (key_field->flags & HIST_FIELD_FL_LOG2) {
-                       seq_printf(m, "%s: ~ 2^%-2llu", key_field->field->name,
+                       seq_printf(m, "%s: ~ 2^%-2llu", field_name,
                                   *(u64 *)(key + key_field->offset));
                } else if (key_field->flags & HIST_FIELD_FL_STRING) {
-                       seq_printf(m, "%s: %-50s", key_field->field->name,
+                       seq_printf(m, "%s: %-50s", field_name,
                                   (char *)(key + key_field->offset));
                } else {
                        uval = *(u64 *)(key + key_field->offset);
-                       seq_printf(m, "%s: %10llu", key_field->field->name,
-                                  uval);
+                       seq_printf(m, "%s: %10llu", field_name, uval);
                }
        }
 
@@ -1021,13 +1065,13 @@ hist_trigger_entry_print(struct seq_file *m,
                   tracing_map_read_sum(elt, HITCOUNT_IDX));
 
        for (i = 1; i < hist_data->n_vals; i++) {
+               field_name = hist_field_name(hist_data->fields[i], 0);
+
                if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
-                       seq_printf(m, "  %s: %10llx",
-                                  hist_data->fields[i]->field->name,
+                       seq_printf(m, "  %s: %10llx", field_name,
                                   tracing_map_read_sum(elt, i));
                } else {
-                       seq_printf(m, "  %s: %10llu",
-                                  hist_data->fields[i]->field->name,
+                       seq_printf(m, "  %s: %10llu", field_name,
                                   tracing_map_read_sum(elt, i));
                }
        }
@@ -1062,7 +1106,7 @@ static void hist_trigger_show(struct seq_file *m,
                              struct event_trigger_data *data, int n)
 {
        struct hist_trigger_data *hist_data;
-       int n_entries, ret = 0;
+       int n_entries;
 
        if (n > 0)
                seq_puts(m, "\n\n");
@@ -1073,10 +1117,8 @@ static void hist_trigger_show(struct seq_file *m,
 
        hist_data = data->private_data;
        n_entries = print_entries(m, hist_data);
-       if (n_entries < 0) {
-               ret = n_entries;
+       if (n_entries < 0)
                n_entries = 0;
-       }
 
        seq_printf(m, "\nTotals:\n    Hits: %llu\n    Entries: %u\n    Dropped: %llu\n",
                   (u64)atomic64_read(&hist_data->map->hits),
@@ -1142,7 +1184,9 @@ static const char *get_hist_field_flags(struct hist_field *hist_field)
 
 static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
 {
-       seq_printf(m, "%s", hist_field->field->name);
+       const char *field_name = hist_field_name(hist_field, 0);
+
+       seq_printf(m, "%s", field_name);
        if (hist_field->flags) {
                const char *flags_str = get_hist_field_flags(hist_field);
 
index 7758bc0617cb15d8731defbc67912b5eb46246c2..03ecb4465ee4587290e0474143f425f892771140 100644 (file)
 
 #include "trace.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/preemptirq.h>
+
+#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
 static struct trace_array              *irqsoff_trace __read_mostly;
 static int                             tracer_enabled __read_mostly;
 
@@ -462,64 +466,44 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
 
 #else /* !CONFIG_PROVE_LOCKING */
 
-/*
- * Stubs:
- */
-
-void trace_softirqs_on(unsigned long ip)
-{
-}
-
-void trace_softirqs_off(unsigned long ip)
-{
-}
-
-inline void print_irqtrace_events(struct task_struct *curr)
-{
-}
-
 /*
  * We are only interested in hardirq on/off events:
  */
-void trace_hardirqs_on(void)
+static inline void tracer_hardirqs_on(void)
 {
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
-EXPORT_SYMBOL(trace_hardirqs_on);
 
-void trace_hardirqs_off(void)
+static inline void tracer_hardirqs_off(void)
 {
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
-EXPORT_SYMBOL(trace_hardirqs_off);
 
-__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
+static inline void tracer_hardirqs_on_caller(unsigned long caller_addr)
 {
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, caller_addr);
 }
-EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
-__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
+static inline void tracer_hardirqs_off_caller(unsigned long caller_addr)
 {
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, caller_addr);
 }
-EXPORT_SYMBOL(trace_hardirqs_off_caller);
 
 #endif /* CONFIG_PROVE_LOCKING */
 #endif /*  CONFIG_IRQSOFF_TRACER */
 
 #ifdef CONFIG_PREEMPT_TRACER
-void trace_preempt_on(unsigned long a0, unsigned long a1)
+static inline void tracer_preempt_on(unsigned long a0, unsigned long a1)
 {
        if (preempt_trace() && !irq_trace())
                stop_critical_timing(a0, a1);
 }
 
-void trace_preempt_off(unsigned long a0, unsigned long a1)
+static inline void tracer_preempt_off(unsigned long a0, unsigned long a1)
 {
        if (preempt_trace() && !irq_trace())
                start_critical_timing(a0, a1);
@@ -781,3 +765,100 @@ __init static int init_irqsoff_tracer(void)
        return 0;
 }
 core_initcall(init_irqsoff_tracer);
+#endif /* CONFIG_IRQSOFF_TRACER || CONFIG_PREEMPT_TRACER */
+
+#ifndef CONFIG_IRQSOFF_TRACER
+static inline void tracer_hardirqs_on(void) { }
+static inline void tracer_hardirqs_off(void) { }
+static inline void tracer_hardirqs_on_caller(unsigned long caller_addr) { }
+static inline void tracer_hardirqs_off_caller(unsigned long caller_addr) { }
+#endif
+
+#ifndef CONFIG_PREEMPT_TRACER
+static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
+static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
+#endif
+
+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
+/* Per-cpu variable to prevent redundant calls when IRQs already off */
+static DEFINE_PER_CPU(int, tracing_irq_cpu);
+
+void trace_hardirqs_on(void)
+{
+       if (!this_cpu_read(tracing_irq_cpu))
+               return;
+
+       trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+       tracer_hardirqs_on();
+
+       this_cpu_write(tracing_irq_cpu, 0);
+}
+EXPORT_SYMBOL(trace_hardirqs_on);
+
+void trace_hardirqs_off(void)
+{
+       if (this_cpu_read(tracing_irq_cpu))
+               return;
+
+       this_cpu_write(tracing_irq_cpu, 1);
+
+       trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+       tracer_hardirqs_off();
+}
+EXPORT_SYMBOL(trace_hardirqs_off);
+
+__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
+{
+       if (!this_cpu_read(tracing_irq_cpu))
+               return;
+
+       trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
+       tracer_hardirqs_on_caller(caller_addr);
+
+       this_cpu_write(tracing_irq_cpu, 0);
+}
+EXPORT_SYMBOL(trace_hardirqs_on_caller);
+
+__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
+{
+       if (this_cpu_read(tracing_irq_cpu))
+               return;
+
+       this_cpu_write(tracing_irq_cpu, 1);
+
+       trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
+       tracer_hardirqs_off_caller(caller_addr);
+}
+EXPORT_SYMBOL(trace_hardirqs_off_caller);
+
+/*
+ * Stubs:
+ */
+
+void trace_softirqs_on(unsigned long ip)
+{
+}
+
+void trace_softirqs_off(unsigned long ip)
+{
+}
+
+inline void print_irqtrace_events(struct task_struct *curr)
+{
+}
+#endif
+
+#if defined(CONFIG_PREEMPT_TRACER) || \
+       (defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
+void trace_preempt_on(unsigned long a0, unsigned long a1)
+{
+       trace_preempt_enable_rcuidle(a0, a1);
+       tracer_preempt_on(a0, a1);
+}
+
+void trace_preempt_off(unsigned long a0, unsigned long a1)
+{
+       trace_preempt_disable_rcuidle(a0, a1);
+       tracer_preempt_off(a0, a1);
+}
+#endif
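The tracing_irq_cpu flag makes these hooks idempotent per CPU: only the first hardirqs_off() emits a disable event, and an enable event fires only when a disable was actually recorded, so nested or redundant calls collapse into a single disable/enable pair. A single-threaded sketch of the guard, with a plain int standing in for the per-CPU variable:

#include <stdio.h>

static int tracing_irq_cpu;

static void hardirqs_off(void)
{
        if (tracing_irq_cpu)
                return;                 /* the disable was already traced */
        tracing_irq_cpu = 1;
        printf("irq_disable event\n");
}

static void hardirqs_on(void)
{
        if (!tracing_irq_cpu)
                return;                 /* no disable recorded: stay quiet */
        printf("irq_enable event\n");
        tracing_irq_cpu = 0;
}

int main(void)
{
        hardirqs_off();                 /* emits irq_disable */
        hardirqs_off();                 /* nested: suppressed */
        hardirqs_on();                  /* emits irq_enable */
        hardirqs_on();                  /* redundant: suppressed */
        return 0;
}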
index abf92e478cfb59e8b82c8851c7238e4f75669f2c..492700c5fb4d27979d119b13028024a06f27b400 100644 (file)
@@ -907,8 +907,8 @@ static int probes_open(struct inode *inode, struct file *file)
 static ssize_t probes_write(struct file *file, const char __user *buffer,
                            size_t count, loff_t *ppos)
 {
-       return traceprobe_probes_write(file, buffer, count, ppos,
-                       create_trace_kprobe);
+       return trace_parse_run_command(file, buffer, count, ppos,
+                                      create_trace_kprobe);
 }
 
 static const struct file_operations kprobe_events_ops = {
@@ -1199,7 +1199,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
        memset(&entry[1], 0, dsize);
        store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
-                             head, NULL, NULL);
+                             head, NULL);
 }
 NOKPROBE_SYMBOL(kprobe_perf_func);
 
@@ -1234,7 +1234,7 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
        entry->ret_ip = (unsigned long)ri->ret_addr;
        store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
-                             head, NULL, NULL);
+                             head, NULL);
 }
 NOKPROBE_SYMBOL(kretprobe_perf_func);
 #endif /* CONFIG_PERF_EVENTS */
@@ -1431,9 +1431,9 @@ static __init int kprobe_trace_self_tests_init(void)
 
        pr_info("Testing kprobe tracing: ");
 
-       ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
-                                 "$stack $stack0 +0($stack)",
-                                 create_trace_kprobe);
+       ret = trace_run_command("p:testprobe kprobe_trace_selftest_target "
+                               "$stack $stack0 +0($stack)",
+                               create_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on probing function entry.\n");
                warn++;
@@ -1453,8 +1453,8 @@ static __init int kprobe_trace_self_tests_init(void)
                }
        }
 
-       ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
-                                 "$retval", create_trace_kprobe);
+       ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target "
+                               "$retval", create_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on probing function return.\n");
                warn++;
@@ -1524,13 +1524,13 @@ static __init int kprobe_trace_self_tests_init(void)
                        disable_trace_kprobe(tk, file);
        }
 
-       ret = traceprobe_command("-:testprobe", create_trace_kprobe);
+       ret = trace_run_command("-:testprobe", create_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on deleting a probe.\n");
                warn++;
        }
 
-       ret = traceprobe_command("-:testprobe2", create_trace_kprobe);
+       ret = trace_run_command("-:testprobe2", create_trace_kprobe);
        if (WARN_ON_ONCE(ret)) {
                pr_warn("error on deleting a probe.\n");
                warn++;
index 52478f033f88f2d38315c887406608554c391a32..d5935730867709232f547fd08ef6a3659ea819c9 100644 (file)
@@ -623,92 +623,6 @@ void traceprobe_free_probe_arg(struct probe_arg *arg)
        kfree(arg->comm);
 }
 
-int traceprobe_command(const char *buf, int (*createfn)(int, char **))
-{
-       char **argv;
-       int argc, ret;
-
-       argc = 0;
-       ret = 0;
-       argv = argv_split(GFP_KERNEL, buf, &argc);
-       if (!argv)
-               return -ENOMEM;
-
-       if (argc)
-               ret = createfn(argc, argv);
-
-       argv_free(argv);
-
-       return ret;
-}
-
-#define WRITE_BUFSIZE  4096
-
-ssize_t traceprobe_probes_write(struct file *file, const char __user *buffer,
-                               size_t count, loff_t *ppos,
-                               int (*createfn)(int, char **))
-{
-       char *kbuf, *buf, *tmp;
-       int ret = 0;
-       size_t done = 0;
-       size_t size;
-
-       kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
-       if (!kbuf)
-               return -ENOMEM;
-
-       while (done < count) {
-               size = count - done;
-
-               if (size >= WRITE_BUFSIZE)
-                       size = WRITE_BUFSIZE - 1;
-
-               if (copy_from_user(kbuf, buffer + done, size)) {
-                       ret = -EFAULT;
-                       goto out;
-               }
-               kbuf[size] = '\0';
-               buf = kbuf;
-               do {
-                       tmp = strchr(buf, '\n');
-                       if (tmp) {
-                               *tmp = '\0';
-                               size = tmp - buf + 1;
-                       } else {
-                               size = strlen(buf);
-                               if (done + size < count) {
-                                       if (buf != kbuf)
-                                               break;
-                                       /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
-                                       pr_warn("Line length is too long: Should be less than %d\n",
-                                               WRITE_BUFSIZE - 2);
-                                       ret = -EINVAL;
-                                       goto out;
-                               }
-                       }
-                       done += size;
-
-                       /* Remove comments */
-                       tmp = strchr(buf, '#');
-
-                       if (tmp)
-                               *tmp = '\0';
-
-                       ret = traceprobe_command(buf, createfn);
-                       if (ret)
-                               goto out;
-                       buf += size;
-
-               } while (done < count);
-       }
-       ret = done;
-
-out:
-       kfree(kbuf);
-
-       return ret;
-}
-
 static int __set_print_fmt(struct trace_probe *tp, char *buf, int len,
                           bool is_return)
 {
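The parser removed above lives on as trace_run_command()/trace_parse_run_command() in trace.c, available to kprobe and uprobe events alike: copy the user buffer in chunks, split it into lines, strip '#' comments, and hand each remaining command to createfn. A userspace sketch of that loop, minus the chunked copy_from_user() handling and with illustrative names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int createfn(int argc, char **argv)
{
        printf("command: %s (%d args)\n", argv[0], argc);
        return 0;
}

static int run_commands(const char *buf)
{
        char *copy = strdup(buf);
        char *save, *line;
        int ret = 0;

        for (line = strtok_r(copy, "\n", &save); line && !ret;
             line = strtok_r(NULL, "\n", &save)) {
                char *hash = strchr(line, '#');
                char *argv[16], *arg, *asave;
                int argc = 0;

                if (hash)
                        *hash = '\0';   /* strip a trailing comment */
                for (arg = strtok_r(line, " \t", &asave);
                     arg && argc < 16; arg = strtok_r(NULL, " \t", &asave))
                        argv[argc++] = arg;
                if (argc)
                        ret = createfn(argc, argv);
        }
        free(copy);
        return ret;
}

int main(void)
{
        return run_commands("p:myprobe do_sys_open # a comment\n"
                            "-:myprobe\n");
}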
index 903273c93e6167afcbe2de99451a906c2e79ab1f..fb66e3eaa192a24d924ec743dd2814283f8298c8 100644 (file)
@@ -42,7 +42,6 @@
 
 #define MAX_TRACE_ARGS         128
 #define MAX_ARGSTR_LEN         63
-#define MAX_EVENT_NAME_LEN     64
 #define MAX_STRING_SIZE                PATH_MAX
 
 /* Reserved field names */
@@ -356,12 +355,6 @@ extern void traceprobe_free_probe_arg(struct probe_arg *arg);
 
 extern int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset);
 
-extern ssize_t traceprobe_probes_write(struct file *file,
-               const char __user *buffer, size_t count, loff_t *ppos,
-               int (*createfn)(int, char**));
-
-extern int traceprobe_command(const char *buf, int (*createfn)(int, char**));
-
 /* Sum up total data length for dynamic arrays (strings) */
 static nokprobe_inline int
 __get_data_size(struct trace_probe *tp, struct pt_regs *regs)
index cd70eb5df38ecce05eba1bc7a485b6cb59e674f1..11e9daa4a568a22c38669269a89769b0b3bae9f3 100644 (file)
@@ -60,7 +60,7 @@ static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
  * Test the trace buffer to see if all the elements
  * are still sane.
  */
-static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
+static int __maybe_unused trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
 {
        unsigned long flags, cnt = 0;
        int cpu, ret = 0;
@@ -1151,38 +1151,6 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 }
 #endif /* CONFIG_SCHED_TRACER */
 
-#ifdef CONFIG_CONTEXT_SWITCH_TRACER
-int
-trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
-{
-       unsigned long count;
-       int ret;
-
-       /* start the tracing */
-       ret = tracer_init(trace, tr);
-       if (ret) {
-               warn_failed_init_tracer(trace, ret);
-               return ret;
-       }
-
-       /* Sleep for a 1/10 of a second */
-       msleep(100);
-       /* stop the tracing. */
-       tracing_stop();
-       /* check the trace buffer */
-       ret = trace_test_buffer(&tr->trace_buffer, &count);
-       trace->reset(tr);
-       tracing_start();
-
-       if (!ret && !count) {
-               printk(KERN_CONT ".. no entries found ..");
-               ret = -1;
-       }
-
-       return ret;
-}
-#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
-
 #ifdef CONFIG_BRANCH_TRACER
 int
 trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
index 19bcaaac884be8ae033184d3ed2ff0a53d35a9f2..f93a56d2db275be64df083344b68ec65f3c32473 100644 (file)
@@ -625,7 +625,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 
        perf_trace_buf_submit(rec, size, rctx,
                              sys_data->enter_event->event.type, 1, regs,
-                             head, NULL, NULL);
+                             head, NULL);
 }
 
 static int perf_sysenter_enable(struct trace_event_call *call)
@@ -721,7 +721,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
        }
 
        perf_trace_buf_submit(rec, size, rctx, sys_data->exit_event->event.type,
-                             1, regs, head, NULL, NULL);
+                             1, regs, head, NULL);
 }
 
 static int perf_sysexit_enable(struct trace_event_call *call)
index 153c0e4114611fde7e7212c7a9bf40b8e42578e5..40592e7b3568bcfd41220f579fc05066e3675b33 100644 (file)
@@ -651,7 +651,7 @@ static int probes_open(struct inode *inode, struct file *file)
 static ssize_t probes_write(struct file *file, const char __user *buffer,
                            size_t count, loff_t *ppos)
 {
-       return traceprobe_probes_write(file, buffer, count, ppos, create_trace_uprobe);
+       return trace_parse_run_command(file, buffer, count, ppos, create_trace_uprobe);
 }
 
 static const struct file_operations uprobe_events_ops = {
@@ -1155,7 +1155,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
        }
 
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
-                             head, NULL, NULL);
+                             head, NULL);
  out:
        preempt_enable();
 }
index 305039b122fafba242f73b5982289ce4a12a6e20..07e75344725ba254f5c42a314659142f0fd48528 100644 (file)
@@ -428,7 +428,8 @@ __tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
 
                if (test_key && test_key == key_hash && entry->val &&
                    keys_match(key, entry->val->key, map->key_size)) {
-                       atomic64_inc(&map->hits);
+                       if (!lookup_only)
+                               atomic64_inc(&map->hits);
                        return entry->val;
                }
 
index ab0ca77331d0429fbab0e38b8e3525bbaa734bce..5b5bbf8ae550dfe2c7d6b08496d1a8327b0d6a10 100644 (file)
@@ -6,7 +6,7 @@
 #define TRACING_MAP_BITS_MAX           17
 #define TRACING_MAP_BITS_MIN           7
 
-#define TRACING_MAP_KEYS_MAX           2
+#define TRACING_MAP_KEYS_MAX           3
 #define TRACING_MAP_VALS_MAX           3
 #define TRACING_MAP_FIELDS_MAX         (TRACING_MAP_KEYS_MAX + \
                                         TRACING_MAP_VALS_MAX)
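Raising TRACING_MAP_KEYS_MAX from 2 to 3 lets hist triggers key on up to three event fields at once. A hedged usage sketch that arms such a compound-key trigger from userspace; the tracefs path and the sched_switch field names are the conventional ones, but whether the file exists depends on the running kernel's configuration:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *trigger =
                "/sys/kernel/debug/tracing/events/sched/sched_switch/trigger";
        /* Three key fields: only representable once TRACING_MAP_KEYS_MAX >= 3. */
        const char *cmd = "hist:keys=prev_pid,prev_prio,prev_comm";
        int fd = open(trigger, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, cmd, strlen(cmd)) < 0)
                perror("write");
        close(fd);
        return 0;
}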