ftrace: Return pt_regs to function trace callback
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index a008663d86c8e740c64c1cc26977a7a199917b9b..6ff07ad0ede30343b454da332d41697610b8c304 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
 
 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
 
+static struct ftrace_ops ftrace_list_end __read_mostly = {
+       .func           = ftrace_stub,
+};
+
 /* ftrace_enabled is a method to turn ftrace on or off */
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
 /* Quick disabling of function tracer. */
-int function_trace_stop;
+int function_trace_stop __read_mostly;
+
+/* Current function tracing op */
+struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
 
 /* List for set_ftrace_pid's pids. */
 LIST_HEAD(ftrace_pids);
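
The net effect of this hunk is that function_trace_op always points at a
valid ftrace_ops (falling back to the ftrace_list_end stub), so an
architecture trampoline can load it unconditionally and pass it as the
third callback argument. For reference, the callback type this series
moves to in include/linux/ftrace.h is:

	typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
				      struct ftrace_ops *op, struct pt_regs *regs);
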
@@ -86,22 +93,22 @@ static int ftrace_disabled __read_mostly;
 
 static DEFINE_MUTEX(ftrace_lock);
 
-static struct ftrace_ops ftrace_list_end __read_mostly = {
-       .func           = ftrace_stub,
-};
-
 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
-static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
-ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
 static struct ftrace_ops control_ops;
 
-static void
-ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
+#if ARCH_SUPPORTS_FTRACE_OPS
+static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+                                struct ftrace_ops *op, struct pt_regs *regs);
+#else
+/* See comment below, where ftrace_ops_list_func is defined */
+static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
+#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
+#endif
 
 /*
  * Traverse the ftrace_global_list, invoking all entries.  The reason that we
@@ -112,29 +119,29 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
  *
  * Silly Alpha and silly pointer-speculation compiler optimizations!
  */
-static void ftrace_global_list_func(unsigned long ip,
-                                   unsigned long parent_ip)
+static void
+ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
+                       struct ftrace_ops *op, struct pt_regs *regs)
 {
-       struct ftrace_ops *op;
-
        if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
                return;
 
        trace_recursion_set(TRACE_GLOBAL_BIT);
        op = rcu_dereference_raw(ftrace_global_list); /*see above*/
        while (op != &ftrace_list_end) {
-               op->func(ip, parent_ip);
+               op->func(ip, parent_ip, op, regs);
                op = rcu_dereference_raw(op->next); /*see above*/
        };
        trace_recursion_clear(TRACE_GLOBAL_BIT);
 }
 
-static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
+static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
+                           struct ftrace_ops *op, struct pt_regs *regs)
 {
        if (!test_tsk_trace_trace(current))
                return;
 
-       ftrace_pid_function(ip, parent_ip);
+       ftrace_pid_function(ip, parent_ip, op, regs);
 }
 
 static void set_ftrace_pid_function(ftrace_func_t func)
@@ -153,25 +160,9 @@ static void set_ftrace_pid_function(ftrace_func_t func)
 void clear_ftrace_function(void)
 {
        ftrace_trace_function = ftrace_stub;
-       __ftrace_trace_function = ftrace_stub;
-       __ftrace_trace_function_delay = ftrace_stub;
        ftrace_pid_function = ftrace_stub;
 }
 
-#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-/*
- * For those archs that do not test ftrace_trace_stop in their
- * mcount call site, we need to do it from C.
- */
-static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
-{
-       if (function_trace_stop)
-               return;
-
-       __ftrace_trace_function(ip, parent_ip);
-}
-#endif
-
 static void control_ops_disable_all(struct ftrace_ops *ops)
 {
        int cpu;
@@ -230,28 +221,26 @@ static void update_ftrace_function(void)
 
        /*
         * If we are at the end of the list and this ops is
-        * not dynamic, then have the mcount trampoline call
-        * the function directly
+        * not dynamic and the arch supports passing ops, then have the
+        * mcount trampoline call the function directly.
         */
        if (ftrace_ops_list == &ftrace_list_end ||
            (ftrace_ops_list->next == &ftrace_list_end &&
-            !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
+            !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
+            !FTRACE_FORCE_LIST_FUNC)) {
+               /* Set the ftrace_ops that the arch callback uses */
+               if (ftrace_ops_list == &global_ops)
+                       function_trace_op = ftrace_global_list;
+               else
+                       function_trace_op = ftrace_ops_list;
                func = ftrace_ops_list->func;
-       else
+       } else {
+               /* Just use the default ftrace_ops */
+               function_trace_op = &ftrace_list_end;
                func = ftrace_ops_list_func;
+       }
 
-#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
        ftrace_trace_function = func;
-#else
-#ifdef CONFIG_DYNAMIC_FTRACE
-       /* do not update till all functions have been modified */
-       __ftrace_trace_function_delay = func;
-#else
-       __ftrace_trace_function = func;
-#endif
-       ftrace_trace_function =
-               (func == ftrace_stub) ? func : ftrace_test_stop_func;
-#endif
 }
 
 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
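
Put differently, the direct-call fast path is now taken either for an
empty list or for a single non-dynamic ops on an arch that can pass ops
and regs (FTRACE_FORCE_LIST_FUNC evaluating to 0). A sketch of the
resulting dispatch, with hypothetical ops names:

	register_ftrace_function(&my_ops);	/* one static ops: trampoline
						   calls my_ops.func directly,
						   function_trace_op == &my_ops */
	register_ftrace_function(&other_ops);	/* two ops: trampoline falls
						   back to ftrace_ops_list_func,
						   which walks ftrace_ops_list
						   and filters by ip            */
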
@@ -312,7 +301,7 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list,
 
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
-       if (ftrace_disabled)
+       if (unlikely(ftrace_disabled))
                return -ENODEV;
 
        if (FTRACE_WARN_ON(ops == &global_ops))
@@ -773,7 +762,8 @@ ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
 }
 
 static void
-function_profile_call(unsigned long ip, unsigned long parent_ip)
+function_profile_call(unsigned long ip, unsigned long parent_ip,
+                     struct ftrace_ops *ops, struct pt_regs *regs)
 {
        struct ftrace_profile_stat *stat;
        struct ftrace_profile *rec;
@@ -803,7 +793,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip)
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static int profile_graph_entry(struct ftrace_graph_ent *trace)
 {
-       function_profile_call(trace->func, 0);
+       function_profile_call(trace->func, 0, NULL, NULL);
        return 1;
 }
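
Note that call sites like the one above may hand the callback NULL for
both op and regs, so any callback that wants register state has to guard
for it. A minimal defensive pattern (callback name hypothetical):

	static void my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct pt_regs *regs)
	{
		if (!regs)
			return;		/* caller passed no register state */
		/* safe to inspect regs here, e.g. instruction_pointer(regs) */
	}
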
 
@@ -1882,16 +1872,6 @@ static void ftrace_run_update_code(int command)
         */
        arch_ftrace_update_code(command);
 
-#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-       /*
-        * For archs that call ftrace_test_stop_func(), we must
-        * wait till after we update all the function callers
-        * before we update the callback. This keeps different
-        * ops that record different functions from corrupting
-        * each other.
-        */
-       __ftrace_trace_function = __ftrace_trace_function_delay;
-#endif
        function_trace_stop--;
 
        ret = ftrace_arch_code_modify_post_process();
@@ -2790,8 +2770,8 @@ static int __init ftrace_mod_cmd_init(void)
 }
 device_initcall(ftrace_mod_cmd_init);
 
-static void
-function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
+static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
+                                     struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
        struct ftrace_func_probe *entry;
        struct hlist_head *hhd;
@@ -3942,10 +3922,9 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 static void
-ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
+ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
+                       struct ftrace_ops *op, struct pt_regs *regs)
 {
-       struct ftrace_ops *op;
-
        if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
                return;
 
@@ -3959,7 +3938,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
        while (op != &ftrace_list_end) {
                if (!ftrace_function_local_disabled(op) &&
                    ftrace_ops_test(op, ip))
-                       op->func(ip, parent_ip);
+                       op->func(ip, parent_ip, op, regs);
 
                op = rcu_dereference_raw(op->next);
        };
@@ -3971,11 +3950,15 @@ static struct ftrace_ops control_ops = {
        .func = ftrace_ops_control_func,
 };
 
-static void
-ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
+static inline void
+__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+                      struct ftrace_ops *ignored, struct pt_regs *regs)
 {
        struct ftrace_ops *op;
 
+       if (function_trace_stop)
+               return;
+
        if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
                return;
 
@@ -3988,13 +3971,39 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
        op = rcu_dereference_raw(ftrace_ops_list);
        while (op != &ftrace_list_end) {
                if (ftrace_ops_test(op, ip))
-                       op->func(ip, parent_ip);
+                       op->func(ip, parent_ip, op, regs);
                op = rcu_dereference_raw(op->next);
        };
        preempt_enable_notrace();
        trace_recursion_clear(TRACE_INTERNAL_BIT);
 }
 
+/*
+ * Some archs only support passing ip and parent_ip. Even though
+ * the list function ignores the op parameter, we do not want any
+ * C side effects, where a function is called without the caller
+ * sending a third parameter.
+ * Archs are expected to support both regs and ftrace_ops at the
+ * same time: if they support ftrace_ops, it is assumed they also
+ * support regs.
+ * If callbacks want to use regs, they must either check for regs
+ * being NULL, or check that ARCH_SUPPORTS_FTRACE_SAVE_REGS is set.
+ * Note, ARCH_SUPPORTS_FTRACE_SAVE_REGS expects a full regs to be
+ * saved. An architecture can pass partial regs with ftrace_ops and
+ * still set ARCH_SUPPORTS_FTRACE_OPS.
+ */
+#if ARCH_SUPPORTS_FTRACE_OPS
+static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+                                struct ftrace_ops *op, struct pt_regs *regs)
+{
+       __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
+}
+#else
+static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
+{
+       __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
+}
+#endif
+
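
The #if above pairs with the prototype block near the top of the file:
on archs without ftrace_ops support, the two-argument ftrace_ops_no_ops
is cast to ftrace_func_t so the rest of the code keeps using one name,
and both variants funnel into __ftrace_ops_list_func with a NULL op
(the walker derives the real ops from ftrace_ops_list anyway). An arch
opts in from its own headers; illustratively:

	/* arch/<arch>/include/asm/ftrace.h (illustrative) */
	#define ARCH_SUPPORTS_FTRACE_OPS 1	/* mcount passes op and regs */
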
 static void clear_ftrace_swapper(void)
 {
        struct task_struct *p;
@@ -4299,16 +4308,12 @@ int register_ftrace_function(struct ftrace_ops *ops)
 
        mutex_lock(&ftrace_lock);
 
-       if (unlikely(ftrace_disabled))
-               goto out_unlock;
-
        ret = __register_ftrace_function(ops);
        if (!ret)
                ret = ftrace_startup(ops, 0);
 
-
- out_unlock:
        mutex_unlock(&ftrace_lock);
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(register_ftrace_function);
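
Taken together, a minimal user of the reworked interface looks like the
sketch below (names hypothetical, error handling elided); regs may be
NULL unless the arch saves full registers:

	static void my_trace_func(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op, struct pt_regs *regs)
	{
		/* called for every traced function */
	}

	static struct ftrace_ops my_trace_ops = {
		.func = my_trace_func,
	};

	/* module init: */	register_ftrace_function(&my_trace_ops);
	/* module exit: */	unregister_ftrace_function(&my_trace_ops);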