riscv: stacktrace: Add USER_STACKTRACE support
authorJinjie Ruan <ruanjinjie@huawei.com>
Mon, 8 Jul 2024 03:28:47 +0000 (11:28 +0800)
committerPalmer Dabbelt <palmer@rivosinc.com>
Sun, 15 Sep 2024 06:57:16 +0000 (23:57 -0700)
Currently, userstacktrace is unsupported on riscv. Use the
perf_callchain_user() code as a blueprint to implement
arch_stack_walk_user(), which adds userstacktrace support on riscv.
Meanwhile, arch_stack_walk_user() can be used to simplify the
implementation of perf_callchain_user().

An ftrace test case is shown as below:

# cd /sys/kernel/debug/tracing
# echo 1 > options/userstacktrace
# echo 1 > options/sym-userobj
# echo 1 > events/sched/sched_process_fork/enable
# cat trace
......
            bash-178     [000] ...1.    97.968395: sched_process_fork: comm=bash pid=178 child_comm=bash child_pid=231
            bash-178     [000] ...1.    97.970075: <user stack trace>
 => /lib/libc.so.6[+0xb5090]

Also a simple perf test is ok as below:

# perf record -e cpu-clock --call-graph fp top
# perf report --call-graph

.....
  66.54%     0.00%  top      [kernel.kallsyms]            [k] ret_from_exception
            |
            ---ret_from_exception
               |
               |--58.97%--do_trap_ecall_u
               |          |
               |          |--17.34%--__riscv_sys_read
               |          |          ksys_read
               |          |          |
               |          |           --16.88%--vfs_read
               |          |                     |
               |          |                     |--10.90%--seq_read

Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
Tested-by: Jinjie Ruan <ruanjinjie@huawei.com>
Cc: Björn Töpel <bjorn@kernel.org>
Link: https://lore.kernel.org/r/20240708032847.2998158-3-ruanjinjie@huawei.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
arch/riscv/Kconfig
arch/riscv/kernel/perf_callchain.c
arch/riscv/kernel/stacktrace.c

index 0f3cd7c3a4360529331d8dd0023c11460b957622..443bac99c94a83dee5eefea5a2bd43dc59b5a716 100644 (file)
@@ -200,6 +200,7 @@ config RISCV
        select THREAD_INFO_IN_TASK
        select TRACE_IRQFLAGS_SUPPORT
        select UACCESS_MEMCPY if !MMU
+       select USER_STACKTRACE_SUPPORT
        select ZONE_DMA32 if 64BIT
 
 config CLANG_SUPPORTS_DYNAMIC_FTRACE
index 2932791e93882176bd1a515781f9e9f0d1145424..c7468af77c663ab30d5728476e882d0882d1afa5 100644 (file)
@@ -6,37 +6,9 @@
 
 #include <asm/stacktrace.h>
 
-/*
- * Get the return address for a single stackframe and return a pointer to the
- * next frame tail.
- */
-static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
-                                   unsigned long fp, unsigned long reg_ra)
+static bool fill_callchain(void *entry, unsigned long pc)
 {
-       struct stackframe buftail;
-       unsigned long ra = 0;
-       unsigned long __user *user_frame_tail =
-               (unsigned long __user *)(fp - sizeof(struct stackframe));
-
-       /* Check accessibility of one struct frame_tail beyond */
-       if (!access_ok(user_frame_tail, sizeof(buftail)))
-               return 0;
-       if (__copy_from_user_inatomic(&buftail, user_frame_tail,
-                                     sizeof(buftail)))
-               return 0;
-
-       if (reg_ra != 0)
-               ra = reg_ra;
-       else
-               ra = buftail.ra;
-
-       fp = buftail.fp;
-       if (ra != 0)
-               perf_callchain_store(entry, ra);
-       else
-               return 0;
-
-       return fp;
+       return perf_callchain_store(entry, pc) == 0;
 }
 
 /*
@@ -56,19 +28,7 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
 void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                         struct pt_regs *regs)
 {
-       unsigned long fp = 0;
-
-       fp = regs->s0;
-       perf_callchain_store(entry, regs->epc);
-
-       fp = user_backtrace(entry, fp, regs->ra);
-       while (fp && !(fp & 0x7) && entry->nr < entry->max_stack)
-               fp = user_backtrace(entry, fp, 0);
-}
-
-static bool fill_callchain(void *entry, unsigned long pc)
-{
-       return perf_callchain_store(entry, pc) == 0;
+       arch_stack_walk_user(fill_callchain, entry, regs);
 }
 
 void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
index c6d5de22463f9d54a85675f1fbb3b63ba4a47619..153a2db4c5fa147d774abaae26d8ba52316c00df 100644 (file)
@@ -162,3 +162,46 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, void
 {
        walk_stackframe(task, regs, consume_entry, cookie);
 }
+
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static unsigned long unwind_user_frame(stack_trace_consume_fn consume_entry,
+                                      void *cookie, unsigned long fp,
+                                      unsigned long reg_ra)
+{
+       struct stackframe buftail;
+       unsigned long ra = 0;
+       unsigned long __user *user_frame_tail =
+               (unsigned long __user *)(fp - sizeof(struct stackframe));
+
+       /* Check accessibility of one struct frame_tail beyond */
+       if (!access_ok(user_frame_tail, sizeof(buftail)))
+               return 0;
+       if (__copy_from_user_inatomic(&buftail, user_frame_tail,
+                                     sizeof(buftail)))
+               return 0;
+
+       ra = reg_ra ? : buftail.ra;
+
+       fp = buftail.fp;
+       if (!ra || !consume_entry(cookie, ra))
+               return 0;
+
+       return fp;
+}
+
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+                         const struct pt_regs *regs)
+{
+       unsigned long fp = 0;
+
+       fp = regs->s0;
+       if (!consume_entry(cookie, regs->epc))
+               return;
+
+       fp = unwind_user_frame(consume_entry, cookie, fp, regs->ra);
+       while (fp && !(fp & 0x7))
+               fp = unwind_user_frame(consume_entry, cookie, fp, 0);
+}