// SPDX-License-Identifier: GPL-2.0
/*
 * Based on arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */

#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>

#include <asm/inst.h>
#include <asm/module.h>

static int ftrace_modify_code(unsigned long pc, u32 old, u32 new, bool validate)
{
	u32 replaced;

	if (validate) {
		if (larch_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}

	if (larch_insn_patch_text((void *)pc, new))
		return -EPERM;

	return 0;
}
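
/*
 * All patching below funnels through the helper above. A sketch of typical
 * use (illustrative only; 'target' is a placeholder, not a symbol in this
 * file):
 *
 *	old = larch_insn_gen_nop();
 *	new = larch_insn_gen_bl(pc, target);
 *	ftrace_modify_code(pc, old, new, true);
 *
 * With validate == true the current instruction is read back and must equal
 * 'old' before it is patched. With validate == false it is not read back at
 * all; ftrace_update_ftrace_func() relies on that, since the 'bl' target at
 * ftrace_call depends on whichever ftrace_func_t was installed before.
 */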

#ifdef CONFIG_MODULES
static bool reachable_by_bl(unsigned long addr, unsigned long pc)
{
	long offset = (long)addr - (long)pc;

	return offset >= -SZ_128M && offset < SZ_128M;
}
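
/*
 * Why the 128M window: 'bl' encodes a signed 26-bit word offset
 * (offs26 << 2), i.e. +/-2^27 bytes of reach from the callsite.
 */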

static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
{
	struct plt_entry *plt = mod->arch.ftrace_trampolines;

	if (addr == FTRACE_ADDR)
		return &plt[FTRACE_PLT_IDX];
	if (addr == FTRACE_REGS_ADDR &&
	    IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		return &plt[FTRACE_REGS_PLT_IDX];

	return NULL;
}
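
/*
 * Only the two well-known ftrace entry points have trampoline slots in
 * mod->arch.ftrace_trampolines (set up at module load time); any other
 * address has no PLT here and the caller must treat NULL as unreachable.
 */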

/*
 * Find the address the callsite must branch to in order to reach '*addr'.
 *
 * Due to the limited range of the 'bl' instruction, modules may be placed
 * too far away to branch directly, in which case we must use a PLT.
 *
 * Returns true when '*addr' contains a reachable target address, or has been
 * modified to contain a PLT address. Returns false otherwise.
 */
static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, struct module *mod, unsigned long *addr)
{
	unsigned long pc = rec->ip + LOONGARCH_INSN_SIZE;
	struct plt_entry *plt;

	/*
	 * If a custom trampoline is unreachable, rely on the ftrace_regs_caller
	 * trampoline which knows how to indirectly reach that trampoline through
	 * ops->direct_call.
	 */
	if (*addr != FTRACE_ADDR && *addr != FTRACE_REGS_ADDR && !reachable_by_bl(*addr, pc))
		*addr = FTRACE_REGS_ADDR;

	/*
	 * When the target is within range of the 'bl' instruction, use 'addr'
	 * as-is and branch to it directly.
	 */
	if (reachable_by_bl(*addr, pc))
		return true;

	/*
	 * 'mod' is only set at module load time, but if we end up
	 * dealing with an out-of-range condition, we can assume it
	 * is due to a module being loaded far away from the kernel.
	 *
	 * NOTE: __module_text_address() must be called with preemption
	 * disabled, but we can rely on ftrace_lock to ensure that 'mod'
	 * retains its validity throughout the remainder of this code.
	 */
	if (!mod) {
		preempt_disable();
		mod = __module_text_address(pc);
		preempt_enable();
	}

	if (WARN_ON(!mod))
		return false;

	plt = get_ftrace_plt(mod, *addr);
	if (!plt) {
		pr_err("ftrace: no module PLT for %ps\n", (void *)*addr);
		return false;
	}

	*addr = (unsigned long)plt;
	return true;
}
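
/*
 * Worked example: for a callsite in a module loaded more than 128M away
 * from the kernel image, *addr == FTRACE_ADDR cannot be encoded as a
 * direct 'bl', so it is rewritten to the module's FTRACE_PLT_IDX entry,
 * which in turn branches to ftrace_caller.
 */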
#else /* !CONFIG_MODULES */
static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, struct module *mod, unsigned long *addr)
{
	return true;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
{
	u32 old, new;
	unsigned long pc;

	pc = rec->ip + LOONGARCH_INSN_SIZE;

	if (!ftrace_find_callable_addr(rec, NULL, &addr))
		return -EINVAL;

	if (!ftrace_find_callable_addr(rec, NULL, &old_addr))
		return -EINVAL;

	new = larch_insn_gen_bl(pc, addr);
	old = larch_insn_gen_bl(pc, old_addr);

	return ftrace_modify_code(pc, old, new, true);
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	u32 new;
	unsigned long pc;

	pc = (unsigned long)&ftrace_call;
	new = larch_insn_gen_bl(pc, (unsigned long)func);

	return ftrace_modify_code(pc, 0, new, false);
}

/*
 * The compiler has inserted 2 NOPs before the regular function prologue.
 * T series registers are available and safe because of LoongArch's psABI.
 *
 * At runtime we can replace nop with bl to enable the ftrace call, and
 * replace bl with nop to disable it. The 'bl' requires us to save the
 * original RA value, so it is saved in t0 first:
 *
 * | Compiled   | Disabled     | Enabled          |
 * +------------+--------------+------------------+
 * | nop        | move t0, ra  | move t0, ra      |
 * | nop        | nop          | bl ftrace_caller |
 * | func_body  | func_body    | func_body        |
 *
 * The RA value will be recovered by ftrace_regs_entry, and restored into RA
 * before returning to the regular function prologue. When a function is not
 * being traced, the "move t0, ra" is not harmful.
 */
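
/*
 * A typical patch sequence, as a sketch (core ftrace drives these calls;
 * this is not a call chain within this file):
 *
 *	ftrace_init_nop(mod, rec);              first nop  -> move t0, ra
 *	ftrace_make_call(rec, FTRACE_ADDR);     second nop -> bl ftrace_caller
 *	ftrace_make_nop(mod, rec, FTRACE_ADDR); bl -> nop again
 */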

int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	u32 old, new;
	unsigned long pc;

	pc = rec->ip;
	old = larch_insn_gen_nop();
	new = larch_insn_gen_move(LOONGARCH_GPR_T0, LOONGARCH_GPR_RA);

	return ftrace_modify_code(pc, old, new, true);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	u32 old, new;
	unsigned long pc;

	pc = rec->ip + LOONGARCH_INSN_SIZE;

	if (!ftrace_find_callable_addr(rec, NULL, &addr))
		return -EINVAL;

	old = larch_insn_gen_nop();
	new = larch_insn_gen_bl(pc, addr);

	return ftrace_modify_code(pc, old, new, true);
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	u32 old, new;
	unsigned long pc;

	pc = rec->ip + LOONGARCH_INSN_SIZE;

	if (!ftrace_find_callable_addr(rec, NULL, &addr))
		return -EINVAL;

	new = larch_insn_gen_nop();
	old = larch_insn_gen_bl(pc, addr);

	return ftrace_modify_code(pc, old, new, true);
}

void arch_ftrace_update_code(int command)
{
	command |= FTRACE_MAY_SLEEP;
	ftrace_modify_all_code(command);
}
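
/*
 * FTRACE_MAY_SLEEP tells core ftrace it may cond_resched() while walking
 * all records, which keeps full-kernel patching from hogging the CPU.
 */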

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent)
{
	unsigned long old;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;

	if (!function_graph_enter(old, self_addr, 0, parent))
		*parent = return_hooker;
}
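
/*
 * In effect: '*parent' is the traced function's saved return address; on
 * success, function_graph_enter() has recorded the real caller on the
 * return stack and the function will return through return_to_handler.
 */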

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = &fregs->regs;
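	/* regs->regs[1] is $ra, which holds the parent return address here. */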
	unsigned long *parent = (unsigned long *)&regs->regs[1];

	prepare_ftrace_return(ip, (unsigned long *)parent);
}
#else
static int ftrace_modify_graph_caller(bool enable)
{
	u32 branch, nop;
	unsigned long pc, func;
	extern void ftrace_graph_call(void);

	pc = (unsigned long)&ftrace_graph_call;
	func = (unsigned long)&ftrace_graph_caller;

	nop = larch_insn_gen_nop();
	branch = larch_insn_gen_b(pc, func);

	if (enable)
		return ftrace_modify_code(pc, nop, branch, true);
	else
		return ftrace_modify_code(pc, branch, nop, true);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KPROBES_ON_FTRACE
/* Ftrace callback handler for kprobes -- called with preemption disabled */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	int bit;
	struct pt_regs *regs;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	if (unlikely(kprobe_ftrace_disabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	p = get_kprobe((kprobe_opcode_t *)ip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto out;

	regs = ftrace_get_regs(fregs);
	if (!regs)
		goto out;

	kcb = get_kprobe_ctlblk();
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
	} else {
		unsigned long orig_ip = instruction_pointer(regs);

		instruction_pointer_set(regs, ip);

		__this_cpu_write(current_kprobe, p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		if (!p->pre_handler || !p->pre_handler(p, regs)) {
			/*
			 * Emulate the single-step (and also recover regs->csr_era)
			 * as if there were a nop at the probe point.
			 */
			instruction_pointer_set(regs, (unsigned long)p->addr + MCOUNT_INSN_SIZE);
			if (unlikely(p->post_handler)) {
				kcb->kprobe_status = KPROBE_HIT_SSDONE;
				p->post_handler(p, regs, 0);
			}
			instruction_pointer_set(regs, orig_ip);
		}

		/*
		 * If pre_handler returned !0, it changed regs->csr_era, so we
		 * have to skip emulating the post_handler.
		 */
		__this_cpu_write(current_kprobe, NULL);
	}
out:
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
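
/*
 * With KPROBES_ON_FTRACE the probe fires from the ftrace callback above
 * rather than from a breakpoint, so no out-of-line single-step slot is
 * needed and ainsn.insn can stay NULL.
 */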
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
#endif /* CONFIG_KPROBES_ON_FTRACE */