// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Kernel Probes Jump Optimization (Optprobes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Hitachi Ltd., 2012
 */

#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
#include <linux/objtool.h>
#include <linux/pgtable.h>
#include <linux/static_call.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/nospec-branch.h>

#include "common.h"

unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct optimized_kprobe *op;
	struct kprobe *kp;
	long offs;
	int i;

	for (i = 0; i < JMP32_INSN_SIZE; i++) {
		kp = get_kprobe((void *)addr - i);
		/* This function only handles jump-optimized kprobes */
		if (kp && kprobe_optimized(kp)) {
			op = container_of(kp, struct optimized_kprobe, kp);
			/* If op->list is not empty, op is under optimization */
			if (list_empty(&op->list))
				goto found;
		}
	}

	return addr;
found:
	/*
	 * If the kprobe is optimized, the original bytes have been
	 * overwritten by the jump destination address, so they must be
	 * recovered from the op->optinsn.copied_insn buffer.
	 */
	if (copy_from_kernel_nofault(buf, (void *)addr,
				     MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	if (addr == (unsigned long)kp->addr) {
		/* Restore the first byte saved when the probe was armed */
		buf[0] = kp->opcode;
		memcpy(buf + 1, op->optinsn.copied_insn, DISP32_SIZE);
	} else {
		offs = addr - (unsigned long)kp->addr - 1;
		memcpy(buf, op->optinsn.copied_insn + offs, DISP32_SIZE - offs);
	}

	return (unsigned long)buf;
}

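/*
 * For illustration, an optimized probe site looks roughly like this
 * (JMP32_INSN_SIZE == 5, DISP32_SIZE == 4):
 *
 *	kp->addr:  e9 xx xx xx xx	jmp.d32 <detour buffer>
 *
 * The four displacement bytes overwrite real instruction bytes; their
 * originals are kept in op->optinsn.copied_insn, which is what
 * __recover_optprobed_insn() above stitches back together so that
 * instruction decoders see the original text.
 */
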
static void synthesize_clac(kprobe_opcode_t *addr)
{
	/*
	 * Can't be static_cpu_has() due to how objtool treats this feature bit.
	 * This isn't a fast path anyway.
	 */
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return;

	/* Replace the NOP3 with CLAC */
	addr[0] = 0x0f;
	addr[1] = 0x01;
	addr[2] = 0xca;
}

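/*
 * CLAC encodes as 0f 01 ca. The jump-optimized path enters the template
 * without going through the exception entry code (which would have
 * issued CLAC itself), so on SMAP-capable CPUs the template must clear
 * the AC flag before calling into C code.
 */
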
/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
{
#ifdef CONFIG_X86_64
	*addr++ = 0x48;
	*addr++ = 0xbf;
#else
	*addr++ = 0xb8;
#endif
	*(unsigned long *)addr = val;
}

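/*
 * The bytes written above assemble to "movabs $val, %rdi" (48 bf imm64)
 * on x86-64 and "mov $val, %eax" (b8 imm32) on 32-bit, loading the
 * struct optimized_kprobe pointer as the first argument of
 * optimized_callback().
 */
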
104 ".pushsection .rodata\n"
105 "optprobe_template_func:\n"
106 ".global optprobe_template_entry\n"
107 "optprobe_template_entry:\n"
109 /* We don't bother saving the ss register */
112 ".global optprobe_template_clac\n"
113 "optprobe_template_clac:\n"
117 ".global optprobe_template_val\n"
118 "optprobe_template_val:\n"
121 ".global optprobe_template_call\n"
122 "optprobe_template_call:\n"
124 /* Move flags to rsp */
125 " movq 18*8(%rsp), %rdx\n"
126 " movq %rdx, 19*8(%rsp)\n"
128 /* Skip flags entry */
131 #else /* CONFIG_X86_32 */
134 ".global optprobe_template_clac\n"
135 "optprobe_template_clac:\n"
139 ".global optprobe_template_val\n"
140 "optprobe_template_val:\n"
142 ".global optprobe_template_call\n"
143 "optprobe_template_call:\n"
145 /* Move flags into esp */
146 " movl 14*4(%esp), %edx\n"
147 " movl %edx, 15*4(%esp)\n"
149 /* Skip flags entry */
153 ".global optprobe_template_end\n"
154 "optprobe_template_end:\n"
void optprobe_template_func(void);
STACK_FRAME_NON_STANDARD(optprobe_template_func);

#define TMPL_CLAC_IDX \
	((long)optprobe_template_clac - (long)optprobe_template_entry)
#define TMPL_MOVE_IDX \
	((long)optprobe_template_val - (long)optprobe_template_entry)
#define TMPL_CALL_IDX \
	((long)optprobe_template_call - (long)optprobe_template_entry)
#define TMPL_END_IDX \
	((long)optprobe_template_end - (long)optprobe_template_entry)

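/*
 * Resulting layout of a detour buffer (optinsn slot), as assembled by
 * arch_prepare_optimized_kprobe() below:
 *
 *	[template copy      ]	TMPL_END_IDX bytes
 *	[copied instructions]	op->optinsn.size bytes
 *	[jmp back           ]	JMP32_INSN_SIZE bytes, to kp->addr + size
 */
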
/* Optimized kprobe callback function: called from optinsn */
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
		/* Save skipped registers */
		regs->cs = __KERNEL_CS;
#ifdef CONFIG_X86_32
		regs->cs |= get_kernel_rpl();
		regs->gs = 0;
#endif
		regs->ip = (unsigned long)op->kp.addr + INT3_INSN_SIZE;
		regs->orig_ax = ~0UL;

		__this_cpu_write(current_kprobe, &op->kp);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}
	preempt_enable();
}
NOKPROBE_SYMBOL(optimized_callback);

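/*
 * Note that optimized_callback() rewrites regs->ip to kp->addr +
 * INT3_INSN_SIZE, so handlers observe the same ip they would see on an
 * ordinary INT3-based kprobe hit.
 */
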
static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
{
	struct insn insn;
	int len = 0, ret;

	while (len < JMP32_INSN_SIZE) {
		ret = __copy_instruction(dest + len, src + len, real + len, &insn);
		if (!ret || !can_boost(&insn, src + len))
			return -EINVAL;
		len += ret;
	}
	/* Check whether the address range is reserved */
	if (ftrace_text_reserved(src, src + len - 1) ||
	    alternatives_text_reserved(src, src + len - 1) ||
	    jump_label_text_reserved(src, src + len - 1) ||
	    static_call_text_reserved(src, src + len - 1))
		return -EBUSY;

	return len;
}

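/*
 * The reserved-range checks above reject code that ftrace, alternatives,
 * jump labels or static calls may rewrite at runtime; an out-of-line
 * copy of such bytes could silently go stale.
 */
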
/* Check whether insn is indirect jump */
static int __insn_is_indirect_jump(struct insn *insn)
{
	return ((insn->opcode.bytes[0] == 0xff &&
		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
		insn->opcode.bytes[0] == 0xea);	/* Segment based jump */
}

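/*
 * Opcode 0xff with ModRM reg field 4 is "jmp r/m" and reg field 5 is
 * "ljmp m"; (reg & 6) == 4 matches exactly those two values. 0xea is
 * the legacy direct far (segment-based) jump.
 */
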
/* Check whether insn jumps into specified address range */
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
{
	unsigned long target = 0;

	switch (insn->opcode.bytes[0]) {
	case 0xe0:	/* loopne */
	case 0xe1:	/* loope */
	case 0xe2:	/* loop */
	case 0xe3:	/* jcxz */
	case 0xe9:	/* near relative jump */
	case 0xeb:	/* short relative jump */
		break;
	case 0x0f:
		if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
			break;
		return 0;
	default:
		if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
			break;
		return 0;
	}
	target = (unsigned long)insn->next_byte + insn->immediate.value;

	return (start <= target && target <= start + len);
}

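/*
 * For relative branches the target is the address of the next
 * instruction plus the sign-extended immediate; e.g. "eb fe" is a short
 * jump to itself (next_byte + (-2)).
 */
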
static int insn_is_indirect_jump(struct insn *insn)
{
	int ret = __insn_is_indirect_jump(insn);

#ifdef CONFIG_RETPOLINE
	/*
	 * A jump to an x86_indirect_thunk_* is treated as an indirect jump.
	 * Note that even with CONFIG_RETPOLINE=y, a kernel compiled with an
	 * older gcc may still emit plain indirect jumps, so this check
	 * supplements rather than replaces the indirect-jump check above.
	 */
	if (!ret)
		ret = insn_jump_into_range(insn,
				(unsigned long)__indirect_thunk_start,
				(unsigned long)__indirect_thunk_end -
				(unsigned long)__indirect_thunk_start);
#endif
	return ret;
}

/* Decode the whole function to ensure no instruction jumps into the target */
static int can_optimize(unsigned long paddr)
{
	unsigned long addr, size = 0, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	/* Lookup symbol including addr */
	if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
		return 0;

	/*
	 * Do not optimize in the entry code due to the unstable
	 * stack handling and registers setup.
	 */
	if (((paddr >= (unsigned long)__entry_text_start) &&
	     (paddr <  (unsigned long)__entry_text_end)))
		return 0;

	/* Check there is enough space for a relative jump. */
	if (size - offset < JMP32_INSN_SIZE)
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr - offset + size) { /* Decode until function end */
		unsigned long recovered_insn;
		if (search_exception_tables(addr))
			/*
			 * Since some fixup code jumps into this function,
			 * we can't optimize a kprobe in this function.
			 */
			return 0;
		recovered_insn = recover_probed_instruction(buf, addr);
		if (!recovered_insn)
			return 0;
		kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
		insn_get_length(&insn);
		/* Another subsystem puts a breakpoint */
		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
			return 0;
		/* Recover address */
		insn.kaddr = (void *)addr;
		insn.next_byte = (void *)(addr + insn.length);
		/* Check this instruction doesn't jump into the target range */
		if (insn_is_indirect_jump(&insn) ||
		    insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE,
					 DISP32_SIZE))
			return 0;
		addr += insn.length;
	}

	return 1;
}

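/*
 * Indirect jumps have targets that cannot be known at decode time, so
 * any indirect jump in the function might land in the bytes that the
 * JMP32 displacement will overwrite; such functions are conservatively
 * left unoptimized.
 */
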
/* Check whether the optimized_kprobe can actually be optimized. */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	int i;
	struct kprobe *p;

	for (i = 1; i < op->optinsn.size; i++) {
		p = get_kprobe(op->kp.addr + i);
		if (p && !kprobe_disabled(p))
			return -EEXIST;
	}

	return 0;
}

/* Check whether addr is within the optimized instructions. */
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + op->optinsn.size > addr);
}

/* Free optimized instruction slot */
static
void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
	u8 *slot = op->optinsn.insn;
	if (slot) {
		int len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE;

		/* Record the perf event before freeing the slot */
		if (dirty)
			perf_event_text_poke(slot, slot, len, NULL, 0);

		free_optinsn_slot(slot, dirty);
		op->optinsn.insn = NULL;
		op->optinsn.size = 0;
	}
}

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	__arch_remove_optimized_kprobe(op, 1);
}

/*
 * Copy replacing target instructions.
 * Target instructions MUST be relocatable (checked inside).
 * This is called when a new aggr(opt)probe is allocated or reused.
 */
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
				  struct kprobe *__unused)
{
	u8 *buf = NULL, *slot;
	int ret, len;
	long rel;

	if (!can_optimize((unsigned long)op->kp.addr))
		return -EILSEQ;

	buf = kzalloc(MAX_OPTINSN_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	op->optinsn.insn = slot = get_optinsn_slot();
	if (!slot) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Verify that the address gap is within 2GB, because the detour
	 * uses a relative jump.
	 */
	rel = (long)slot - (long)op->kp.addr + JMP32_INSN_SIZE;
	if (abs(rel) > 0x7fffffff) {
		ret = -ERANGE;
		goto err;
	}

	/* Copy arch-dep-instance from template */
	memcpy(buf, optprobe_template_entry, TMPL_END_IDX);

	/* Copy instructions into the out-of-line buffer */
	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr,
					  slot + TMPL_END_IDX);
	if (ret < 0)
		goto err;
	op->optinsn.size = ret;
	len = TMPL_END_IDX + op->optinsn.size;

	synthesize_clac(buf + TMPL_CLAC_IDX);

	/* Set probe information */
	synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);

	/* Set probe function call */
	synthesize_relcall(buf + TMPL_CALL_IDX,
			   slot + TMPL_CALL_IDX, optimized_callback);

	/* Set returning jmp instruction at the tail of out-of-line buffer */
	synthesize_reljump(buf + len, slot + len,
			   (u8 *)op->kp.addr + op->optinsn.size);
	len += JMP32_INSN_SIZE;

	/*
	 * Note: len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE is
	 * also used in __arch_remove_optimized_kprobe().
	 */

	/* We have to use text_poke() for instruction buffer because it is RO */
	perf_event_text_poke(slot, NULL, 0, buf, len);
	text_poke(slot, buf, len);

	ret = 0;
out:
	kfree(buf);
	return ret;

err:
	__arch_remove_optimized_kprobe(op, 0);
	goto out;
}

/*
 * Replace breakpoints (INT3) with relative jumps (JMP.d32).
 * Caller must be holding kprobe_mutex and text_mutex.
 *
 * The caller will have installed a regular kprobe and after that issued
 * synchronize_rcu_tasks(); this ensures that the instruction(s) that live in
 * the 4 bytes after the INT3 are unused and can now be overwritten.
 */
void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op, *tmp;
	u8 insn_buff[JMP32_INSN_SIZE];

	list_for_each_entry_safe(op, tmp, oplist, list) {
		s32 rel = (s32)((long)op->optinsn.insn -
			((long)op->kp.addr + JMP32_INSN_SIZE));

		WARN_ON(kprobe_disabled(&op->kp));

		/* Backup instructions which will be replaced by jump address */
		memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_INSN_SIZE,
		       DISP32_SIZE);

		insn_buff[0] = JMP32_INSN_OPCODE;
		*(s32 *)(&insn_buff[1]) = rel;

		text_poke_bp(op->kp.addr, insn_buff, JMP32_INSN_SIZE, NULL);

		list_del_init(&op->list);
	}
}

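/*
 * text_poke_bp() performs the JMP32 write in breakpoint-guarded steps
 * (INT3 first, then the displacement bytes, then the final opcode), so
 * CPUs racing with the transition never execute a half-written jump.
 */
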
/*
 * Replace a relative jump (JMP.d32) with a breakpoint (INT3).
 *
 * After that, we can restore the 4 bytes after the INT3 to undo what
 * arch_optimize_kprobes() scribbled. This is safe since those bytes will be
 * unused once the INT3 lands.
 */
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	u8 new[JMP32_INSN_SIZE] = { INT3_INSN_OPCODE, };
	u8 old[JMP32_INSN_SIZE];
	u8 *addr = op->kp.addr;

	memcpy(old, op->kp.addr, JMP32_INSN_SIZE);
	memcpy(new + INT3_INSN_SIZE,
	       op->optinsn.copied_insn,
	       JMP32_INSN_SIZE - INT3_INSN_SIZE);

	text_poke(addr, new, INT3_INSN_SIZE);
	text_poke_sync();
	text_poke(addr + INT3_INSN_SIZE,
		  new + INT3_INSN_SIZE,
		  JMP32_INSN_SIZE - INT3_INSN_SIZE);
	text_poke_sync();

	perf_event_text_poke(op->kp.addr, old, JMP32_INSN_SIZE, new, JMP32_INSN_SIZE);
}

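/*
 * The restore is done as two text_poke() calls with a text_poke_sync()
 * after each: once the INT3 is visible everywhere, no CPU can execute
 * the stale displacement bytes while they are being rewritten.
 */
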
/*
 * Recover original instructions and breakpoints from relative jumps.
 * Caller must be holding kprobe_mutex.
 */
extern void arch_unoptimize_kprobes(struct list_head *oplist,
				    struct list_head *done_list)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
{
	struct optimized_kprobe *op;

	if (p->flags & KPROBE_FLAG_OPTIMIZED) {
		/* This kprobe is really able to run the optimized path. */
		op = container_of(p, struct optimized_kprobe, kp);
		/* Detour through copied instructions */
		regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
		if (!reenter)
			reset_current_kprobe();
		return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(setup_detour_execution);
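
/*
 * The detour enters the slot at TMPL_END_IDX, i.e. past the template
 * head, so only the copied original instructions and the trailing jump
 * back are executed; the pre-handler has already run via the INT3 path.
 */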