/*
 * arch/arm64/kernel/probes/kprobes.c
 *
 * Kprobes support for ARM64
 *
 * Copyright (C) 2013 Linaro Limited.
 * Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/stringify.h>
#include <asm/traps.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/system_misc.h>
#include <asm/insn.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
#include <asm-generic/sections.h>

#include "decode-insn.h"
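
/*
 * Number of stack bytes a jprobe must save/restore around its handler:
 * the distance from the given stack address to the top of the stack it
 * lives on (IRQ stack or task stack), capped at IRQ_STACK_SIZE or
 * MAX_STACK_SIZE respectively.
 */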
#define MIN_STACK_SIZE(addr)	(on_irq_stack(addr, raw_smp_processor_id()) ? \
	min((unsigned long)IRQ_STACK_SIZE, \
	    IRQ_STACK_PTR(raw_smp_processor_id()) - (addr)) : \
	min((unsigned long)MAX_STACK_SIZE, \
	    (unsigned long)current_thread_info() + THREAD_START_SP - (addr)))
void jprobe_return_break(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
	/* prepare insn slot */
	p->ainsn.insn[0] = cpu_to_le32(p->opcode);

	flush_icache_range((uintptr_t)(p->ainsn.insn),
			   (uintptr_t)(p->ainsn.insn) +
			   MAX_INSN_SIZE * sizeof(kprobe_opcode_t));

	/*
	 * The PC must be restored to the next instruction after
	 * single-stepping the copy out of line (xol).
	 */
	p->ainsn.restore = (unsigned long)p->addr +
		sizeof(kprobe_opcode_t);
}
static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
	/* This instruction is not executed out of line. No need to adjust the PC. */
	p->ainsn.restore = 0;
}
static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (p->ainsn.handler)
		p->ainsn.handler((u32)p->opcode, (long)p->addr, regs);

	/* single step simulated, now go for post processing */
	post_kprobe_handler(kcb, regs);
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long probe_addr = (unsigned long)p->addr;
	extern char __start_rodata[];
	extern char __end_rodata[];

	if (probe_addr & 0x3)
		return -EINVAL;

	/* copy instruction */
	p->opcode = le32_to_cpu(*p->addr);

	if (in_exception_text(probe_addr))
		return -EINVAL;
	if (probe_addr >= (unsigned long)__start_rodata &&
	    probe_addr <= (unsigned long)__end_rodata)
		return -EINVAL;

	/* decode instruction */
	switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) {
	case INSN_REJECTED:	/* insn not supported */
		return -EINVAL;
	case INSN_GOOD_NO_SLOT:	/* insn needs simulation */
		p->ainsn.insn = NULL;
		break;
	case INSN_GOOD:		/* instruction uses slot */
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			return -ENOMEM;
		break;
	}

	/* prepare the instruction */
	if (p->ainsn.insn)
		arch_prepare_ss_slot(p);
	else
		arch_prepare_simulate(p);

	return 0;
}
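
/*
 * Illustrative sketch (not part of this file): how a client module
 * exercises the code above. register_kprobe() ends up in
 * arch_prepare_kprobe() to validate and decode the probed instruction,
 * then arch_arm_kprobe() below patches in the BRK. The symbol name and
 * message text are arbitrary examples.
 */
#if 0	/* sample client, kept out of the build */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

static int sample_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("pre: pc = 0x%llx\n", regs->pc);
	return 0;	/* 0: continue with single-step of the probed insn */
}

static struct kprobe sample_kp = {
	.symbol_name	= "do_fork",	/* example target */
	.pre_handler	= sample_pre,
};

static int __init sample_init(void)
{
	return register_kprobe(&sample_kp);
}

static void __exit sample_exit(void)
{
	unregister_kprobe(&sample_kp);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");
#endif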
static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
{
	void *addrs[1];
	u32 insns[1];

	addrs[0] = (void *)addr;
	insns[0] = (u32)opcode;

	return aarch64_insn_patch_text(addrs, insns, 1);
}
/* arm kprobe: install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, BRK64_OPCODE_KPROBES);
}

/* disarm kprobe: remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, p->opcode);
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}
/*
 * The D-flag (Debug mask) is set (masked) upon debug exception entry.
 * Kprobes needs to clear (unmask) the D-flag -ONLY- in the case of a
 * recursive probe, i.e. when a probe is hit from kprobe handler context
 * while executing the pre/post handlers. In this case we return with
 * the D-flag clear so that single-stepping can be carried out.
 *
 * Leave the D-flag set in all other cases.
 */
static void __kprobes
spsr_set_debug_flag(struct pt_regs *regs, int mask)
{
	unsigned long spsr = regs->pstate;

	if (mask)
		spsr |= PSR_D_BIT;
	else
		spsr &= ~PSR_D_BIT;

	regs->pstate = spsr;
}
/*
 * Interrupts need to be disabled before single-step mode is set, and not
 * re-enabled until after single-step mode ends.
 * Without disabling interrupts on the local CPU, an interrupt could fire
 * between the exception return and the start of the out-of-line
 * single-step, resulting in wrongly single-stepping into the interrupt
 * handler.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						 struct pt_regs *regs)
{
	kcb->saved_irqflag = regs->pstate;
	regs->pstate |= PSR_I_BIT;
}

static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
						    struct pt_regs *regs)
{
	if (kcb->saved_irqflag & PSR_I_BIT)
		regs->pstate |= PSR_I_BIT;
	else
		regs->pstate &= ~PSR_I_BIT;
}
static void __kprobes
set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr)
{
	kcb->ss_ctx.ss_pending = true;
	kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t);
}

static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
{
	kcb->ss_ctx.ss_pending = false;
	kcb->ss_ctx.match_addr = 0;
}
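
/*
 * Enter single-step for the probed instruction: either run the copied
 * instruction from its out-of-line slot under hardware single-step, or,
 * for instructions that cannot be stepped out of line, simulate the
 * instruction in software and go straight to post-processing.
 */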
static void __kprobes setup_singlestep(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	unsigned long slot;

	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	if (p->ainsn.insn) {
		/* prepare for single stepping */
		slot = (unsigned long)p->ainsn.insn;

		set_ss_context(kcb, slot);	/* mark pending ss */

		if (kcb->kprobe_status == KPROBE_REENTER)
			spsr_set_debug_flag(regs, 0);

		/* IRQs and single stepping do not mix well. */
		kprobes_save_local_irqflag(kcb, regs);
		kernel_enable_single_step(regs);
		instruction_pointer_set(regs, slot);
	} else {
		/* insn simulation */
		arch_simulate_insn(p, regs);
	}
}
static int __kprobes reenter_kprobe(struct kprobe *p,
				    struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
		dump_kprobe(p);
		BUG();
		break;
	default:
		WARN_ON(1);
		return 0;
	}

	return 1;
}
static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();

	if (!cur)
		return;

	/* return addr restore if non-branching insn */
	if (cur->ainsn.restore != 0)
		instruction_pointer_set(regs, cur->ainsn.restore);

	/* restore back original saved kprobe variables and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		return;
	}
	/* call post handler */
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	if (cur->post_handler) {
		/*
		 * The post_handler can hit a breakpoint and single-step
		 * again, so we leave the D-flag handling to the recursive
		 * exception.
		 */
		cur->post_handler(cur, regs, 0);
	}

	reset_current_kprobe();
}
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe so that the ip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		instruction_pointer_set(regs, (unsigned long)cur->addr);
		if (!instruction_pointer(regs))
			BUG();

		kernel_disable_single_step();
		if (kcb->kprobe_status == KPROBE_REENTER)
			spsr_set_debug_flag(regs, 1);

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();

		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * we could also use the npre/npostfault counts to
		 * account for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault. This could happen
		 * if the handler tries to access user space, e.g. via
		 * copy_from_user() or get_user(). Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;
	}
	return 0;
}
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	return NOTIFY_DONE;
}
static void __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur_kprobe;
	struct kprobe_ctlblk *kcb;
	unsigned long addr = instruction_pointer(regs);

	kcb = get_kprobe_ctlblk();
	cur_kprobe = kprobe_running();

	p = get_kprobe((kprobe_opcode_t *)addr);

	if (p) {
		if (cur_kprobe) {
			if (reenter_kprobe(p, regs, kcb))
				return;
		} else {
			/* Probe hit */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing. If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry,
			 * so get out doing nothing more here.
			 *
			 * The pre_handler may itself hit a breakpoint and
			 * single-step before returning, so keep the PSTATE
			 * D-flag set until the pre_handler returns.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs)) {
				setup_singlestep(p, regs, kcb, 0);
			} else
				reset_current_kprobe();
		}
	} else if ((le32_to_cpu(*(kprobe_opcode_t *)addr) ==
		    BRK64_OPCODE_KPROBES) && cur_kprobe) {
		/* We probably hit a jprobe. Call its break handler. */
		if (cur_kprobe->break_handler &&
		    cur_kprobe->break_handler(cur_kprobe, regs)) {
			setup_singlestep(cur_kprobe, regs, kcb, 0);
		}
	}
	/*
	 * The breakpoint instruction was removed right
	 * after we hit it. Another cpu has removed
	 * either a probepoint or a debugger breakpoint
	 * at this address. In either case, no further
	 * handling of this interrupt is appropriate.
	 * Return back to original instruction, and continue.
	 */
}
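
/*
 * Summary of a typical probe hit, as implemented above: the BRK at the
 * probed address raises a debug exception that reaches kprobe_handler()
 * via kprobe_breakpoint_handler() below; setup_singlestep() then either
 * simulates the saved instruction or runs it from its slot under
 * hardware single-step, and the resulting step exception lands in
 * kprobe_single_step_handler(), which finishes in post_kprobe_handler().
 */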
static int __kprobes
kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr)
{
	if ((kcb->ss_ctx.ss_pending)
	    && (kcb->ss_ctx.match_addr == addr)) {
		clear_ss_context(kcb);	/* clear pending ss */
		return DBG_HOOK_HANDLED;
	}
	/* not ours, kprobes should ignore it */
	return DBG_HOOK_ERROR;
}
int __kprobes
kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	int retval;

	/* return error if this is not our step */
	retval = kprobe_ss_hit(kcb, instruction_pointer(regs));

	if (retval == DBG_HOOK_HANDLED) {
		kprobes_restore_local_irqflag(kcb, regs);
		kernel_disable_single_step();

		if (kcb->kprobe_status == KPROBE_REENTER)
			spsr_set_debug_flag(regs, 1);

		post_kprobe_handler(kcb, regs);
	}

	return retval;
}

int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
{
	kprobe_handler(regs);
	return DBG_HOOK_HANDLED;
}
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	long stack_ptr = kernel_stack_pointer(regs);

	kcb->jprobe_saved_regs = *regs;
	/*
	 * As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g. via
	 * tail-call optimization. So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (void *)stack_ptr,
	       MIN_STACK_SIZE(stack_ptr));

	instruction_pointer_set(regs, (unsigned long)jp->entry);
	preempt_disable();
	pause_graph_tracing();
	return 1;
}
void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/*
	 * A jprobe handler returns by raising a break exception that is
	 * encoded like any other kprobe BRK, but is identifiable by:
	 * - a dedicated PC (the jprobe_return_break label below);
	 * - the stack pointer restored from the saved pt_regs.
	 */
	asm volatile ("ldr x0, [%0]\n\t"
		      "mov sp, x0\n\t"
		      ".globl jprobe_return_break\n\t"
		      "jprobe_return_break:\n\t"
		      "brk %1\n\t"
		      :
		      : "r"(&kcb->jprobe_saved_regs.sp),
			"I"(BRK64_ESR_KPROBES)
		      : "memory");
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	long stack_addr = kcb->jprobe_saved_regs.sp;
	long orig_sp = kernel_stack_pointer(regs);
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	if (instruction_pointer(regs) != (u64)jprobe_return_break)
		return 0;

	if (orig_sp != stack_addr) {
		struct pt_regs *saved_regs =
			(struct pt_regs *)kcb->jprobe_saved_regs.sp;
		pr_err("current sp %lx does not match saved sp %lx\n",
		       orig_sp, stack_addr);
		pr_err("Saved registers for jprobe %p\n", jp);
		show_regs(saved_regs);
		pr_err("Current registers\n");
		show_regs(regs);
		BUG();
	}
	unpause_graph_tracing();
	*regs = kcb->jprobe_saved_regs;
	memcpy((void *)stack_addr, kcb->jprobes_stack,
	       MIN_STACK_SIZE(stack_addr));
	preempt_enable_no_resched();
	return 1;
}
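
/*
 * Illustrative sketch (not part of this file): a jprobe client for the
 * machinery above. setjmp_pre_handler() diverts execution into the entry
 * handler with the original arguments; the handler must end with
 * jprobe_return(), which traps back into longjmp_break_handler() to
 * restore the saved context. The target symbol is an arbitrary example;
 * the entry handler's prototype must match the probed function exactly.
 */
#if 0	/* sample client, kept out of the build */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

/* proxy with the same arguments as the probed do_fork() */
static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
		     unsigned long stack_size, int __user *parent_tidptr,
		     int __user *child_tidptr)
{
	pr_info("jprobe: clone_flags = 0x%lx\n", clone_flags);
	jprobe_return();	/* mandatory: never return normally */
	return 0;		/* unreachable */
}

static struct jprobe sample_jp = {
	.entry		= jdo_fork,
	.kp.symbol_name	= "do_fork",	/* example target */
};

/* register_jprobe(&sample_jp) in module init, unregister_jprobe() on exit */
#endif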
bool arch_within_kprobe_blacklist(unsigned long addr)
{
	extern char __idmap_text_start[], __idmap_text_end[];
	extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

	if ((addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end) ||
	    (addr >= (unsigned long)__entry_text_start &&
	    addr < (unsigned long)__entry_text_end) ||
	    (addr >= (unsigned long)__idmap_text_start &&
	    addr < (unsigned long)__idmap_text_end) ||
	    !!search_exception_tables(addr))
		return true;

	if (!is_kernel_in_hyp_mode()) {
		if ((addr >= (unsigned long)__hyp_text_start &&
		    addr < (unsigned long)__hyp_text_end) ||
		    (addr >= (unsigned long)__hyp_idmap_text_start &&
		    addr < (unsigned long)__hyp_idmap_text_end))
			return true;
	}

	return false;
}
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address =
		(unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 * - instances are always pushed into the head of the list
	 * - when multiple return probes are registered for the same
	 *   function, the (chronologically) first instance's ret_addr
	 *   will be the real return address, and all the rest will
	 *   point to kretprobe_trampoline.
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, NULL);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	return (void *)orig_ret_address;
}
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->regs[30];

	/* replace return addr (x30) with trampoline */
	regs->regs[30] = (long)&kretprobe_trampoline;
}
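
/*
 * Illustrative sketch (not part of this file): a kretprobe client. On
 * function entry arch_prepare_kretprobe() swaps x30 for the trampoline;
 * when the probed function returns into the trampoline,
 * trampoline_probe_handler() above recovers the real return address and
 * runs the handler. The symbol name is an arbitrary example.
 */
#if 0	/* sample client, kept out of the build */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <asm/ptrace.h>

static int sample_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* regs_return_value() reads x0, the return register on arm64 */
	pr_info("%s returned %ld\n", ri->rp->kp.symbol_name,
		regs_return_value(regs));
	return 0;
}

static struct kretprobe sample_rp = {
	.handler	= sample_ret,
	.kp.symbol_name	= "do_fork",	/* example target */
	.maxactive	= 20,	/* concurrent instances to pre-allocate */
};

/* register_kretprobe(&sample_rp) in module init, unregister on exit */
#endif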
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}

int __init arch_init_kprobes(void)
{
	return 0;
}