/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>
#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
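
/*
 * 1 << 6 == 64 hash buckets, used below for both kprobe_table and
 * kretprobe_inst_table.
 */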
/*
 * Some oddball architectures like 64-bit powerpc have function descriptors,
 * so this lookup must be overridable by the architecture.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
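
/*
 * Note: kallsyms_lookup_name() returns 0 for an unknown symbol, so after
 * kprobe_lookup_name(name, addr) a NULL addr means the lookup failed.
 */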
static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobe_enabled;

static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}
/*
 * Normally, functions that we'd want to prohibit kprobes in are marked
 * __kprobes. But there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule).
 *
 * For such cases, we now have a blacklist.
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{NULL}    /* Terminator */
};
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support, and
 * stepping on the instruction on a vmalloc'ed/kmalloc'ed/data page
 * is a recipe for disaster.
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
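
/*
 * A worked example (x86 figures, assuming 4KB pages): MAX_INSN_SIZE is 16
 * and kprobe_opcode_t is one byte there, so each page holds
 * 4096 / 16 == 256 instruction slots.
 */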
struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};
static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_pages */
static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);
static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
loop_end:
		thaw_processes();
	} else {
		synchronize_sched();
	}
#else
	synchronize_sched();
#endif
	return ret;
}
/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

 retry:
	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
		goto retry;
	}

	/* All out of space.  Need to allocate a new page.  Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;
	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	kprobe_opcode_t *ret;

	mutex_lock(&kprobe_insn_mutex);
	ret = __get_insn_slot();
	mutex_unlock(&kprobe_insn_mutex);

	return ret;
}
/* Return 1 if the slot's page was freed or recycled, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		hlist_del(&kip->hlist);
		if (hlist_empty(&kprobe_insn_pages)) {
			INIT_HLIST_NODE(&kip->hlist);
			hlist_add_head(&kip->hlist,
				       &kprobe_insn_pages);
		} else {
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}
static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos, *next;
	int safety;

	/* Ensure no one is preempted on the garbage slots */
	mutex_unlock(&kprobe_insn_mutex);
	safety = check_safety();
	mutex_lock(&kprobe_insn_mutex);
	if (safety != 0)
		return -EAGAIN;

	hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}
void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	mutex_lock(&kprobe_insn_mutex);
	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else
				collect_one_slot(kip, i);
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();

	mutex_unlock(&kprobe_insn_mutex);
}
/* We have preemption disabled, so it is safe to use the __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}
/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}
/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list.
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}
static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}
static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * If we faulted "during" the execution of a user-specified
	 * probe handler, invoke just that probe's fault handler.
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}
static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}
/* Walks the list and increments the nmissed count for the multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;

	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
}
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove the rp inst from the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}
void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
				   struct hlist_head **head, unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_lock(unsigned long hash,
					   unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
				     unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}
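
/*
 * Sketch of the intended pairing (the names below are this file's own API):
 *
 *	struct hlist_head *head;
 *	unsigned long flags;
 *
 *	kretprobe_hash_lock(current, &head, &flags);
 *	... walk current's kretprobe instances on *head ...
 *	kretprobe_hash_unlock(current, &flags);
 *
 * The _table_ variants take a precomputed hash instead of a task pointer.
 */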
/*
 * This function is called from finish_task_switch() when task tk becomes
 * dead, so that we can recycle any function-return probe instances
 * associated with this task. These left-over instances represent probed
 * functions that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot.  kretprobe_table_locks not yet initialized. */
		return;

	/* empty_rp must be initialized before recycle_rp_inst() can add to it */
	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}
static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}
static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;
	struct hlist_head *head;

	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}
/*
 * Keep all fields in the kprobe consistent.
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}
/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist.
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		if (old_p->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &old_p->list);
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_handler;
	return 0;
}
/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe.
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	if (p->post_handler)
		ap->post_handler = aggr_post_handler;
	if (p->break_handler)
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}
/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies.
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}
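
/*
 * For example, when a second kprobe is registered at an already-probed
 * address, the original kprobe is demoted to a list entry under a freshly
 * allocated "manager" kprobe whose aggr_* handlers fan out to every probe
 * on the list.
 */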
static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}
/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;

	if (p->symbol_name) {
		kprobe_lookup_name(p->symbol_name, addr);
	}

	if (!addr)
		return NULL;
	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}
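
/*
 * For example, a probe set up with .symbol_name = "do_fork" and
 * .offset = 0x10 resolves to the address of do_fork plus 0x10 bytes.
 */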
static int __kprobes __register_kprobe(struct kprobe *p,
				       unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	addr = kprobe_addr(p);
	if (!addr)
		return -EINVAL;
	p->addr = addr;

	if (!__kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr)) {
		return -EINVAL;
	}

	p->mod_refcounted = 0;

	/*
	 * Check if we are probing a module.
	 */
	probed_mod = __module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		struct module *calling_mod;
		calling_mod = __module_text_address(called_from);
		/*
		 * We must allow modules to probe themselves and in this case
		 * avoid incrementing the module refcount, so as to allow
		 * unloading of self-probing modules.
		 */
		if (calling_mod != probed_mod) {
			if (unlikely(!try_module_get(probed_mod)))
				return -EINVAL;
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}

	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);

	return ret;
}
/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p))
		return -EINVAL;

	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid_p;
		return -EINVAL;
	}
valid_p:
	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     list_is_singular(&old_p->list))) {
		/*
		 * This is the only probe on the hash list. Disarm it only if
		 * kprobes are enabled; otherwise the breakpoint has already
		 * been removed, and we save a needless icache flush.
		 */
		if (kprobe_enabled)
			arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
	} else {
		if (p->break_handler)
			old_p->break_handler = NULL;
		if (p->post_handler) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			old_p->post_handler = NULL;
		}
noclean:
		list_del_rcu(&p->list);
	}
	return 0;
}
static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p;

	if (p->mod_refcounted) {
		/*
		 * Since we've already incremented the refcount,
		 * we don't need to disable preemption.
		 */
		mod = module_text_address((unsigned long)p->addr);
		if (mod)
			module_put(mod);
	}

	if (list_empty(&p->list) || list_is_singular(&p->list)) {
		if (!list_empty(&p->list)) {
			/* "p" is the last child of an aggr_kprobe */
			old_p = list_entry(p->list.next, struct kprobe, list);
			list_del(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	}
}
static int __kprobes __register_kprobes(struct kprobe **kps, int num,
					unsigned long called_from)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = __register_kprobe(kps[i], called_from);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
/*
 * Registration and unregistration functions for kprobe.
 */
int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobes(&p, 1,
				  (unsigned long)__builtin_return_address(0));
}
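
/*
 * Minimal usage sketch (the handler name and probed symbol below are
 * illustrative, not part of this file):
 *
 *	static int my_pre(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "kprobe hit at %p\n", kp->addr);
 *		return 0;	// 0: resume normal single-step handling
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= my_pre,
 *	};
 *
 *	int err = register_kprobe(&my_kp);	// 0 on success
 *	...
 *	unregister_kprobe(&my_kp);
 */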
void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	return __register_kprobes(kps, num,
				  (unsigned long)__builtin_return_address(0));
}
void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};
unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}
static int __kprobes __register_jprobes(struct jprobe **jps, int num,
					unsigned long called_from)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		if (!kernel_text_address(addr))
			ret = -EINVAL;
		else {
			/* TODO: verify that the probe point is a function entry point */
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = __register_kprobe(&jp->kp, called_from);
		}
		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
int __kprobes register_jprobe(struct jprobe *jp)
{
	return __register_jprobes(&jp, 1,
				  (unsigned long)__builtin_return_address(0));
}
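
/*
 * Usage sketch (names are illustrative): a jprobe handler mirrors the probed
 * function's prototype, may inspect its arguments, and must finish by
 * calling jprobe_return().
 *
 *	static long my_jdo_fork(unsigned long clone_flags,
 *				unsigned long stack_start, struct pt_regs *regs,
 *				unsigned long stack_size,
 *				int __user *parent_tidptr,
 *				int __user *child_tidptr)
 *	{
 *		printk(KERN_INFO "do_fork: flags=0x%lx\n", clone_flags);
 *		jprobe_return();	// mandatory; never returns normally
 *		return 0;		// unreachable
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry		= my_jdo_fork,
 *		.kp.symbol_name	= "do_fork",
 *	};
 *	register_jprobe(&my_jp);
 */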
void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}

int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	return __register_jprobes(jps, num,
				  (unsigned long)__builtin_return_address(0));
}
void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;
	/* TODO: consider swapping the RA only after the last pre_handler fires */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				 struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
			/*
			 * The entry handler declined this hit; put the
			 * instance back on the free list. rp->lock was
			 * released above, so it must be retaken here.
			 */
			spin_lock_irqsave(&rp->lock, flags);
			hlist_add_head(&ri->hlist, &rp->free_instances);
			spin_unlock_irqrestore(&rp->lock, flags);
			return 0;
		}

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}
static int __kprobes __register_kretprobe(struct kretprobe *rp,
					  unsigned long called_from)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (!addr)
			return -EINVAL;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = __register_kprobe(&rp->kp, called_from);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
static int __kprobes __register_kretprobes(struct kretprobe **rps, int num,
					   unsigned long called_from)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = __register_kretprobe(rps[i], called_from);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return __register_kretprobes(&rp, 1,
				     (unsigned long)__builtin_return_address(0));
}
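
/*
 * Usage sketch (handler name illustrative): the return handler runs when
 * the probed function returns; its return value can be read from the saved
 * registers.
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "probed function returned %lx\n",
 *		       regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler	= my_ret_handler,
 *		.kp.symbol_name	= "do_fork",
 *		.maxactive	= 20,	// instances to preallocate
 *	};
 *	register_kretprobe(&my_rp);
 */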
void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return __register_kretprobes(rps, num,
				     (unsigned long)__builtin_return_address(0));
}
void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
#else /* CONFIG_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_KRETPROBES */
static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME: allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		spin_lock_init(&(kretprobe_table_locks[i].lock));
	}
	/*
	 * Lookup and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
					      &size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}
	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk(KERN_WARNING
				       "kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

	/* By default, kprobes are enabled */
	kprobe_enabled = true;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	kprobes_initialized = (err == 0);

	return err;
}
#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
				   const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s\n", p->addr, kprobe_type,
			   sym, offset, (modname ? modname : " "));
	else
		seq_printf(pi, "%p  %s  %p\n", p->addr, kprobe_type, p->addr);
}
static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do here */
}
static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
				      &offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}
static struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
	.open		= kprobes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static void __kprobes enable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already enabled, just return */
	if (kprobe_enabled)
		goto already_enabled;

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			arch_arm_kprobe(p);
	}

	kprobe_enabled = true;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}
static void __kprobes disable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disabled, just return */
	if (!kprobe_enabled)
		goto already_disabled;

	kprobe_enabled = false;
	printk(KERN_INFO "Kprobes globally disabled\n");
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}
/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available.
 */
static ssize_t read_enabled_file_bool(struct file *file,
				      char __user *user_buf, size_t count,
				      loff_t *ppos)
{
	char buf[3];

	if (kprobe_enabled)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
static ssize_t write_enabled_file_bool(struct file *file,
				       const char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		enable_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disable_all_kprobes();
		break;
	}

	return count;
}
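
/*
 * From userspace (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	echo 0 > /sys/kernel/debug/kprobes/enabled	# disarm all kprobes
 *	echo 1 > /sys/kernel/debug/kprobes/enabled	# re-arm them
 *	cat /sys/kernel/debug/kprobes/list		# list registered probes
 */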
static struct file_operations fops_kp = {
	.read	= read_enabled_file_bool,
	.write	= write_enabled_file_bool,
};
static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				   &debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
				   &value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */
module_init(init_kprobes);
EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_kprobes);
EXPORT_SYMBOL_GPL(unregister_kprobes);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(register_jprobes);
EXPORT_SYMBOL_GPL(unregister_jprobes);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);
EXPORT_SYMBOL_GPL(register_kretprobes);
EXPORT_SYMBOL_GPL(unregister_kretprobes);