ftrace/alternatives: Introducing *_text_reserved functions
[linux-2.6-block.git] / kernel / kprobes.c
/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>
#include <linux/memory.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

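/*
 * Usage sketch for the macro above (the symbol name is purely
 * illustrative); this mirrors the lookups done in init_kprobes() below:
 *
 *	kprobe_opcode_t *addr;
 *
 *	kprobe_lookup_name("do_fork", addr);
 *	if (!addr)
 *		return -ENOENT;	(symbol not found)
 */
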
static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/*
 * Normally, functions that we'd want to prohibit kprobes in are marked
 * __kprobes. But there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule).
 *
 * For such cases, we now have a blacklist.
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{"native_get_debugreg",},
	{"irq_entries_start",},
	{"common_interrupt",},
	{NULL}    /* Terminator */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_pages */
static LIST_HEAD(kprobe_insn_pages);
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(void)
{
	struct kprobe_insn_page *kip;

 retry:
	list_for_each_entry(kip, &kprobe_insn_pages, list) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
		goto retry;
	}
	/* All out of space.  Need to allocate a new page.  Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_LIST_HEAD(&kip->list);
	list_add(&kip->list, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}

kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	kprobe_opcode_t *ret;
	mutex_lock(&kprobe_insn_mutex);
	ret = __get_insn_slot();
	mutex_unlock(&kprobe_insn_mutex);
	return ret;
}

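/*
 * Usage sketch: architecture code typically grabs a slot while preparing
 * a probe and copies the original instruction into it. A minimal,
 * illustrative arch_prepare_kprobe() (the memcpy stands in for the
 * arch-specific instruction-copy logic) might look like:
 *
 *	int __kprobes arch_prepare_kprobe(struct kprobe *p)
 *	{
 *		p->ainsn.insn = get_insn_slot();
 *		if (!p->ainsn.insn)
 *			return -ENOMEM;
 *		memcpy(p->ainsn.insn, p->addr,
 *		       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 *		p->opcode = *p->addr;
 *		return 0;
 *	}
 *
 * The slot is handed back via free_insn_slot() from arch_remove_kprobe().
 */
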
/* Return 1 if all garbage slots are collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kprobe_insn_pages)) {
			list_del(&kip->list);
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no one is single-stepping on a garbage slot */
	synchronize_sched();

	list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;

	mutex_lock(&kprobe_insn_mutex);
	list_for_each_entry(kip, &kprobe_insn_pages, list) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else
				collect_one_slot(kip, i);
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();

	mutex_unlock(&kprobe_insn_mutex);
}
#endif

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

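/*
 * Usage sketch: an architecture breakpoint handler looks up the kprobe
 * for the trapping address while preemption is disabled (trap context),
 * e.g. on x86:
 *
 *	p = get_kprobe((void *)(regs->ip - sizeof(kprobe_opcode_t)));
 *	if (p && p->pre_handler(p, regs))
 *		...single-step the copied instruction...
 *
 * The ip adjustment is the architecture's breakpoint length; other
 * architectures compute the probed address differently.
 */
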
/* Arm a kprobe with text_mutex */
static void __kprobes arm_kprobe(struct kprobe *kp)
{
	mutex_lock(&text_mutex);
	arch_arm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/* Disarm a kprobe with text_mutex */
static void __kprobes disarm_kprobe(struct kprobe *kp)
{
	mutex_lock(&text_mutex);
	arch_disarm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * If we faulted "during" the execution of a user-specified
	 * probe handler, invoke just that probe's fault handler.
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
				   struct hlist_head **head, unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_lock(unsigned long hash,
					   unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
				     unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot.  kretprobe_table_locks not yet initialized. */
		return;

	/* empty_rp must be initialized before recycle_rp_inst() can add to it */
	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			arm_kprobe(ap);
	}
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about a kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = old_p;

	if (old_p->pre_handler != aggr_pre_handler) {
		/* If old_p is not an aggr_kprobe, create a new aggr_kprobe. */
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
	}

	if (kprobe_gone(ap)) {
		/*
		 * We are attempting to insert a new probe at the same
		 * location that previously had a probe in a module vaddr
		 * area which has already been freed. The instruction slot
		 * has therefore been released, and we need a new slot for
		 * the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free the aggr_probe. It will be used next
			 * time, or freed by unregister_kprobe.
			 */
			return ret;

		/*
		 * Clear the gone flag to prevent allocating a new slot again,
		 * and set the disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	copy_kprobe(ap, p);
	return add_new_kprobe(ap, p);
}

/* Try to disable aggr_kprobe, and return 1 if succeeded. */
static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable aggr_kprobe.
			 */
			return 0;
	}
	p->flags |= KPROBE_FLAG_DISABLED;
	return 1;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;
	if (p->symbol_name) {
		if (addr)
			return NULL;
		kprobe_lookup_name(p->symbol_name, addr);
	}

	if (!addr)
		return NULL;
	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}

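/*
 * Usage sketch: a caller specifies either an explicit address or a
 * symbol name plus offset, but not both (both set makes kprobe_addr()
 * return NULL and registration fail). The symbol and offset here are
 * illustrative:
 *
 *	struct kprobe kp = {
 *		.symbol_name	= "do_fork",
 *		.offset		= 0x10,
 *	};
 *
 * kprobe_addr() then resolves kp.addr to do_fork + 0x10.
 */
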
/* Check passed kprobe is valid and return kprobe in kprobe_table. */
static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p))
		return NULL;

	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
			/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return old_p;
}

/* Return error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;

	mutex_lock(&kprobe_mutex);
	old_p = __get_valid_kprobe(p);
	if (old_p)
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);
	return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	addr = kprobe_addr(p);
	if (!addr)
		return -EINVAL;
	p->addr = addr;

	ret = check_kprobe_rereg(p);
	if (ret)
		return ret;

	preempt_disable();
	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr)) {
		preempt_enable();
		return -EINVAL;
	}

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;

	/*
	 * Check if we are probing a module.
	 */
	probed_mod = __module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(probed_mod))) {
			preempt_enable();
			return -EINVAL;
		}
		/*
		 * If the module freed .init.text, we couldn't insert
		 * kprobes in there.
		 */
		if (within_module_init((unsigned long)p->addr, probed_mod) &&
		    probed_mod->state != MODULE_STATE_COMING) {
			module_put(probed_mod);
			preempt_enable();
			return -EINVAL;
		}
	}
	preempt_enable();

	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	mutex_lock(&text_mutex);
	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out_unlock_text;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		arch_arm_kprobe(p);

out_unlock_text:
	mutex_unlock(&text_mutex);
out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);

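/*
 * Usage sketch, modelled on samples/kprobes/kprobe_example.c (the probed
 * symbol and handler body are illustrative). A pre_handler returning 0
 * lets the probed instruction execute normally:
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "pre_handler: p->addr = 0x%p\n", p->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= handler_pre,
 *	};
 *
 *	static int __init kprobe_init(void)
 *	{
 *		return register_kprobe(&kp);
 *	}
 *
 * unregister_kprobe(&kp) must be called before the probing module exits.
 */
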
/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = __get_valid_kprobe(p);
	if (old_p == NULL)
		return -EINVAL;

	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     list_is_singular(&old_p->list))) {
		/*
		 * This is the only probe on the hash list. Disarm it only if
		 * kprobes are enabled and it is not gone - otherwise, the
		 * breakpoint would already have been removed. We save on
		 * flushing the icache.
		 */
		if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
			disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
	} else {
		if (p->break_handler && !kprobe_gone(p))
			old_p->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			old_p->post_handler = NULL;
		}
noclean:
		list_del_rcu(&p->list);
		if (!kprobe_disabled(old_p)) {
			try_to_disable_aggr_kprobe(old_p);
			if (!kprobes_all_disarmed && kprobe_disabled(old_p))
				disarm_kprobe(old_p);
		}
	}
	return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *old_p;

	if (list_empty(&p->list))
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* "p" is the last child of an aggr_kprobe */
		old_p = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		arch_remove_kprobe(old_p);
		kfree(old_p);
	}
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		if (!kernel_text_address(addr))
			ret = -EINVAL;
		else {
			/* TODO: Verify probepoint is a function entry point */
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = register_kprobe(&jp->kp);
		}
		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int __kprobes register_jprobe(struct jprobe *jp)
{
	return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);

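/*
 * Usage sketch, modelled on samples/kprobes/jprobe_example.c (the probed
 * function is illustrative): the jprobe handler must mirror the probed
 * function's signature and must end with jprobe_return():
 *
 *	static long jdo_fork(unsigned long clone_flags,
 *			     unsigned long stack_start, struct pt_regs *regs,
 *			     unsigned long stack_size,
 *			     int __user *parent_tidptr,
 *			     int __user *child_tidptr)
 *	{
 *		printk(KERN_INFO "jprobe: clone_flags = 0x%lx\n", clone_flags);
 *		jprobe_return();
 *		return 0;
 *	}
 *
 *	static struct jprobe my_jprobe = {
 *		.entry = jdo_fork,
 *		.kp = {
 *			.symbol_name = "do_fork",
 *		},
 *	};
 *
 *	ret = register_jprobe(&my_jprobe);
 */
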
#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/* TODO: consider swapping the RA only after the last pre_handler fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				 struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs))
			return 0;

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (!addr)
			return -EINVAL;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
#else
		rp->maxactive = num_possible_cpus();
#endif
	}
	spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

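/*
 * Usage sketch, modelled on samples/kprobes/kretprobe_example.c (the
 * probed symbol and maxactive value are illustrative): report a
 * function's return value from the return handler.
 *
 *	static int ret_handler(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "%s returned %lu\n",
 *		       ri->rp->kp.symbol_name, regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler	= ret_handler,
 *		.maxactive	= 20,
 *		.kp = {
 *			.symbol_name = "do_fork",
 *		},
 *	};
 *
 *	ret = register_kretprobe(&my_kretprobe);
 */
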
int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_KRETPROBES */

/* Set the kprobe gone and remove its instruction buffer. */
static void __kprobes kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (p->pre_handler == aggr_pre_handler) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}

void __kprobes dump_kprobe(struct kprobe *kp)
{
	printk(KERN_WARNING "Dumping kprobe:\n");
	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
	       kp->symbol_name, kp->addr, kp->offset);
}

/* Module notifier call back, checking kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module's .text and
	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
	 * notified, only the .init.text section will be freed. We need to
	 * disable the kprobes which have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed but not synced to disk.
				 * Hence, disarming the breakpoint isn't
				 * needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};

static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Lookup and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
				&size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s %s%s\n",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "),
			(kprobe_gone(p) ? "[GONE]" : ""),
			((kprobe_disabled(p) && !kprobe_gone(p)) ?
			 "[DISABLED]" : ""));
	else
		seq_printf(pi, "%p  %s  %p %s%s\n",
			p->addr, kprobe_type, p->addr,
			(kprobe_gone(p) ? "[GONE]" : ""),
			((kprobe_disabled(p) && !kprobe_gone(p)) ?
			 "[DISABLED]" : ""));
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static const struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static const struct file_operations debugfs_kprobes_operations = {
	.open		= kprobes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/* Disable one kprobe */
int __kprobes disable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	/* If the probe is already disabled (or gone), just return */
	if (kprobe_disabled(kp))
		goto out;

	kp->flags |= KPROBE_FLAG_DISABLED;
	if (p != kp)
		/* When kp != p, p is always enabled. */
		try_to_disable_aggr_kprobe(p);

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		disarm_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int __kprobes enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone, we couldn't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		arm_kprobe(p);

	p->flags &= ~KPROBE_FLAG_DISABLED;
	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);

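/*
 * Usage sketch: a registered probe can be turned off and back on without
 * unregistering it, which avoids the synchronize_sched() in the
 * unregister path (kp is a previously registered kprobe):
 *
 *	ret = disable_kprobe(&kp);	breakpoint removed, registration kept
 *	...
 *	ret = enable_kprobe(&kp);	breakpoint re-armed
 */
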
static void __kprobes arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))
				arch_arm_kprobe(p);
	}
	mutex_unlock(&text_mutex);

	kprobes_all_disarmed = false;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed)
		goto already_disabled;

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&text_mutex);
	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	}

	return count;
}

static const struct file_operations fops_kp = {
	.read =		read_enabled_file_bool,
	.write =	write_enabled_file_bool,
};

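/*
 * Usage sketch: with CONFIG_DEBUG_FS, all kprobes can be disarmed and
 * re-armed from user space (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	echo 0 > /sys/kernel/debug/kprobes/enabled	disarm all kprobes
 *	echo 1 > /sys/kernel/debug/kprobes/enabled	re-arm them
 *	cat /sys/kernel/debug/kprobes/list		list registered probes
 */
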
static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);