kprobes: remove dependency on CONFIG_MODULES
[linux-2.6-block.git] / kernel / kprobes.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Kernel Probes (KProbes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */

#define pr_fmt(fmt) "kprobes: " fmt

#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>
#include <linux/static_call.h>
#include <linux/perf_event.h>
#include <linux/execmem.h>

#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <linux/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

#if !defined(CONFIG_OPTPROBES) || !defined(CONFIG_SYSCTL)
#define kprobe_sysctls_init() do { } while (0)
#endif

static int kprobes_initialized;
/*
 * 'kprobe_table' can be accessed by
 * - normal hlist traversal and RCU add/del while 'kprobe_mutex' is held,
 * or
 * - RCU hlist traversal with preemption disabled (breakpoint handlers).
 */
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
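/*
 * Note: with KPROBE_HASH_BITS == 6 the table above has 64 buckets; probes
 * are looked up by hashing the probed address with hash_ptr() in
 * get_kprobe() below.
 */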

/* NOTE: change this value only with 'kprobe_mutex' held */
static bool kprobes_all_disarmed;

/* This protects 'kprobe_table' and 'optimizing_list' */
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance);

kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
					    unsigned int __unused)
{
	return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
}

/*
 * Blacklist -- list of 'struct kprobe_blacklist_entry' to store info where
 * kprobes can not probe.
 */
static LIST_HEAD(kprobe_blacklist);

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * 'kprobe::ainsn.insn' points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	struct kprobe_insn_cache *cache;
	int nused;
	int ngarbage;
	char slot_used[];
};

#define KPROBE_INSN_PAGE_SIZE(slots)			\
	(offsetof(struct kprobe_insn_page, slot_used) +	\
	 (sizeof(char) * (slots)))

static int slots_per_page(struct kprobe_insn_cache *c)
{
	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
}
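
/*
 * Worked example (illustrative; the numbers are arch-dependent): on x86,
 * 'kprobe_opcode_t' is one byte and 'kprobe_insn_slots.insn_size' is
 * MAX_INSN_SIZE (16), so a 4096-byte page provides 4096 / (16 * 1) = 256
 * instruction slots.
 */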

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

void __weak *alloc_insn_page(void)
{
	/*
	 * Use execmem_alloc() so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * for most of the architectures.
	 * (e.g. x86-64 needs this to handle the %rip-relative fixups.)
	 */
	return execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);
}

static void free_insn_page(void *page)
{
	execmem_free(page);
}

struct kprobe_insn_cache kprobe_insn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
	.alloc = alloc_insn_page,
	.free = free_insn_page,
	.sym = KPROBE_INSN_PAGE_SYM,
	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
	.nr_garbage = 0,
};
static int collect_garbage_slots(struct kprobe_insn_cache *c);

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip;
	kprobe_opcode_t *slot = NULL;

	/* Since the slot array is not protected by rcu, we need a mutex */
	mutex_lock(&c->mutex);
 retry:
	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if (kip->nused < slots_per_page(c)) {
			int i;

			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					slot = kip->insns + (i * c->insn_size);
					rcu_read_unlock();
					goto out;
				}
			}
			/* kip->nused is broken. Fix it. */
			kip->nused = slots_per_page(c);
			WARN_ON(1);
		}
	}
	rcu_read_unlock();

	/* If there are any garbage slots, collect them and try again. */
	if (c->nr_garbage && collect_garbage_slots(c) == 0)
		goto retry;

	/* All out of space. Need to allocate a new page. */
	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
	if (!kip)
		goto out;

	kip->insns = c->alloc();
	if (!kip->insns) {
		kfree(kip);
		goto out;
	}
	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	kip->cache = c;
	list_add_rcu(&kip->list, &c->pages);
	slot = kip->insns;

	/* Record the perf ksymbol register event after adding the page */
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns,
			   PAGE_SIZE, false, c->sym);
out:
	mutex_unlock(&c->mutex);
	return slot;
}

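/*
 * Usage sketch: arch code normally reaches the allocator above through the
 * get_insn_slot()/free_insn_slot() wrappers (in <linux/kprobes.h>), which
 * pass 'kprobe_insn_slots' as the cache, e.g. while preparing a probe:
 *
 *	p->ainsn.insn = get_insn_slot();
 *	if (!p->ainsn.insn)
 *		return -ENOMEM;
 *	...
 *	free_insn_slot(p->ainsn.insn, dirty);
 */
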
/* Return true if all garbage slots are collected, otherwise false. */
static bool collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use. Free it unless
		 * it's the last one. We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kip->list)) {
			/*
			 * Record perf ksymbol unregister event before removing
			 * the page.
			 */
			perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
					   (unsigned long)kip->insns, PAGE_SIZE, true,
					   kip->cache->sym);
			list_del_rcu(&kip->list);
			synchronize_rcu();
			kip->cache->free(kip->insns);
			kfree(kip);
		}
		return true;
	}
	return false;
}

static int collect_garbage_slots(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no-one is running on the garbage slots */
	synchronize_rcu();

	list_for_each_entry_safe(kip, next, &c->pages, list) {
		int i;

		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < slots_per_page(c); i++) {
			if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
				break;
		}
	}
	c->nr_garbage = 0;
	return 0;
}

void __free_insn_slot(struct kprobe_insn_cache *c,
		      kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	long idx;

	mutex_lock(&c->mutex);
	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		idx = ((long)slot - (long)kip->insns) /
			(c->insn_size * sizeof(kprobe_opcode_t));
		if (idx >= 0 && idx < slots_per_page(c))
			goto out;
	}
	/* Could not find this slot. */
	WARN_ON(1);
	kip = NULL;
out:
	rcu_read_unlock();
	/* Mark and sweep: this may sleep */
	if (kip) {
		/* Check double free */
		WARN_ON(kip->slot_used[idx] != SLOT_USED);
		if (dirty) {
			kip->slot_used[idx] = SLOT_DIRTY;
			kip->ngarbage++;
			if (++c->nr_garbage > slots_per_page(c))
				collect_garbage_slots(c);
		} else {
			collect_one_slot(kip, idx);
		}
	}
	mutex_unlock(&c->mutex);
}

/*
 * Check whether the given address is on a page of kprobe instruction slots.
 * This will be used for checking whether the address on a stack
 * is on a text area or not.
 */
bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
{
	struct kprobe_insn_page *kip;
	bool ret = false;

	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if (addr >= (unsigned long)kip->insns &&
		    addr < (unsigned long)kip->insns + PAGE_SIZE) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
			     unsigned long *value, char *type, char *sym)
{
	struct kprobe_insn_page *kip;
	int ret = -ERANGE;

	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if ((*symnum)--)
			continue;
		strscpy(sym, c->sym, KSYM_NAME_LEN);
		*type = 't';
		*value = (unsigned long)kip->insns;
		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

#ifdef CONFIG_OPTPROBES
void __weak *alloc_optinsn_page(void)
{
	return alloc_insn_page();
}

void __weak free_optinsn_page(void *page)
{
	free_insn_page(page);
}

/* For optimized_kprobe buffer */
struct kprobe_insn_cache kprobe_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
	.alloc = alloc_optinsn_page,
	.free = free_optinsn_page,
	.sym = KPROBE_OPTINSN_PAGE_SYM,
	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
	/* .insn_size is initialized later */
	.nr_garbage = 0,
};
#endif	/* CONFIG_OPTPROBES */
#endif	/* __ARCH_WANT_KPROBES_INSN_SLOT */

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__this_cpu_write(kprobe_instance, kp);
}

static inline void reset_kprobe_instance(void)
{
	__this_cpu_write(kprobe_instance, NULL);
}

/*
 * This routine is called either:
 * - under the 'kprobe_mutex' - during kprobe_[un]register().
 * OR
 * - with preemption disabled - from architecture specific code.
 */
struct kprobe *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, head, hlist,
				 lockdep_is_held(&kprobe_mutex)) {
		if (p->addr == addr)
			return p;
	}

	return NULL;
}
NOKPROBE_SYMBOL(get_kprobe);

static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);

/* Return true if 'p' is an aggregator */
static inline bool kprobe_aggrprobe(struct kprobe *p)
{
	return p->pre_handler == aggr_pre_handler;
}

/* Return true if 'p' is unused */
static inline bool kprobe_unused(struct kprobe *p)
{
	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
	       list_empty(&p->list);
}

/* Keep all fields in the kprobe consistent. */
static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
{
	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
}

#ifdef CONFIG_OPTPROBES
/* NOTE: This is protected by 'kprobe_mutex'. */
static bool kprobes_allow_optimization;

/*
 * Call all 'kprobe::pre_handler' on the list, but ignore their return values.
 * This must be called from arch-dep optimized caller.
 */
void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->pre_handler(kp, regs);
		}
		reset_kprobe_instance();
	}
}
NOKPROBE_SYMBOL(opt_pre_handler);

/* Free optimized instructions and optimized_kprobe */
static void free_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_remove_optimized_kprobe(op);
	arch_remove_kprobe(p);
	kfree(op);
}

/* Return true if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		return arch_prepared_optinsn(&op->optinsn);
	}

	return 0;
}

/* Return true if the kprobe is disarmed. Note: p must be on hash list */
bool kprobe_disarmed(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* If kprobe is not aggr/opt probe, just return kprobe is disabled */
	if (!kprobe_aggrprobe(p))
		return kprobe_disabled(p);

	op = container_of(p, struct optimized_kprobe, kp);

	return kprobe_disabled(p) && list_empty(&op->list);
}

/* Return true if the probe is queued on (un)optimizing lists */
static bool kprobe_queued(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (!list_empty(&op->list))
			return true;
	}
	return false;
}

/*
 * Return an optimized kprobe whose optimizing code replaces
 * instructions including 'addr' (exclude breakpoint).
 */
static struct kprobe *get_optimized_kprobe(kprobe_opcode_t *addr)
{
	int i;
	struct kprobe *p = NULL;
	struct optimized_kprobe *op;

	/* Don't check i == 0, since that is a breakpoint case. */
	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH / sizeof(kprobe_opcode_t); i++)
		p = get_kprobe(addr - i);

	if (p && kprobe_optready(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (arch_within_optimized_kprobe(op, addr))
			return p;
	}

	return NULL;
}

/* Optimization staging list, protected by 'kprobe_mutex' */
static LIST_HEAD(optimizing_list);
static LIST_HEAD(unoptimizing_list);
static LIST_HEAD(freeing_list);

static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
#define OPTIMIZE_DELAY 5
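/*
 * Note: the delay is in jiffies, as schedule_delayed_work() expects;
 * batching requests for a few ticks lets the optimizer handle many
 * (un)optimizations in a single pass.
 */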

/*
 * Optimize (replace a breakpoint with a jump) kprobes listed on
 * 'optimizing_list'.
 */
static void do_optimize_kprobes(void)
{
	lockdep_assert_held(&text_mutex);
	/*
	 * The optimization/unoptimization refers to 'online_cpus' via
	 * stop_machine(), and cpu-hotplug modifies 'online_cpus'. At
	 * the same time, 'text_mutex' will be held in cpu-hotplug and here.
	 * This combination can cause a deadlock (cpu-hotplug tries to lock
	 * 'text_mutex' but stop_machine() can not be done because
	 * 'online_cpus' has been changed).
	 * To avoid this deadlock, the caller must have locked cpu-hotplug
	 * to prevent cpu-hotplug outside of 'text_mutex' locking.
	 */
	lockdep_assert_cpus_held();

	/* Optimization is never done when disarmed */
	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
	    list_empty(&optimizing_list))
		return;

	arch_optimize_kprobes(&optimizing_list);
}

/*
 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 * if need) kprobes listed on 'unoptimizing_list'.
 */
static void do_unoptimize_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	lockdep_assert_held(&text_mutex);
	/* See comment in do_optimize_kprobes() */
	lockdep_assert_cpus_held();

	if (!list_empty(&unoptimizing_list))
		arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);

	/* Loop on 'freeing_list' for disarming and removing from kprobe hash list */
	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		/* Switching from detour code to origin */
		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
		/* Disarm probes if marked disabled and not gone */
		if (kprobe_disabled(&op->kp) && !kprobe_gone(&op->kp))
			arch_disarm_kprobe(&op->kp);
		if (kprobe_unused(&op->kp)) {
			/*
			 * Remove unused probes from hash list. After waiting
			 * for synchronization, these probes are reclaimed.
			 * (reclaiming is done by do_free_cleaned_kprobes().)
			 */
			hlist_del_rcu(&op->kp.hlist);
		} else
			list_del_init(&op->list);
	}
}

/* Reclaim all kprobes on the 'freeing_list' */
static void do_free_cleaned_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		list_del_init(&op->list);
		if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
			/*
			 * This must not happen, but if there is a kprobe
			 * still in use, keep it on kprobes hash list.
			 */
			continue;
		}
		free_aggr_kprobe(&op->kp);
	}
}

/* Start optimizer after OPTIMIZE_DELAY passed */
static void kick_kprobe_optimizer(void)
{
	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}

/* Kprobe jump optimizer */
static void kprobe_optimizer(struct work_struct *work)
{
	mutex_lock(&kprobe_mutex);
	cpus_read_lock();
	mutex_lock(&text_mutex);

	/*
	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
	 * kprobes before waiting for the quiescence period.
	 */
	do_unoptimize_kprobes();

	/*
	 * Step 2: Wait for the quiescence period to ensure that all
	 * potentially preempted tasks have been scheduled normally. Because
	 * an optprobe may modify multiple instructions, there is a chance
	 * that the Nth instruction is preempted. In that case, such tasks can
	 * return to the 2nd-Nth byte of the jump instruction. This wait is
	 * for avoiding it.
	 * Note that on a non-preemptive kernel, this is transparently
	 * converted to synchronize_sched() to wait for all interrupts to
	 * have completed.
	 */
	synchronize_rcu_tasks();

	/* Step 3: Optimize kprobes after the quiescence period */
	do_optimize_kprobes();

	/* Step 4: Free cleaned kprobes after the quiescence period */
	do_free_cleaned_kprobes();

	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	/* Step 5: Kick optimizer again if needed */
	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
		kick_kprobe_optimizer();

	mutex_unlock(&kprobe_mutex);
}

/* Wait for optimization and unoptimization to complete */
void wait_for_kprobe_optimizer(void)
{
	mutex_lock(&kprobe_mutex);

	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
		mutex_unlock(&kprobe_mutex);

		/* This will also make 'optimizing_work' execute immediately */
		flush_delayed_work(&optimizing_work);
		/* 'optimizing_work' might not have been queued yet, relax */
		cpu_relax();

		mutex_lock(&kprobe_mutex);
	}

	mutex_unlock(&kprobe_mutex);
}

bool optprobe_queued_unopt(struct optimized_kprobe *op)
{
	struct optimized_kprobe *_op;

	list_for_each_entry(_op, &unoptimizing_list, list) {
		if (op == _op)
			return true;
	}

	return false;
}

/* Optimize kprobe if p is ready to be optimized */
static void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* kprobes with 'post_handler' can not be optimized */
	if (p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check there is no other kprobes at the optimized instructions */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
		if (optprobe_queued_unopt(op)) {
			/* This is under unoptimizing. Just dequeue the probe */
			list_del_init(&op->list);
		}
		return;
	}
	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

	/*
	 * On the 'unoptimizing_list' and 'optimizing_list',
	 * 'op' must have OPTIMIZED flag
	 */
	if (WARN_ON_ONCE(!list_empty(&op->list)))
		return;

	list_add(&op->list, &optimizing_list);
	kick_kprobe_optimizer();
}

/* Short cut to direct unoptimizing */
static void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
	lockdep_assert_cpus_held();
	arch_unoptimize_kprobe(op);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
}

/* Unoptimize a kprobe if p is optimized */
static void unoptimize_kprobe(struct kprobe *p, bool force)
{
	struct optimized_kprobe *op;

	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
		return; /* This is not an optprobe nor optimized */

	op = container_of(p, struct optimized_kprobe, kp);
	if (!kprobe_optimized(p))
		return;

	if (!list_empty(&op->list)) {
		if (optprobe_queued_unopt(op)) {
			/* Queued in unoptimizing queue */
			if (force) {
				/*
				 * Forcibly unoptimize the kprobe here, and queue it
				 * in the freeing list for release afterwards.
				 */
				force_unoptimize_kprobe(op);
				list_move(&op->list, &freeing_list);
			}
		} else {
			/* Dequeue from the optimizing queue */
			list_del_init(&op->list);
			op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
		}
		return;
	}

	/* Optimized kprobe case */
	if (force) {
		/* Forcibly update the code: this is a special case */
		force_unoptimize_kprobe(op);
	} else {
		list_add(&op->list, &unoptimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Cancel unoptimizing for reusing */
static int reuse_unused_kprobe(struct kprobe *ap)
{
	struct optimized_kprobe *op;

	/*
	 * Unused kprobe MUST be on the way of delayed unoptimizing (means
	 * there is still a relative jump) and disabled.
	 */
	op = container_of(ap, struct optimized_kprobe, kp);
	WARN_ON_ONCE(list_empty(&op->list));
	/* Enable the probe again */
	ap->flags &= ~KPROBE_FLAG_DISABLED;
	/* Optimize it again. (remove from 'op->list') */
	if (!kprobe_optready(ap))
		return -EINVAL;

	optimize_kprobe(ap);
	return 0;
}

/* Remove optimized instructions */
static void kill_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	if (!list_empty(&op->list))
		/* Dequeue from the (un)optimization queue */
		list_del_init(&op->list);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;

	if (kprobe_unused(p)) {
		/*
		 * Unused kprobe is on unoptimizing or freeing list. We move it
		 * to freeing_list and let the kprobe_optimizer() remove it from
		 * the kprobe hash list and free it.
		 */
		if (optprobe_queued_unopt(op))
			list_move(&op->list, &freeing_list);
	}

	/* Don't touch the code, because it is already freed. */
	arch_remove_optimized_kprobe(op);
}

static inline
void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	if (!kprobe_ftrace(p))
		arch_prepare_optimized_kprobe(op, p);
}

/* Try to prepare optimized instructions */
static void prepare_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	__prepare_optimized_kprobe(op, p);
}

/* Allocate new optimized_kprobe and try to prepare optimized instructions. */
static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
	if (!op)
		return NULL;

	INIT_LIST_HEAD(&op->list);
	op->kp.addr = p->addr;
	__prepare_optimized_kprobe(op, p);

	return &op->kp;
}

static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);

/*
 * Prepare an optimized_kprobe and optimize it.
 * NOTE: 'p' must be a normal registered kprobe.
 */
static void try_to_optimize_kprobe(struct kprobe *p)
{
	struct kprobe *ap;
	struct optimized_kprobe *op;

	/* Impossible to optimize ftrace-based kprobe. */
	if (kprobe_ftrace(p))
		return;

	/* For preparing optimization, jump_label_text_reserved() is called. */
	cpus_read_lock();
	jump_label_lock();
	mutex_lock(&text_mutex);

	ap = alloc_aggr_kprobe(p);
	if (!ap)
		goto out;

	op = container_of(ap, struct optimized_kprobe, kp);
	if (!arch_prepared_optinsn(&op->optinsn)) {
		/* If failed to setup optimizing, fallback to kprobe. */
		arch_remove_optimized_kprobe(op);
		kfree(op);
		goto out;
	}

	init_aggr_kprobe(ap, p);
	optimize_kprobe(ap);	/* This just kicks optimizer thread. */

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
	cpus_read_unlock();
}

static void optimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already allowed, just return. */
	if (kprobes_allow_optimization)
		goto out;

	cpus_read_lock();
	kprobes_allow_optimization = true;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry(p, head, hlist)
			if (!kprobe_disabled(p))
				optimize_kprobe(p);
	}
	cpus_read_unlock();
	pr_info("kprobe jump-optimization is enabled. All kprobes are optimized if possible.\n");
out:
	mutex_unlock(&kprobe_mutex);
}

#ifdef CONFIG_SYSCTL
static void unoptimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already prohibited, just return. */
	if (!kprobes_allow_optimization) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	cpus_read_lock();
	kprobes_allow_optimization = false;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry(p, head, hlist) {
			if (!kprobe_disabled(p))
				unoptimize_kprobe(p, false);
		}
	}
	cpus_read_unlock();
	mutex_unlock(&kprobe_mutex);

	/* Wait for unoptimizing completion. */
	wait_for_kprobe_optimizer();
	pr_info("kprobe jump-optimization is disabled. All kprobes are based on software breakpoint.\n");
}

static DEFINE_MUTEX(kprobe_sysctl_mutex);
static int sysctl_kprobes_optimization;
static int proc_kprobes_optimization_handler(struct ctl_table *table,
					     int write, void *buffer,
					     size_t *length, loff_t *ppos)
{
	int ret;

	mutex_lock(&kprobe_sysctl_mutex);
	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (sysctl_kprobes_optimization)
		optimize_all_kprobes();
	else
		unoptimize_all_kprobes();
	mutex_unlock(&kprobe_sysctl_mutex);

	return ret;
}

static struct ctl_table kprobe_sysctls[] = {
	{
		.procname	= "kprobes-optimization",
		.data		= &sysctl_kprobes_optimization,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_kprobes_optimization_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{}
};

static void __init kprobe_sysctls_init(void)
{
	register_sysctl_init("debug", kprobe_sysctls);
}
#endif /* CONFIG_SYSCTL */
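
/*
 * Run-time toggle, reached through the 'debug.kprobes-optimization' sysctl
 * registered above by kprobe_sysctls_init() (a usage sketch):
 *
 *	# sysctl -w debug.kprobes-optimization=0	# breakpoint-only mode
 *	# sysctl -w debug.kprobes-optimization=1	# allow jump optimization
 */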

/* Put a breakpoint for a probe. */
static void __arm_kprobe(struct kprobe *p)
{
	struct kprobe *_p;

	lockdep_assert_held(&text_mutex);

	/* Find the overlapping optimized kprobes. */
	_p = get_optimized_kprobe(p->addr);
	if (unlikely(_p))
		/* Fallback to unoptimized kprobe */
		unoptimize_kprobe(_p, true);

	arch_arm_kprobe(p);
	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
}

/* Remove the breakpoint of a probe. */
static void __disarm_kprobe(struct kprobe *p, bool reopt)
{
	struct kprobe *_p;

	lockdep_assert_held(&text_mutex);

	/* Try to unoptimize */
	unoptimize_kprobe(p, kprobes_all_disarmed);

	if (!kprobe_queued(p)) {
		arch_disarm_kprobe(p);
		/* If another kprobe was blocked, re-optimize it. */
		_p = get_optimized_kprobe(p->addr);
		if (unlikely(_p) && reopt)
			optimize_kprobe(_p);
	}
	/*
	 * TODO: Since unoptimization and real disarming will be done by
	 * the worker thread, we can not check here whether another probe
	 * was unoptimized because of this probe. It should be re-optimized
	 * by the worker thread.
	 */
}

#else /* !CONFIG_OPTPROBES */

#define optimize_kprobe(p)			do {} while (0)
#define unoptimize_kprobe(p, f)			do {} while (0)
#define kill_optimized_kprobe(p)		do {} while (0)
#define prepare_optimized_kprobe(p)		do {} while (0)
#define try_to_optimize_kprobe(p)		do {} while (0)
#define __arm_kprobe(p)				arch_arm_kprobe(p)
#define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
#define kprobe_disarmed(p)			kprobe_disabled(p)
#define wait_for_kprobe_optimizer()		do {} while (0)

static int reuse_unused_kprobe(struct kprobe *ap)
{
	/*
	 * If the optimized kprobe is NOT supported, the aggr kprobe is
	 * released at the same time that the last aggregated kprobe is
	 * unregistered.
	 * Thus there should be no chance to reuse an unused kprobe.
	 */
	WARN_ON_ONCE(1);
	return -EINVAL;
}

static void free_aggr_kprobe(struct kprobe *p)
{
	arch_remove_kprobe(p);
	kfree(p);
}

static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */

#ifdef CONFIG_KPROBES_ON_FTRACE
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS,
};

static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
};

static int kprobe_ipmodify_enabled;
static int kprobe_ftrace_enabled;

static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
			       int *cnt)
{
	int ret;

	lockdep_assert_held(&kprobe_mutex);

	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0);
	if (WARN_ONCE(ret < 0, "Failed to arm kprobe-ftrace at %pS (error %d)\n", p->addr, ret))
		return ret;

	if (*cnt == 0) {
		ret = register_ftrace_function(ops);
		if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret))
			goto err_ftrace;
	}

	(*cnt)++;
	return ret;

err_ftrace:
	/*
	 * At this point, since ops is not registered, we should be safe
	 * from registering an empty filter.
	 */
	ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
	return ret;
}

static int arm_kprobe_ftrace(struct kprobe *p)
{
	bool ipmodify = (p->post_handler != NULL);

	return __arm_kprobe_ftrace(p,
		ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
		ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
}

static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
				  int *cnt)
{
	int ret;

	lockdep_assert_held(&kprobe_mutex);

	if (*cnt == 1) {
		ret = unregister_ftrace_function(ops);
		if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (error %d)\n", ret))
			return ret;
	}

	(*cnt)--;

	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
	WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (error %d)\n",
		  p->addr, ret);
	return ret;
}

static int disarm_kprobe_ftrace(struct kprobe *p)
{
	bool ipmodify = (p->post_handler != NULL);

	return __disarm_kprobe_ftrace(p,
		ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
		ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
}
#else	/* !CONFIG_KPROBES_ON_FTRACE */
static inline int arm_kprobe_ftrace(struct kprobe *p)
{
	return -ENODEV;
}

static inline int disarm_kprobe_ftrace(struct kprobe *p)
{
	return -ENODEV;
}
#endif	/* CONFIG_KPROBES_ON_FTRACE */

static int prepare_kprobe(struct kprobe *p)
{
	/* Must ensure p->addr is really on ftrace */
	if (kprobe_ftrace(p))
		return arch_prepare_kprobe_ftrace(p);

	return arch_prepare_kprobe(p);
}

static int arm_kprobe(struct kprobe *kp)
{
	if (unlikely(kprobe_ftrace(kp)))
		return arm_kprobe_ftrace(kp);

	cpus_read_lock();
	mutex_lock(&text_mutex);
	__arm_kprobe(kp);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	return 0;
}

static int disarm_kprobe(struct kprobe *kp, bool reopt)
{
	if (unlikely(kprobe_ftrace(kp)))
		return disarm_kprobe_ftrace(kp);

	cpus_read_lock();
	mutex_lock(&text_mutex);
	__disarm_kprobe(kp, reopt);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	return 0;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}
NOKPROBE_SYMBOL(aggr_pre_handler);

static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
			      unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}
NOKPROBE_SYMBOL(aggr_post_handler);

/* Walks the list and increments 'nmissed' if 'p' has child probes. */
void kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;

	if (!kprobe_aggrprobe(p)) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
}
NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);

static struct kprobe kprobe_busy = {
	.addr = (void *) get_kprobe,
};

void kprobe_busy_begin(void)
{
	struct kprobe_ctlblk *kcb;

	preempt_disable();
	__this_cpu_write(current_kprobe, &kprobe_busy);
	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
}

void kprobe_busy_end(void)
{
	__this_cpu_write(current_kprobe, NULL);
	preempt_enable();
}
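
/*
 * Usage note (for callers outside this file): a section that must not be
 * re-entered by kprobe handlers can be bracketed with kprobe_busy_begin()
 * and kprobe_busy_end(). Pointing 'current_kprobe' at the dummy
 * 'kprobe_busy' makes any probe hit in between look like a recursive hit,
 * which the arch handlers reject:
 *
 *	kprobe_busy_begin();
 *	... touch data that kprobe handlers may also touch ...
 *	kprobe_busy_end();
 */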

/* Add the new probe to 'ap->list'. */
static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	if (p->post_handler)
		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */

	list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	return 0;
}

/*
 * Fill in the required fields of the aggregator kprobe. Replace the
 * earlier kprobe in the hlist with the aggregator kprobe.
 */
static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	/* Copy the insn slot of 'p' to 'ap'. */
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
	ap->pre_handler = aggr_pre_handler;
	/* We don't care about a kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;

	INIT_LIST_HEAD(&ap->list);
	INIT_HLIST_NODE(&ap->hlist);

	list_add_rcu(&p->list, &ap->list);
	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This registers the second or subsequent kprobe at the same address.
 */
static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = orig_p;

	cpus_read_lock();

	/* For preparing optimization, jump_label_text_reserved() is called */
	jump_label_lock();
	mutex_lock(&text_mutex);

	if (!kprobe_aggrprobe(orig_p)) {
		/* If 'orig_p' is not an 'aggr_kprobe', create a new one. */
		ap = alloc_aggr_kprobe(orig_p);
		if (!ap) {
			ret = -ENOMEM;
			goto out;
		}
		init_aggr_kprobe(ap, orig_p);
	} else if (kprobe_unused(ap)) {
		/* This probe is going to die. Rescue it */
		ret = reuse_unused_kprobe(ap);
		if (ret)
			goto out;
	}

	if (kprobe_gone(ap)) {
		/*
		 * We are attempting to insert a new probe at the same
		 * location that had a probe in a module vaddr area which
		 * has already been freed. So, the instruction slot has
		 * already been released. We need a new slot for the new
		 * probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free 'ap'. It will be used next time, or
			 * freed by unregister_kprobe().
			 */
			goto out;

		/* Prepare optimized instructions if possible. */
		prepare_optimized_kprobe(ap);

		/*
		 * Clear the gone flag to prevent allocating a new slot again,
		 * and set the disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	/* Copy the insn slot of 'p' to 'ap'. */
	copy_kprobe(ap, p);
	ret = add_new_kprobe(ap, p);

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
	cpus_read_unlock();

	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed) {
			/* Arm the breakpoint again. */
			ret = arm_kprobe(ap);
			if (ret) {
				ap->flags |= KPROBE_FLAG_DISABLED;
				list_del_rcu(&p->list);
				synchronize_rcu();
			}
		}
	}
	return ret;
}

bool __weak arch_within_kprobe_blacklist(unsigned long addr)
{
	/* The '__kprobes' functions and entry code must not be probed. */
	return addr >= (unsigned long)__kprobes_text_start &&
	       addr < (unsigned long)__kprobes_text_end;
}

static bool __within_kprobe_blacklist(unsigned long addr)
{
	struct kprobe_blacklist_entry *ent;

	if (arch_within_kprobe_blacklist(addr))
		return true;
	/*
	 * If 'kprobe_blacklist' is defined, check the address and
	 * reject any probe registration in the prohibited area.
	 */
	list_for_each_entry(ent, &kprobe_blacklist, list) {
		if (addr >= ent->start_addr && addr < ent->end_addr)
			return true;
	}
	return false;
}

bool within_kprobe_blacklist(unsigned long addr)
{
	char symname[KSYM_NAME_LEN], *p;

	if (__within_kprobe_blacklist(addr))
		return true;

	/* Check if the address is on a suffixed-symbol */
	if (!lookup_symbol_name(addr, symname)) {
		p = strchr(symname, '.');
		if (!p)
			return false;
		*p = '\0';
		addr = (unsigned long)kprobe_lookup_name(symname, 0);
		if (addr)
			return __within_kprobe_blacklist(addr);
	}
	return false;
}
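
/*
 * A sketch of how addresses end up covered by the checks above: functions
 * annotated '__kprobes' are linked into the '.kprobes.text' section tested
 * by arch_within_kprobe_blacklist(), while NOKPROBE_SYMBOL() records an
 * entry from which 'kprobe_blacklist' is populated at boot:
 *
 *	static int critical_handler(struct pt_regs *regs) { ... }
 *	NOKPROBE_SYMBOL(critical_handler);	// hypothetical function
 */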

/*
 * arch_adjust_kprobe_addr - adjust the address
 * @addr: symbol base address
 * @offset: offset within the symbol
 * @on_func_entry: was this @addr+@offset on the function entry
 *
 * Typically returns @addr + @offset, except for special cases where the
 * function might be prefixed by a CFI landing pad, in that case any offset
 * inside the landing pad is mapped to the first 'real' instruction of the
 * symbol.
 *
 * Specifically, for things like IBT/BTI, skip the resp. ENDBR/BTI.C
 * instruction at +0.
 */
kprobe_opcode_t *__weak arch_adjust_kprobe_addr(unsigned long addr,
						unsigned long offset,
						bool *on_func_entry)
{
	*on_func_entry = !offset;
	return (kprobe_opcode_t *)(addr + offset);
}

/*
 * If 'symbol_name' is specified, look it up and add the 'offset'
 * to it. This way, we can specify a relative address to a symbol.
 * This returns encoded errors if it fails to look up the symbol or if an
 * invalid combination of parameters is given.
 */
static kprobe_opcode_t *
_kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name,
	     unsigned long offset, bool *on_func_entry)
{
	if ((symbol_name && addr) || (!symbol_name && !addr))
		goto invalid;

	if (symbol_name) {
		/*
		 * Input: @sym + @offset
		 * Output: @addr + @offset
		 *
		 * NOTE: kprobe_lookup_name() does *NOT* fold the offset
		 * argument into its output!
		 */
		addr = kprobe_lookup_name(symbol_name, offset);
		if (!addr)
			return ERR_PTR(-ENOENT);
	}

	/*
	 * So here we have @addr + @offset, displace it into a new
	 * @addr' + @offset' where @addr' is the symbol start address.
	 */
	addr = (void *)addr + offset;
	if (!kallsyms_lookup_size_offset((unsigned long)addr, NULL, &offset))
		return ERR_PTR(-ENOENT);
	addr = (void *)addr - offset;

	/*
	 * Then ask the architecture to re-combine them, taking care of
	 * magical function entry details while telling us if this was indeed
	 * at the start of the function.
	 */
	addr = arch_adjust_kprobe_addr((unsigned long)addr, offset, on_func_entry);
	if (addr)
		return addr;

invalid:
	return ERR_PTR(-EINVAL);
}

static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
{
	bool on_func_entry;

	return _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
}
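
/*
 * Example of the two addressing modes accepted above (the probed symbol is
 * an arbitrary illustration): exactly one of 'addr' and 'symbol_name' may
 * be set, otherwise _kprobe_addr() returns ERR_PTR(-EINVAL).
 *
 *	struct kprobe kp1 = { .symbol_name = "vfs_read", .offset = 0x10 };
 *	struct kprobe kp2 = { .addr = (kprobe_opcode_t *)addr_in_text };
 */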

/*
 * Check that 'p' is valid and return the aggregator kprobe
 * at the same address.
 */
static struct kprobe *__get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	lockdep_assert_held(&kprobe_mutex);

	ap = get_kprobe(p->addr);
	if (unlikely(!ap))
		return NULL;

	if (p != ap) {
		list_for_each_entry(list_p, &ap->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return ap;
}

/*
 * Warn and return an error if the kprobe is being re-registered, since
 * that indicates a software bug.
 */
static inline int warn_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;

	mutex_lock(&kprobe_mutex);
	if (WARN_ON_ONCE(__get_valid_kprobe(p)))
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);

	return ret;
}

static int check_ftrace_location(struct kprobe *p)
{
	unsigned long addr = (unsigned long)p->addr;

	if (ftrace_location(addr) == addr) {
#ifdef CONFIG_KPROBES_ON_FTRACE
		p->flags |= KPROBE_FLAG_FTRACE;
#else	/* !CONFIG_KPROBES_ON_FTRACE */
		return -EINVAL;
#endif
	}
	return 0;
}

static bool is_cfi_preamble_symbol(unsigned long addr)
{
	char symbuf[KSYM_NAME_LEN];

	if (lookup_symbol_name(addr, symbuf))
		return false;

	/* str_has_prefix() takes (string, prefix) in that order. */
	return str_has_prefix(symbuf, "__cfi_") ||
	       str_has_prefix(symbuf, "__pfx_");
}

static int check_kprobe_address_safe(struct kprobe *p,
				     struct module **probed_mod)
{
	int ret;

	ret = check_ftrace_location(p);
	if (ret)
		return ret;
	jump_label_lock();
	preempt_disable();

	/* Ensure the address is in a text area, and find a module if it exists. */
	*probed_mod = NULL;
	if (!core_kernel_text((unsigned long) p->addr)) {
		*probed_mod = __module_text_address((unsigned long) p->addr);
		if (!(*probed_mod)) {
			ret = -EINVAL;
			goto out;
		}
	}
	/* Ensure it is not in a reserved area. */
	if (in_gate_area_no_mm((unsigned long) p->addr) ||
	    within_kprobe_blacklist((unsigned long) p->addr) ||
	    jump_label_text_reserved(p->addr, p->addr) ||
	    static_call_text_reserved(p->addr, p->addr) ||
	    find_bug((unsigned long)p->addr) ||
	    is_cfi_preamble_symbol((unsigned long)p->addr)) {
		ret = -EINVAL;
		goto out;
	}

	/* Get the module refcount and reject __init functions for loaded modules. */
	if (IS_ENABLED(CONFIG_MODULES) && *probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(*probed_mod))) {
			ret = -ENOENT;
			goto out;
		}

		/*
		 * If the module freed '.init.text', we can't insert
		 * kprobes in there.
		 */
		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
		    !module_is_coming(*probed_mod)) {
			module_put(*probed_mod);
			*probed_mod = NULL;
			ret = -ENOENT;
		}
	}

out:
	preempt_enable();
	jump_label_unlock();

	return ret;
}

int register_kprobe(struct kprobe *p)
{
	int ret;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;
	bool on_func_entry;

	/* Adjust probe address from symbol */
	addr = _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
	if (IS_ERR(addr))
		return PTR_ERR(addr);
	p->addr = addr;

	ret = warn_kprobe_rereg(p);
	if (ret)
		return ret;

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;
	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);

	ret = check_kprobe_address_safe(p, &probed_mod);
	if (ret)
		return ret;

	mutex_lock(&kprobe_mutex);

	if (on_func_entry)
		p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY;

	old_p = get_kprobe(p->addr);
	if (old_p) {
		/* Since this may unoptimize 'old_p', locking 'text_mutex'. */
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	cpus_read_lock();
	/* Prevent text modification */
	mutex_lock(&text_mutex);
	ret = prepare_kprobe(p);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
		ret = arm_kprobe(p);
		if (ret) {
			hlist_del_rcu(&p->hlist);
			synchronize_rcu();
			goto out;
		}
	}

	/* Try to optimize kprobe */
	try_to_optimize_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
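
/*
 * Minimal registration sketch (assumes module context; the probed symbol
 * and handler body are illustrative only):
 *
 *	static int pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("hit %pS\n", p->addr);
 *		return 0;	// 0 == let the probed instruction run
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name = "kernel_clone",
 *		.pre_handler = pre,
 *	};
 *
 *	ret = register_kprobe(&kp);	// 0 on success, negative errno on error
 *	...
 *	unregister_kprobe(&kp);
 */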
1da177e4 1692
223a76b2 1693/* Check if all probes on the 'ap' are disabled. */
29e8077a 1694static bool aggr_kprobe_disabled(struct kprobe *ap)
6f0f1dd7
MH
1695{
1696 struct kprobe *kp;
1697
7e6a71d8
MH
1698 lockdep_assert_held(&kprobe_mutex);
1699
1700 list_for_each_entry(kp, &ap->list, list)
6f0f1dd7
MH
1701 if (!kprobe_disabled(kp))
1702 /*
223a76b2
MH
1703 * Since there is an active probe on the list,
1704 * we can't disable this 'ap'.
6f0f1dd7 1705 */
29e8077a 1706 return false;
6f0f1dd7 1707
29e8077a 1708 return true;
6f0f1dd7
MH
1709}
1710
55479f64 1711static struct kprobe *__disable_kprobe(struct kprobe *p)
6f0f1dd7
MH
1712{
1713 struct kprobe *orig_p;
297f9233 1714 int ret;
6f0f1dd7 1715
57d4e317
MH
1716 lockdep_assert_held(&kprobe_mutex);
1717
6f0f1dd7
MH
1718 /* Get an original kprobe for return */
1719 orig_p = __get_valid_kprobe(p);
1720 if (unlikely(orig_p == NULL))
297f9233 1721 return ERR_PTR(-EINVAL);
6f0f1dd7
MH
1722
1723 if (!kprobe_disabled(p)) {
1724 /* Disable probe if it is a child probe */
1725 if (p != orig_p)
1726 p->flags |= KPROBE_FLAG_DISABLED;
1727
1728 /* Try to disarm and disable this/parent probe */
1729 if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
69d54b91 1730 /*
9c80e799
KI
1731 * Don't be lazy here. Even if 'kprobes_all_disarmed'
1732 * is false, 'orig_p' might not have been armed yet.
1733 * Note arm_all_kprobes() __tries__ to arm all kprobes
1734 * on a best-effort basis.
69d54b91 1735 */
9c80e799 1736 if (!kprobes_all_disarmed && !kprobe_disabled(orig_p)) {
297f9233
JY
1737 ret = disarm_kprobe(orig_p, true);
1738 if (ret) {
1739 p->flags &= ~KPROBE_FLAG_DISABLED;
1740 return ERR_PTR(ret);
1741 }
1742 }
6f0f1dd7
MH
1743 orig_p->flags |= KPROBE_FLAG_DISABLED;
1744 }
1745 }
1746
1747 return orig_p;
1748}
1749
de5bd88d
MH
1750/*
1751 * Unregister a kprobe without a scheduler synchronization.
1752 */
55479f64 1753static int __unregister_kprobe_top(struct kprobe *p)
de5bd88d 1754{
6d8e40a8 1755 struct kprobe *ap, *list_p;
de5bd88d 1756
6f0f1dd7
MH
1757 /* Disable kprobe. This will disarm it if needed. */
1758 ap = __disable_kprobe(p);
297f9233
JY
1759 if (IS_ERR(ap))
1760 return PTR_ERR(ap);
de5bd88d 1761
6f0f1dd7 1762 if (ap == p)
bf8f6e5b 1763 /*
6f0f1dd7
MH
1764 * This probe is an independent (and non-optimized) kprobe
1765 * (not an aggrprobe). Remove from the hash list.
bf8f6e5b 1766 */
6f0f1dd7
MH
1767 goto disarmed;
1768
1769 /* The following process expects this probe to be an aggrprobe */
1770 WARN_ON(!kprobe_aggrprobe(ap));
1771
6274de49
MH
1772 if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
1773 /*
1774 * '!disarmed' could happen if the probe is under delayed
1775 * unoptimizing.
1776 */
6f0f1dd7
MH
1777 goto disarmed;
1778 else {
1779 /* If the probe being disabled has special handlers, update the aggrprobe */
e8386a0c 1780 if (p->post_handler && !kprobe_gone(p)) {
7e6a71d8 1781 list_for_each_entry(list_p, &ap->list, list) {
9861668f
MH
1782 if ((list_p != p) && (list_p->post_handler))
1783 goto noclean;
1784 }
5dd7caf0
LH
1785 /*
1786 * For the kprobe-on-ftrace case, we keep the
1787 * post_handler setting to identify that this aggrprobe
1788 * is armed with kprobe_ipmodify_ops.
1789 */
1790 if (!kprobe_ftrace(ap))
1791 ap->post_handler = NULL;
9861668f
MH
1792 }
1793noclean:
6f0f1dd7
MH
1794 /*
1795 * Remove from the aggrprobe: this path will do nothing in
1796 * __unregister_kprobe_bottom().
1797 */
49a2a1b8 1798 list_del_rcu(&p->list);
6f0f1dd7
MH
1799 if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
1800 /*
1801 * Try to optimize this probe again, because post
1802 * handler may have been changed.
1803 */
1804 optimize_kprobe(ap);
49a2a1b8 1805 }
9861668f 1806 return 0;
6f0f1dd7
MH
1807
1808disarmed:
1809 hlist_del_rcu(&ap->hlist);
1810 return 0;
9861668f 1811}
3516a460 1812
55479f64 1813static void __unregister_kprobe_bottom(struct kprobe *p)
9861668f 1814{
6d8e40a8 1815 struct kprobe *ap;
b3e55c72 1816
e8386a0c 1817 if (list_empty(&p->list))
6274de49 1818 /* This is an independent kprobe */
0498b635 1819 arch_remove_kprobe(p);
e8386a0c 1820 else if (list_is_singular(&p->list)) {
6274de49 1821 /* This is the last child of an aggrprobe */
6d8e40a8 1822 ap = list_entry(p->list.next, struct kprobe, list);
e8386a0c 1823 list_del(&p->list);
6d8e40a8 1824 free_aggr_kprobe(ap);
9861668f 1825 }
6274de49 1826 /* Otherwise, do nothing. */
9861668f
MH
1827}
1828
55479f64 1829int register_kprobes(struct kprobe **kps, int num)
9861668f
MH
1830{
1831 int i, ret = 0;
1832
1833 if (num <= 0)
1834 return -EINVAL;
1835 for (i = 0; i < num; i++) {
49ad2fd7 1836 ret = register_kprobe(kps[i]);
67dddaad
MH
1837 if (ret < 0) {
1838 if (i > 0)
1839 unregister_kprobes(kps, i);
9861668f 1840 break;
36721656 1841 }
49a2a1b8 1842 }
9861668f
MH
1843 return ret;
1844}
99081ab5 1845EXPORT_SYMBOL_GPL(register_kprobes);
9861668f 1846
55479f64 1847void unregister_kprobe(struct kprobe *p)
9861668f
MH
1848{
1849 unregister_kprobes(&p, 1);
1850}
99081ab5 1851EXPORT_SYMBOL_GPL(unregister_kprobe);
9861668f 1852
55479f64 1853void unregister_kprobes(struct kprobe **kps, int num)
9861668f
MH
1854{
1855 int i;
1856
1857 if (num <= 0)
1858 return;
1859 mutex_lock(&kprobe_mutex);
1860 for (i = 0; i < num; i++)
1861 if (__unregister_kprobe_top(kps[i]) < 0)
1862 kps[i]->addr = NULL;
1863 mutex_unlock(&kprobe_mutex);
1864
ae8b7ce7 1865 synchronize_rcu();
9861668f
MH
1866 for (i = 0; i < num; i++)
1867 if (kps[i]->addr)
1868 __unregister_kprobe_bottom(kps[i]);
1da177e4 1869}
99081ab5 1870EXPORT_SYMBOL_GPL(unregister_kprobes);
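/*
 * Usage sketch (illustrative): the batch APIs take an array of probe
 * pointers and roll back the already-registered ones on partial failure,
 * so the caller needs no cleanup loop. 'kp1' and 'kp2' are hypothetical.
 *
 *	static struct kprobe *kps[] = { &kp1, &kp2 };
 *
 *	ret = register_kprobes(kps, ARRAY_SIZE(kps));
 *	if (ret)
 *		return ret;
 *	...
 *	unregister_kprobes(kps, ARRAY_SIZE(kps));
 *
 * Batch unregistration also amortizes the single synchronize_rcu() call
 * above across all probes in the array.
 */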
1da177e4 1871
5f6bee34
NR
1872int __weak kprobe_exceptions_notify(struct notifier_block *self,
1873 unsigned long val, void *data)
fc62d020
NR
1874{
1875 return NOTIFY_DONE;
1876}
5f6bee34 1877NOKPROBE_SYMBOL(kprobe_exceptions_notify);
fc62d020 1878
1da177e4 1879static struct notifier_block kprobe_exceptions_nb = {
3d5631e0
AK
1880 .notifier_call = kprobe_exceptions_notify,
1881 .priority = 0x7fffffff /* we need to be notified first */
1882};
1883
9edddaa2 1884#ifdef CONFIG_KRETPROBES
66ada2cc 1885
73f9b911 1886#if !defined(CONFIG_KRETPROBE_ON_RETHOOK)
4bbd9345 1887
1888/* callbacks for objpool of kretprobe instances */
1889static int kretprobe_init_inst(void *nod, void *context)
1890{
1891 struct kretprobe_instance *ri = nod;
1892
1893 ri->rph = context;
1894 return 0;
1895}
1896static int kretprobe_fini_pool(struct objpool_head *head, void *context)
1897{
1898 kfree(context);
1899 return 0;
1900}
1901
43994049
MH
1902static void free_rp_inst_rcu(struct rcu_head *head)
1903{
1904 struct kretprobe_instance *ri = container_of(head, struct kretprobe_instance, rcu);
4bbd9345 1905 struct kretprobe_holder *rph = ri->rph;
43994049 1906
4bbd9345 1907 objpool_drop(ri, &rph->pool);
43994049
MH
1908}
1909NOKPROBE_SYMBOL(free_rp_inst_rcu);
1910
1911static void recycle_rp_inst(struct kretprobe_instance *ri)
1912{
1913 struct kretprobe *rp = get_kretprobe(ri);
1914
1915 if (likely(rp))
4bbd9345 1916 objpool_push(ri, &rp->rph->pool);
43994049
MH
1917 else
1918 call_rcu(&ri->rcu, free_rp_inst_rcu);
1919}
1920NOKPROBE_SYMBOL(recycle_rp_inst);
1921
1922/*
1923 * This function is called from delayed_put_task_struct() when a task is
1924 * dead and being cleaned up, to recycle any kretprobe instances associated
1925 * with this task. These left-over instances represent probed functions that
1926 * have been called but will never return.
1927 */
1928void kprobe_flush_task(struct task_struct *tk)
1929{
1930 struct kretprobe_instance *ri;
1931 struct llist_node *node;
1932
1933 /* Early boot, not yet initialized. */
1934 if (unlikely(!kprobes_initialized))
1935 return;
1936
1937 kprobe_busy_begin();
1938
1939 node = __llist_del_all(&tk->kretprobe_instances);
1940 while (node) {
1941 ri = container_of(node, struct kretprobe_instance, llist);
1942 node = node->next;
1943
1944 recycle_rp_inst(ri);
1945 }
1946
1947 kprobe_busy_end();
1948}
1949NOKPROBE_SYMBOL(kprobe_flush_task);
1950
1951static inline void free_rp_inst(struct kretprobe *rp)
1952{
4bbd9345 1953 struct kretprobe_holder *rph = rp->rph;
43994049 1954
4bbd9345 1955 if (!rph)
1956 return;
1957 rp->rph = NULL;
1958 objpool_fini(&rph->pool);
43994049
MH
1959}
1960
03bac0df
MH
1961/* This assumes 'tsk' is the current task or a task that is not running. */
1962static kprobe_opcode_t *__kretprobe_find_ret_addr(struct task_struct *tsk,
1963 struct llist_node **cur)
3d7e3382 1964{
d741bf41 1965 struct kretprobe_instance *ri = NULL;
03bac0df
MH
1966 struct llist_node *node = *cur;
1967
1968 if (!node)
1969 node = tsk->kretprobe_instances.first;
1970 else
1971 node = node->next;
66ada2cc 1972
d741bf41
PZ
1973 while (node) {
1974 ri = container_of(node, struct kretprobe_instance, llist);
96fed8ac 1975 if (ri->ret_addr != kretprobe_trampoline_addr()) {
03bac0df
MH
1976 *cur = node;
1977 return ri->ret_addr;
d741bf41 1978 }
d741bf41 1979 node = node->next;
66ada2cc 1980 }
03bac0df 1981 return NULL;
3d7e3382 1982}
03bac0df 1983NOKPROBE_SYMBOL(__kretprobe_find_ret_addr);
1da177e4 1984
03bac0df
MH
1985/**
1986 * kretprobe_find_ret_addr -- Find correct return address modified by kretprobe
1987 * @tsk: Target task
1988 * @fp: A frame pointer
1989 * @cur: a storage of the loop cursor llist_node pointer for next call
1990 *
1991 * Find the correct return address modified by a kretprobe on @tsk and
1992 * return it as an unsigned long. If the return address is found, this
1993 * returns that address value; otherwise it returns 0.
1994 * The @tsk must be 'current' or a task which is not running. @fp is a hint
1995 * to get the correct return address - it is compared with the
1996 * kretprobe_instance::fp field. The @cur is a loop cursor for searching the
1997 * kretprobe return addresses on the @tsk. The '*@cur' should be NULL at the
1998 * first call, but '@cur' itself must NOT be NULL.
1999 */
2000unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
2001 struct llist_node **cur)
2002{
9efd24ec 2003 struct kretprobe_instance *ri;
03bac0df
MH
2004 kprobe_opcode_t *ret;
2005
2006 if (WARN_ON_ONCE(!cur))
2007 return 0;
66ada2cc 2008
03bac0df
MH
2009 do {
2010 ret = __kretprobe_find_ret_addr(tsk, cur);
2011 if (!ret)
2012 break;
2013 ri = container_of(*cur, struct kretprobe_instance, llist);
2014 } while (ri->fp != fp);
2015
2016 return (unsigned long)ret;
2017}
2018NOKPROBE_SYMBOL(kretprobe_find_ret_addr);
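/*
 * Usage sketch (illustrative): a stack unwinder can recover the real
 * return addresses of one task by keeping the cursor across calls.
 * 'addr' and 'frame' are hypothetical values from the unwind state.
 *
 *	struct llist_node *cur = NULL;
 *	unsigned long real_addr;
 *
 *	if (addr == (unsigned long)kretprobe_trampoline_addr()) {
 *		real_addr = kretprobe_find_ret_addr(task, (void *)frame, &cur);
 *		if (real_addr)
 *			addr = real_addr;
 *	}
 */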
2019
bf094cff
MH
2020void __weak arch_kretprobe_fixup_return(struct pt_regs *regs,
2021 kprobe_opcode_t *correct_ret_addr)
2022{
2023 /*
2024 * Do nothing by default. Please fill this to update the fake return
2025 * address on the stack with the correct one on each arch if possible.
2026 */
2027}
66ada2cc
MH
2028
2029unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
66ada2cc
MH
2030 void *frame_pointer)
2031{
d741bf41 2032 struct kretprobe_instance *ri = NULL;
03bac0df 2033 struct llist_node *first, *node = NULL;
e1164787 2034 kprobe_opcode_t *correct_ret_addr;
d741bf41 2035 struct kretprobe *rp;
66ada2cc 2036
03bac0df
MH
2037 /* Find correct address and all nodes for this frame. */
2038 correct_ret_addr = __kretprobe_find_ret_addr(current, &node);
2039 if (!correct_ret_addr) {
2040 pr_err("kretprobe: Return address not found, cannot execute handler. There may be a bug in the kernel.\n");
2041 BUG_ON(1);
66ada2cc
MH
2042 }
2043
df91c5bc
MH
2044 /*
2045 * Set the return address as the instruction pointer, because if the
2046 * user handler calls stack_trace_save_regs() with this 'regs',
2047 * the stack trace will start from the instruction pointer.
2048 */
2049 instruction_pointer_set(regs, (unsigned long)correct_ret_addr);
66ada2cc 2050
03bac0df
MH
2051 /* Run the user handler of the nodes. */
2052 first = current->kretprobe_instances.first;
d741bf41
PZ
2053 while (first) {
2054 ri = container_of(first, struct kretprobe_instance, llist);
03bac0df
MH
2055
2056 if (WARN_ON_ONCE(ri->fp != frame_pointer))
2057 break;
66ada2cc 2058
d741bf41
PZ
2059 rp = get_kretprobe(ri);
2060 if (rp && rp->handler) {
66ada2cc
MH
2061 struct kprobe *prev = kprobe_running();
2062
d741bf41 2063 __this_cpu_write(current_kprobe, &rp->kp);
66ada2cc 2064 ri->ret_addr = correct_ret_addr;
d741bf41 2065 rp->handler(ri, regs);
66ada2cc
MH
2066 __this_cpu_write(current_kprobe, prev);
2067 }
03bac0df
MH
2068 if (first == node)
2069 break;
2070
2071 first = first->next;
2072 }
2073
bf094cff
MH
2074 arch_kretprobe_fixup_return(regs, correct_ret_addr);
2075
03bac0df
MH
2076 /* Unlink all nodes for this frame. */
2077 first = current->kretprobe_instances.first;
2078 current->kretprobe_instances.first = node->next;
2079 node->next = NULL;
2080
2081 /* Recycle free instances. */
2082 while (first) {
2083 ri = container_of(first, struct kretprobe_instance, llist);
2084 first = first->next;
66ada2cc 2085
b3388178 2086 recycle_rp_inst(ri);
66ada2cc
MH
2087 }
2088
66ada2cc
MH
2089 return (unsigned long)correct_ret_addr;
2090}
2091NOKPROBE_SYMBOL(__kretprobe_trampoline_handler)
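/*
 * Arch glue sketch (illustrative): each architecture's assembly
 * trampoline saves a 'struct pt_regs' and then calls the handler above,
 * roughly like the hypothetical helper below. The frame pointer passed
 * here is arch-specific; it only has to match what
 * arch_prepare_kretprobe() stored in 'ri->fp'.
 *
 *	__used static unsigned long
 *	arch_trampoline_handler(struct pt_regs *regs)
 *	{
 *		return __kretprobe_trampoline_handler(regs,
 *				(void *)kernel_stack_pointer(regs));
 *	}
 */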
2092
e65cefe8
AB
2093/*
2094 * This kprobe pre_handler is registered with every kretprobe. When the
2095 * probe hits, it will set up the return probe.
2096 */
820aede0 2097static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
e65cefe8
AB
2098{
2099 struct kretprobe *rp = container_of(p, struct kretprobe, kp);
4bbd9345 2100 struct kretprobe_holder *rph = rp->rph;
ef53d9c5 2101 struct kretprobe_instance *ri;
e65cefe8 2102
4bbd9345 2103 ri = objpool_pop(&rph->pool);
2104 if (!ri) {
6e426e0f
PZ
2105 rp->nmissed++;
2106 return 0;
2107 }
4c4308cb 2108
6e426e0f 2109 if (rp->entry_handler && rp->entry_handler(ri, regs)) {
4bbd9345 2110 objpool_push(ri, &rph->pool);
6e426e0f 2111 return 0;
ef53d9c5 2112 }
6e426e0f
PZ
2113
2114 arch_prepare_kretprobe(ri, regs);
2115
2116 __llist_add(&ri->llist, &current->kretprobe_instances);
2117
e65cefe8
AB
2118 return 0;
2119}
820aede0 2120NOKPROBE_SYMBOL(pre_handler_kretprobe);
73f9b911
MH
2121#else /* CONFIG_KRETPROBE_ON_RETHOOK */
2122/*
2123 * This kprobe pre_handler is registered with every kretprobe. When the
2124 * probe hits, it will set up the return probe.
2125 */
2126static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2127{
2128 struct kretprobe *rp = container_of(p, struct kretprobe, kp);
2129 struct kretprobe_instance *ri;
2130 struct rethook_node *rhn;
2131
2132 rhn = rethook_try_get(rp->rh);
2133 if (!rhn) {
2134 rp->nmissed++;
2135 return 0;
2136 }
2137
2138 ri = container_of(rhn, struct kretprobe_instance, node);
2139
2140 if (rp->entry_handler && rp->entry_handler(ri, regs))
2141 rethook_recycle(rhn);
2142 else
2143 rethook_hook(rhn, regs, kprobe_ftrace(p));
2144
2145 return 0;
2146}
2147NOKPROBE_SYMBOL(pre_handler_kretprobe);
2148
2149static void kretprobe_rethook_handler(struct rethook_node *rh, void *data,
cb16330d 2150 unsigned long ret_addr,
73f9b911
MH
2151 struct pt_regs *regs)
2152{
2153 struct kretprobe *rp = (struct kretprobe *)data;
2154 struct kretprobe_instance *ri;
2155 struct kprobe_ctlblk *kcb;
2156
2157 /* The data must NOT be NULL. If it is, the rethook data structure is broken. */
1d661ed5 2158 if (WARN_ON_ONCE(!data) || !rp->handler)
73f9b911
MH
2159 return;
2160
2161 __this_cpu_write(current_kprobe, &rp->kp);
2162 kcb = get_kprobe_ctlblk();
2163 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
2164
2165 ri = container_of(rh, struct kretprobe_instance, node);
2166 rp->handler(ri, regs);
2167
2168 __this_cpu_write(current_kprobe, NULL);
2169}
2170NOKPROBE_SYMBOL(kretprobe_rethook_handler);
2171
2172#endif /* !CONFIG_KRETPROBE_ON_RETHOOK */
e65cefe8 2173
97c753e6
MH
2174/**
2175 * kprobe_on_func_entry() -- check whether given address is function entry
2176 * @addr: Target address
2177 * @sym: Target symbol name
2178 * @offset: The offset from the symbol or the address
2179 *
2180 * This checks whether the given @addr+@offset or @sym+@offset is on a
2181 * function entry address or not.
2182 * This returns 0 if it is the function entry, or -EINVAL if it is not.
2183 * It also returns -ENOENT if the symbol or address lookup fails.
2184 * Caller must pass @addr or @sym (either one must be NULL), or this
2185 * returns -EINVAL.
2186 */
2187int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
1d585e70 2188{
cc66bb91
PZ
2189 bool on_func_entry;
2190 kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset, &on_func_entry);
1d585e70
NR
2191
2192 if (IS_ERR(kp_addr))
97c753e6 2193 return PTR_ERR(kp_addr);
1d585e70 2194
cc66bb91 2195 if (!on_func_entry)
97c753e6
MH
2196 return -EINVAL;
2197
2198 return 0;
1d585e70
NR
2199}
2200
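/*
 * Usage sketch (illustrative): callers pass either an address or a
 * symbol+offset (the other must be NULL). register_kretprobe() below
 * uses this to reject probes that do not sit on a function entry:
 *
 *	ret = kprobe_on_func_entry(NULL, "kernel_clone", 0);
 *	if (ret)
 *		return ret;	(-EINVAL: not an entry, -ENOENT: no symbol)
 */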
55479f64 2201int register_kretprobe(struct kretprobe *rp)
b94cce92 2202{
97c753e6 2203 int ret;
b94cce92 2204 int i;
b2a5cd69 2205 void *addr;
90ec5e89 2206
97c753e6
MH
2207 ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset);
2208 if (ret)
2209 return ret;
f438d914 2210
223a76b2 2211 /* If only 'rp->kp.addr' is specified, check reregistering kprobes */
33b1d146 2212 if (rp->kp.addr && warn_kprobe_rereg(&rp->kp))
0188b878
WS
2213 return -EINVAL;
2214
f438d914 2215 if (kretprobe_blacklist_size) {
b2a5cd69 2216 addr = kprobe_addr(&rp->kp);
bc81d48d
MH
2217 if (IS_ERR(addr))
2218 return PTR_ERR(addr);
f438d914
MH
2219
2220 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2221 if (kretprobe_blacklist[i].addr == addr)
2222 return -EINVAL;
2223 }
2224 }
b94cce92 2225
6bbfa441
MH
2226 if (rp->data_size > KRETPROBE_MAX_DATA_SIZE)
2227 return -E2BIG;
2228
b94cce92 2229 rp->kp.pre_handler = pre_handler_kretprobe;
7522a842 2230 rp->kp.post_handler = NULL;
b94cce92
HN
2231
2232 /* Pre-allocate memory for max kretprobe instances */
3b7ddab8 2233 if (rp->maxactive <= 0)
c2ef6661 2234 rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
3b7ddab8 2235
73f9b911 2236#ifdef CONFIG_KRETPROBE_ON_RETHOOK
4bbd9345 2237 rp->rh = rethook_alloc((void *)rp, kretprobe_rethook_handler,
2238 sizeof(struct kretprobe_instance) +
2239 rp->data_size, rp->maxactive);
2240 if (IS_ERR(rp->rh))
2241 return PTR_ERR(rp->rh);
73f9b911 2242
73f9b911
MH
2243 rp->nmissed = 0;
2244 /* Establish function entry probe point */
2245 ret = register_kprobe(&rp->kp);
2246 if (ret != 0) {
2247 rethook_free(rp->rh);
2248 rp->rh = NULL;
2249 }
2250#else /* !CONFIG_KRETPROBE_ON_RETHOOK */
d741bf41
PZ
2251 rp->rph = kzalloc(sizeof(struct kretprobe_holder), GFP_KERNEL);
2252 if (!rp->rph)
2253 return -ENOMEM;
2254
4bbd9345 2255 if (objpool_init(&rp->rph->pool, rp->maxactive, rp->data_size +
2256 sizeof(struct kretprobe_instance), GFP_KERNEL,
2257 rp->rph, kretprobe_init_inst, kretprobe_fini_pool)) {
2258 kfree(rp->rph);
2259 rp->rph = NULL;
2260 return -ENOMEM;
b94cce92 2261 }
d839a656 2262 rcu_assign_pointer(rp->rph->rp, rp);
b94cce92
HN
2263 rp->nmissed = 0;
2264 /* Establish function entry probe point */
49ad2fd7 2265 ret = register_kprobe(&rp->kp);
4a296e07 2266 if (ret != 0)
b94cce92 2267 free_rp_inst(rp);
73f9b911 2268#endif
b94cce92
HN
2269 return ret;
2270}
99081ab5 2271EXPORT_SYMBOL_GPL(register_kretprobe);
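/*
 * Usage sketch (illustrative): a return probe measuring per-call latency
 * via the per-instance data area. All names below are hypothetical; this
 * mirrors the pattern used by samples/kprobes/kretprobe_example.c.
 *
 *	struct my_data { ktime_t entry_time; };
 *
 *	static int my_entry(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		((struct my_data *)ri->data)->entry_time = ktime_get();
 *		return 0;
 *	}
 *
 *	static int my_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		struct my_data *d = (struct my_data *)ri->data;
 *
 *		pr_info("took %lld ns\n",
 *			ktime_to_ns(ktime_sub(ktime_get(), d->entry_time)));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.kp.symbol_name	= "kernel_clone",
 *		.entry_handler	= my_entry,
 *		.handler	= my_ret,
 *		.data_size	= sizeof(struct my_data),
 *		.maxactive	= 20,
 *	};
 *
 *	ret = register_kretprobe(&my_rp);
 */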
b94cce92 2272
55479f64 2273int register_kretprobes(struct kretprobe **rps, int num)
4a296e07
MH
2274{
2275 int ret = 0, i;
2276
2277 if (num <= 0)
2278 return -EINVAL;
2279 for (i = 0; i < num; i++) {
49ad2fd7 2280 ret = register_kretprobe(rps[i]);
67dddaad
MH
2281 if (ret < 0) {
2282 if (i > 0)
2283 unregister_kretprobes(rps, i);
4a296e07
MH
2284 break;
2285 }
2286 }
2287 return ret;
2288}
99081ab5 2289EXPORT_SYMBOL_GPL(register_kretprobes);
4a296e07 2290
55479f64 2291void unregister_kretprobe(struct kretprobe *rp)
4a296e07
MH
2292{
2293 unregister_kretprobes(&rp, 1);
2294}
99081ab5 2295EXPORT_SYMBOL_GPL(unregister_kretprobe);
4a296e07 2296
55479f64 2297void unregister_kretprobes(struct kretprobe **rps, int num)
4a296e07
MH
2298{
2299 int i;
2300
2301 if (num <= 0)
2302 return;
2303 mutex_lock(&kprobe_mutex);
d741bf41 2304 for (i = 0; i < num; i++) {
4a296e07
MH
2305 if (__unregister_kprobe_top(&rps[i]->kp) < 0)
2306 rps[i]->kp.addr = NULL;
73f9b911
MH
2307#ifdef CONFIG_KRETPROBE_ON_RETHOOK
2308 rethook_free(rps[i]->rh);
2309#else
d839a656 2310 rcu_assign_pointer(rps[i]->rph->rp, NULL);
73f9b911 2311#endif
d741bf41 2312 }
4a296e07
MH
2313 mutex_unlock(&kprobe_mutex);
2314
ae8b7ce7 2315 synchronize_rcu();
4a296e07
MH
2316 for (i = 0; i < num; i++) {
2317 if (rps[i]->kp.addr) {
2318 __unregister_kprobe_bottom(&rps[i]->kp);
73f9b911 2319#ifndef CONFIG_KRETPROBE_ON_RETHOOK
d741bf41 2320 free_rp_inst(rps[i]);
73f9b911 2321#endif
4a296e07
MH
2322 }
2323 }
2324}
99081ab5 2325EXPORT_SYMBOL_GPL(unregister_kretprobes);
4a296e07 2326
9edddaa2 2327#else /* CONFIG_KRETPROBES */
55479f64 2328int register_kretprobe(struct kretprobe *rp)
b94cce92 2329{
223a76b2 2330 return -EOPNOTSUPP;
b94cce92 2331}
99081ab5 2332EXPORT_SYMBOL_GPL(register_kretprobe);
b94cce92 2333
55479f64 2334int register_kretprobes(struct kretprobe **rps, int num)
346fd59b 2335{
223a76b2 2336 return -EOPNOTSUPP;
346fd59b 2337}
99081ab5
MH
2338EXPORT_SYMBOL_GPL(register_kretprobes);
2339
55479f64 2340void unregister_kretprobe(struct kretprobe *rp)
b94cce92 2341{
4a296e07 2342}
99081ab5 2343EXPORT_SYMBOL_GPL(unregister_kretprobe);
b94cce92 2344
55479f64 2345void unregister_kretprobes(struct kretprobe **rps, int num)
4a296e07
MH
2346{
2347}
99081ab5 2348EXPORT_SYMBOL_GPL(unregister_kretprobes);
4c4308cb 2349
820aede0 2350static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
4a296e07
MH
2351{
2352 return 0;
b94cce92 2353}
820aede0 2354NOKPROBE_SYMBOL(pre_handler_kretprobe);
b94cce92 2355
4a296e07
MH
2356#endif /* CONFIG_KRETPROBES */
2357
e8386a0c 2358/* Set the kprobe gone and remove its instruction buffer. */
55479f64 2359static void kill_kprobe(struct kprobe *p)
e8386a0c
MH
2360{
2361 struct kprobe *kp;
de5bd88d 2362
7e6a71d8
MH
2363 lockdep_assert_held(&kprobe_mutex);
2364
0c76ef3f
LH
2365 /*
2366 * The module is going away. We should disarm the kprobe which
2367 * is using ftrace, because the ftrace framework is still available
2368 * at the 'MODULE_STATE_GOING' notification.
2369 */
2370 if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
2371 disarm_kprobe_ftrace(p);
2372
e8386a0c 2373 p->flags |= KPROBE_FLAG_GONE;
afd66255 2374 if (kprobe_aggrprobe(p)) {
e8386a0c
MH
2375 /*
2376 * If this is an aggr_kprobe, we have to list all the
2377 * chained probes and mark them GONE.
2378 */
7e6a71d8 2379 list_for_each_entry(kp, &p->list, list)
e8386a0c
MH
2380 kp->flags |= KPROBE_FLAG_GONE;
2381 p->post_handler = NULL;
afd66255 2382 kill_optimized_kprobe(p);
e8386a0c
MH
2383 }
2384 /*
2385 * Here, we can remove insn_slot safely, because no thread calls
2386 * the original probed function (which will be freed soon) any more.
2387 */
2388 arch_remove_kprobe(p);
2389}
2390
c0614829 2391/* Disable one kprobe */
55479f64 2392int disable_kprobe(struct kprobe *kp)
c0614829
MH
2393{
2394 int ret = 0;
297f9233 2395 struct kprobe *p;
c0614829
MH
2396
2397 mutex_lock(&kprobe_mutex);
2398
6f0f1dd7 2399 /* Disable this kprobe */
297f9233
JY
2400 p = __disable_kprobe(kp);
2401 if (IS_ERR(p))
2402 ret = PTR_ERR(p);
c0614829 2403
c0614829
MH
2404 mutex_unlock(&kprobe_mutex);
2405 return ret;
2406}
2407EXPORT_SYMBOL_GPL(disable_kprobe);
2408
2409/* Enable one kprobe */
55479f64 2410int enable_kprobe(struct kprobe *kp)
c0614829
MH
2411{
2412 int ret = 0;
2413 struct kprobe *p;
2414
2415 mutex_lock(&kprobe_mutex);
2416
2417 /* Check whether specified probe is valid. */
2418 p = __get_valid_kprobe(kp);
2419 if (unlikely(p == NULL)) {
2420 ret = -EINVAL;
2421 goto out;
2422 }
2423
2424 if (kprobe_gone(kp)) {
2425 /* This kprobe has gone; we can't enable it. */
2426 ret = -EINVAL;
2427 goto out;
2428 }
2429
2430 if (p != kp)
2431 kp->flags &= ~KPROBE_FLAG_DISABLED;
2432
2433 if (!kprobes_all_disarmed && kprobe_disabled(p)) {
2434 p->flags &= ~KPROBE_FLAG_DISABLED;
12310e34 2435 ret = arm_kprobe(p);
4a6f316d 2436 if (ret) {
12310e34 2437 p->flags |= KPROBE_FLAG_DISABLED;
4a6f316d
LQ
2438 if (p != kp)
2439 kp->flags |= KPROBE_FLAG_DISABLED;
2440 }
c0614829
MH
2441 }
2442out:
2443 mutex_unlock(&kprobe_mutex);
2444 return ret;
2445}
2446EXPORT_SYMBOL_GPL(enable_kprobe);
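/*
 * Usage sketch (illustrative): a probe can be registered disabled and
 * toggled later, which is cheaper than re-registering each time.
 * KPROBE_FLAG_DISABLED is the only flag register_kprobe() lets callers
 * set.
 *
 *	my_kp.flags = KPROBE_FLAG_DISABLED;
 *	ret = register_kprobe(&my_kp);	(registered but not armed)
 *	...
 *	ret = enable_kprobe(&my_kp);	(arm on demand)
 *	...
 *	ret = disable_kprobe(&my_kp);	(disarm, stays registered)
 */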
2447
4458515b 2448/* Caller must NOT call this in the usual path. This is only for critical cases. */
820aede0 2449void dump_kprobe(struct kprobe *kp)
24851d24 2450{
9c89bb8e 2451 pr_err("Dump kprobe:\n.symbol_name = %s, .offset = %x, .addr = %pS\n",
4458515b 2452 kp->symbol_name, kp->offset, kp->addr);
24851d24 2453}
820aede0 2454NOKPROBE_SYMBOL(dump_kprobe);
24851d24 2455
fb1a59fa
MH
2456int kprobe_add_ksym_blacklist(unsigned long entry)
2457{
2458 struct kprobe_blacklist_entry *ent;
2459 unsigned long offset = 0, size = 0;
2460
2461 if (!kernel_text_address(entry) ||
2462 !kallsyms_lookup_size_offset(entry, &size, &offset))
2463 return -EINVAL;
2464
2465 ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2466 if (!ent)
2467 return -ENOMEM;
2468 ent->start_addr = entry;
2469 ent->end_addr = entry + size;
2470 INIT_LIST_HEAD(&ent->list);
2471 list_add_tail(&ent->list, &kprobe_blacklist);
2472
2473 return (int)size;
2474}
2475
2476/* Add all symbols in given area into kprobe blacklist */
2477int kprobe_add_area_blacklist(unsigned long start, unsigned long end)
2478{
2479 unsigned long entry;
2480 int ret = 0;
2481
2482 for (entry = start; entry < end; entry += ret) {
2483 ret = kprobe_add_ksym_blacklist(entry);
2484 if (ret < 0)
2485 return ret;
2486 if (ret == 0) /* In case of alias symbol */
2487 ret = 1;
2488 }
2489 return 0;
2490}
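/*
 * Arch hook sketch (illustrative): an architecture can override the weak
 * arch_populate_kprobe_blacklist() below and feed its own unprobeable
 * text ranges through the helpers above; x86, for instance, blacklists
 * its entry text in roughly this way:
 *
 *	int __init arch_populate_kprobe_blacklist(void)
 *	{
 *		return kprobe_add_area_blacklist(
 *				(unsigned long)__entry_text_start,
 *				(unsigned long)__entry_text_end);
 *	}
 */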
2491
d002b8bc
AH
2492int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
2493 char *type, char *sym)
2494{
2495 return -ERANGE;
2496}
2497
2498int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
2499 char *sym)
2500{
2501#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
2502 if (!kprobe_cache_get_kallsym(&kprobe_insn_slots, &symnum, value, type, sym))
2503 return 0;
2504#ifdef CONFIG_OPTPROBES
2505 if (!kprobe_cache_get_kallsym(&kprobe_optinsn_slots, &symnum, value, type, sym))
2506 return 0;
2507#endif
2508#endif
2509 if (!arch_kprobe_get_kallsym(&symnum, value, type, sym))
2510 return 0;
2511 return -ERANGE;
2512}
2513
fb1a59fa
MH
2514int __init __weak arch_populate_kprobe_blacklist(void)
2515{
2516 return 0;
2517}
2518
376e2424
MH
2519/*
2520 * Lookup and populate the kprobe_blacklist.
2521 *
2522 * Unlike the kretprobe blacklist, we'll need to determine
2523 * the range of addresses that belong to the listed functions,
2524 * since a kprobe need not necessarily be at the beginning
2525 * of a function.
2526 */
2527static int __init populate_kprobe_blacklist(unsigned long *start,
2528 unsigned long *end)
2529{
fb1a59fa 2530 unsigned long entry;
376e2424 2531 unsigned long *iter;
fb1a59fa 2532 int ret;
376e2424
MH
2533
2534 for (iter = start; iter < end; iter++) {
f2ec8d9a 2535 entry = (unsigned long)dereference_symbol_descriptor((void *)*iter);
fb1a59fa
MH
2536 ret = kprobe_add_ksym_blacklist(entry);
2537 if (ret == -EINVAL)
376e2424 2538 continue;
fb1a59fa
MH
2539 if (ret < 0)
2540 return ret;
376e2424 2541 }
fb1a59fa 2542
223a76b2 2543 /* Symbols in '__kprobes_text' are blacklisted */
fb1a59fa
MH
2544 ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start,
2545 (unsigned long)__kprobes_text_end);
66e9b071
TG
2546 if (ret)
2547 return ret;
2548
223a76b2 2549 /* Symbols in 'noinstr' section are blacklisted */
66e9b071
TG
2550 ret = kprobe_add_area_blacklist((unsigned long)__noinstr_text_start,
2551 (unsigned long)__noinstr_text_end);
fb1a59fa
MH
2552
2553 return ret ? : arch_populate_kprobe_blacklist();
376e2424
MH
2554}
2555
7582b7be
MRI
2556#ifdef CONFIG_MODULES
2557/* Remove all symbols in given area from kprobe blacklist */
2558static void kprobe_remove_area_blacklist(unsigned long start, unsigned long end)
2559{
2560 struct kprobe_blacklist_entry *ent, *n;
2561
2562 list_for_each_entry_safe(ent, n, &kprobe_blacklist, list) {
2563 if (ent->start_addr < start || ent->start_addr >= end)
2564 continue;
2565 list_del(&ent->list);
2566 kfree(ent);
2567 }
2568}
2569
2570static void kprobe_remove_ksym_blacklist(unsigned long entry)
2571{
2572 kprobe_remove_area_blacklist(entry, entry + 1);
2573}
2574
1e6769b0
MH
2575static void add_module_kprobe_blacklist(struct module *mod)
2576{
2577 unsigned long start, end;
16db6264
MH
2578 int i;
2579
2580 if (mod->kprobe_blacklist) {
2581 for (i = 0; i < mod->num_kprobe_blacklist; i++)
2582 kprobe_add_ksym_blacklist(mod->kprobe_blacklist[i]);
2583 }
1e6769b0
MH
2584
2585 start = (unsigned long)mod->kprobes_text_start;
2586 if (start) {
2587 end = start + mod->kprobes_text_size;
2588 kprobe_add_area_blacklist(start, end);
2589 }
66e9b071
TG
2590
2591 start = (unsigned long)mod->noinstr_text_start;
2592 if (start) {
2593 end = start + mod->noinstr_text_size;
2594 kprobe_add_area_blacklist(start, end);
2595 }
1e6769b0
MH
2596}
2597
2598static void remove_module_kprobe_blacklist(struct module *mod)
2599{
2600 unsigned long start, end;
16db6264
MH
2601 int i;
2602
2603 if (mod->kprobe_blacklist) {
2604 for (i = 0; i < mod->num_kprobe_blacklist; i++)
2605 kprobe_remove_ksym_blacklist(mod->kprobe_blacklist[i]);
2606 }
1e6769b0
MH
2607
2608 start = (unsigned long)mod->kprobes_text_start;
2609 if (start) {
2610 end = start + mod->kprobes_text_size;
2611 kprobe_remove_area_blacklist(start, end);
2612 }
66e9b071
TG
2613
2614 start = (unsigned long)mod->noinstr_text_start;
2615 if (start) {
2616 end = start + mod->noinstr_text_size;
2617 kprobe_remove_area_blacklist(start, end);
2618 }
1e6769b0
MH
2619}
2620
e8386a0c 2621/* Module notifier call back, checking kprobes on the module */
55479f64
MH
2622static int kprobes_module_callback(struct notifier_block *nb,
2623 unsigned long val, void *data)
e8386a0c
MH
2624{
2625 struct module *mod = data;
2626 struct hlist_head *head;
e8386a0c
MH
2627 struct kprobe *p;
2628 unsigned int i;
f24659d9 2629 int checkcore = (val == MODULE_STATE_GOING);
e8386a0c 2630
1e6769b0
MH
2631 if (val == MODULE_STATE_COMING) {
2632 mutex_lock(&kprobe_mutex);
2633 add_module_kprobe_blacklist(mod);
2634 mutex_unlock(&kprobe_mutex);
2635 }
f24659d9 2636 if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
e8386a0c
MH
2637 return NOTIFY_DONE;
2638
2639 /*
223a76b2
MH
2640 * When 'MODULE_STATE_GOING' is notified, both the module's '.text' and
2641 * '.init.text' sections will be freed. When 'MODULE_STATE_LIVE' is
2642 * notified, only the '.init.text' section will be freed. We need to
f24659d9 2643 * disable kprobes which have been inserted in those sections.
e8386a0c
MH
2644 */
2645 mutex_lock(&kprobe_mutex);
2646 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2647 head = &kprobe_table[i];
7e6a71d8 2648 hlist_for_each_entry(p, head, hlist)
f24659d9
MH
2649 if (within_module_init((unsigned long)p->addr, mod) ||
2650 (checkcore &&
2651 within_module_core((unsigned long)p->addr, mod))) {
e8386a0c
MH
2652 /*
2653 * The vaddr this probe is installed at will soon
2654 * be vfreed but not synced to disk. Hence,
2655 * disarming the breakpoint isn't needed.
545a0281
SRV
2656 *
2657 * Note, this will also move any optimized probes
2658 * that are pending to be removed from their
223a76b2 2659 * corresponding lists to the 'freeing_list' and
545a0281 2660 * will not be touched by the delayed
223a76b2 2661 * kprobe_optimizer() work handler.
e8386a0c
MH
2662 */
2663 kill_kprobe(p);
2664 }
2665 }
1e6769b0
MH
2666 if (val == MODULE_STATE_GOING)
2667 remove_module_kprobe_blacklist(mod);
e8386a0c
MH
2668 mutex_unlock(&kprobe_mutex);
2669 return NOTIFY_DONE;
2670}
2671
2672static struct notifier_block kprobe_module_nb = {
2673 .notifier_call = kprobes_module_callback,
2674 .priority = 0
2675};
2676
7582b7be
MRI
2677static int kprobe_register_module_notifier(void)
2678{
2679 return register_module_notifier(&kprobe_module_nb);
2680}
2681#else
2682static int kprobe_register_module_notifier(void)
2683{
2684 return 0;
2685}
2686#endif /* CONFIG_MODULES */
2687
82d083ab
MH
2688void kprobe_free_init_mem(void)
2689{
2690 void *start = (void *)(&__init_begin);
2691 void *end = (void *)(&__init_end);
2692 struct hlist_head *head;
2693 struct kprobe *p;
2694 int i;
2695
2696 mutex_lock(&kprobe_mutex);
2697
223a76b2 2698 /* Kill all kprobes on initmem because the target code has been freed. */
82d083ab
MH
2699 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2700 head = &kprobe_table[i];
2701 hlist_for_each_entry(p, head, hlist) {
2702 if (start <= (void *)p->addr && (void *)p->addr < end)
2703 kill_kprobe(p);
2704 }
2705 }
2706
2707 mutex_unlock(&kprobe_mutex);
2708}
2709
1da177e4
LT
2710static int __init init_kprobes(void)
2711{
ed9492df 2712 int i, err;
1da177e4
LT
2713
2714 /* FIXME allocate the probe table, currently defined statically */
2715 /* initialize all list heads */
d741bf41 2716 for (i = 0; i < KPROBE_TABLE_SIZE; i++)
1da177e4
LT
2717 INIT_HLIST_HEAD(&kprobe_table[i]);
2718
376e2424
MH
2719 err = populate_kprobe_blacklist(__start_kprobe_blacklist,
2720 __stop_kprobe_blacklist);
223a76b2 2721 if (err)
9c89bb8e 2722 pr_err("Failed to populate blacklist (error %d), kprobes not restricted, be careful using them!\n", err);
3d8d996e 2723
f438d914
MH
2724 if (kretprobe_blacklist_size) {
2725 /* lookup the function address from its name */
2726 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
49e0b465 2727 kretprobe_blacklist[i].addr =
290e3070 2728 kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
f438d914 2729 if (!kretprobe_blacklist[i].addr)
9c89bb8e 2730 pr_err("Failed to lookup symbol '%s' for kretprobe blacklist. Maybe the target function is removed or renamed.\n",
f438d914
MH
2731 kretprobe_blacklist[i].name);
2732 }
2733 }
2734
e579abeb
MH
2735 /* By default, kprobes are armed */
2736 kprobes_all_disarmed = false;
bf8f6e5b 2737
c85c9a2c 2738#if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
223a76b2 2739 /* Init 'kprobe_optinsn_slots' for allocation */
c85c9a2c
MH
2740 kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
2741#endif
2742
6772926b 2743 err = arch_init_kprobes();
802eae7c
RL
2744 if (!err)
2745 err = register_die_notifier(&kprobe_exceptions_nb);
e8386a0c 2746 if (!err)
7582b7be 2747 err = kprobe_register_module_notifier();
e8386a0c 2748
ef53d9c5 2749 kprobes_initialized = (err == 0);
a737a3c6 2750 kprobe_sysctls_init();
1da177e4
LT
2751 return err;
2752}
36dadef2 2753early_initcall(init_kprobes);
1da177e4 2754
c85c9a2c
MH
2755#if defined(CONFIG_OPTPROBES)
2756static int __init init_optprobes(void)
2757{
2758 /*
2759 * Enable kprobe optimization - this kicks the optimizer which
2760 * depends on synchronize_rcu_tasks() and ksoftirqd, which are
2761 * not spawned in early initcalls. So delay the optimization.
2762 */
2763 optimize_all_kprobes();
2764
2765 return 0;
2766}
2767subsys_initcall(init_optprobes);
2768#endif
2769
346fd59b 2770#ifdef CONFIG_DEBUG_FS
55479f64 2771static void report_probe(struct seq_file *pi, struct kprobe *p,
afd66255 2772 const char *sym, int offset, char *modname, struct kprobe *pp)
346fd59b
SD
2773{
2774 char *kprobe_type;
81365a94 2775 void *addr = p->addr;
346fd59b
SD
2776
2777 if (p->pre_handler == pre_handler_kretprobe)
2778 kprobe_type = "r";
346fd59b
SD
2779 else
2780 kprobe_type = "k";
afd66255 2781
60f7bb66 2782 if (!kallsyms_show_value(pi->file->f_cred))
81365a94
MH
2783 addr = NULL;
2784
346fd59b 2785 if (sym)
81365a94
MH
2786 seq_printf(pi, "%px %s %s+0x%x %s ",
2787 addr, kprobe_type, sym, offset,
afd66255 2788 (modname ? modname : " "));
81365a94
MH
2789 else /* try to use %pS */
2790 seq_printf(pi, "%px %s %pS ",
2791 addr, kprobe_type, p->addr);
afd66255
MH
2792
2793 if (!pp)
2794 pp = p;
ae6aa16f 2795 seq_printf(pi, "%s%s%s%s\n",
afd66255
MH
2796 (kprobe_gone(p) ? "[GONE]" : ""),
2797 ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
ae6aa16f
MH
2798 (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
2799 (kprobe_ftrace(pp) ? "[FTRACE]" : ""));
346fd59b
SD
2800}
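/*
 * Output sketch: with the above, each line of
 * /sys/kernel/debug/kprobes/list looks roughly like
 *
 *	c015d71a  k  vfs_read+0x0
 *	c03dedc5  r  tcp_v4_rcv+0x0    [OPTIMIZED]
 *
 * i.e. address, type ('k' for kprobe, 'r' for kretprobe), symbol+offset
 * plus module name if any, then the state flags. The address column reads
 * as NULL unless the opener may see kernel addresses
 * (kallsyms_show_value()).
 */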
2801
55479f64 2802static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
346fd59b
SD
2803{
2804 return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
2805}
2806
55479f64 2807static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
346fd59b
SD
2808{
2809 (*pos)++;
2810 if (*pos >= KPROBE_TABLE_SIZE)
2811 return NULL;
2812 return pos;
2813}
2814
55479f64 2815static void kprobe_seq_stop(struct seq_file *f, void *v)
346fd59b
SD
2816{
2817 /* Nothing to do */
2818}
2819
55479f64 2820static int show_kprobe_addr(struct seq_file *pi, void *v)
346fd59b
SD
2821{
2822 struct hlist_head *head;
346fd59b 2823 struct kprobe *p, *kp;
9efd24ec 2824 const char *sym;
346fd59b 2825 unsigned int i = *(loff_t *) v;
ffb45122 2826 unsigned long offset = 0;
ab767865 2827 char *modname, namebuf[KSYM_NAME_LEN];
346fd59b
SD
2828
2829 head = &kprobe_table[i];
2830 preempt_disable();
b67bfe0d 2831 hlist_for_each_entry_rcu(p, head, hlist) {
ffb45122 2832 sym = kallsyms_lookup((unsigned long)p->addr, NULL,
346fd59b 2833 &offset, &modname, namebuf);
afd66255 2834 if (kprobe_aggrprobe(p)) {
346fd59b 2835 list_for_each_entry_rcu(kp, &p->list, list)
afd66255 2836 report_probe(pi, kp, sym, offset, modname, p);
346fd59b 2837 } else
afd66255 2838 report_probe(pi, p, sym, offset, modname, NULL);
346fd59b
SD
2839 }
2840 preempt_enable();
2841 return 0;
2842}
2843
eac2cece 2844static const struct seq_operations kprobes_sops = {
346fd59b
SD
2845 .start = kprobe_seq_start,
2846 .next = kprobe_seq_next,
2847 .stop = kprobe_seq_stop,
2848 .show = show_kprobe_addr
2849};
2850
eac2cece 2851DEFINE_SEQ_ATTRIBUTE(kprobes);
346fd59b 2852
63724740
MH
2853/* kprobes/blacklist -- shows which functions cannot be probed */
2854static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
2855{
4fdd8887 2856 mutex_lock(&kprobe_mutex);
63724740
MH
2857 return seq_list_start(&kprobe_blacklist, *pos);
2858}
2859
2860static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
2861{
2862 return seq_list_next(v, &kprobe_blacklist, pos);
2863}
2864
2865static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
2866{
2867 struct kprobe_blacklist_entry *ent =
2868 list_entry(v, struct kprobe_blacklist_entry, list);
2869
ffb9bd68 2870 /*
223a76b2 2871 * If '/proc/kallsyms' is not showing kernel addresses, we won't
ffb9bd68
MH
2872 * show them here either.
2873 */
60f7bb66 2874 if (!kallsyms_show_value(m->file->f_cred))
ffb9bd68
MH
2875 seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
2876 (void *)ent->start_addr);
2877 else
2878 seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
2879 (void *)ent->end_addr, (void *)ent->start_addr);
63724740
MH
2880 return 0;
2881}
2882
4fdd8887
MH
2883static void kprobe_blacklist_seq_stop(struct seq_file *f, void *v)
2884{
2885 mutex_unlock(&kprobe_mutex);
2886}
2887
eac2cece 2888static const struct seq_operations kprobe_blacklist_sops = {
63724740
MH
2889 .start = kprobe_blacklist_seq_start,
2890 .next = kprobe_blacklist_seq_next,
4fdd8887 2891 .stop = kprobe_blacklist_seq_stop,
63724740
MH
2892 .show = kprobe_blacklist_seq_show,
2893};
eac2cece 2894DEFINE_SEQ_ATTRIBUTE(kprobe_blacklist);
63724740 2895
12310e34 2896static int arm_all_kprobes(void)
bf8f6e5b
AM
2897{
2898 struct hlist_head *head;
bf8f6e5b 2899 struct kprobe *p;
12310e34
JY
2900 unsigned int i, total = 0, errors = 0;
2901 int err, ret = 0;
bf8f6e5b
AM
2902
2903 mutex_lock(&kprobe_mutex);
2904
e579abeb
MH
2905 /* If kprobes are armed, just return */
2906 if (!kprobes_all_disarmed)
bf8f6e5b
AM
2907 goto already_enabled;
2908
977ad481
WN
2909 /*
2910 * optimize_kprobe() called by arm_kprobe() checks
2911 * kprobes_all_disarmed, so set kprobes_all_disarmed before
2912 * arm_kprobe.
2913 */
2914 kprobes_all_disarmed = false;
afd66255 2915 /* Arming kprobes doesn't optimize kprobe itself */
bf8f6e5b
AM
2916 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2917 head = &kprobe_table[i];
12310e34 2918 /* Arm all kprobes on a best-effort basis */
7e6a71d8 2919 hlist_for_each_entry(p, head, hlist) {
12310e34
JY
2920 if (!kprobe_disabled(p)) {
2921 err = arm_kprobe(p);
2922 if (err) {
2923 errors++;
2924 ret = err;
2925 }
2926 total++;
2927 }
2928 }
bf8f6e5b
AM
2929 }
2930
12310e34 2931 if (errors)
9c89bb8e 2932 pr_warn("Kprobes globally enabled, but failed to enable %d out of %d probes. Please check which kprobes are kept disabled via debugfs.\n",
12310e34
JY
2933 errors, total);
2934 else
2935 pr_info("Kprobes globally enabled\n");
bf8f6e5b
AM
2936
2937already_enabled:
2938 mutex_unlock(&kprobe_mutex);
12310e34 2939 return ret;
bf8f6e5b
AM
2940}
2941
297f9233 2942static int disarm_all_kprobes(void)
bf8f6e5b
AM
2943{
2944 struct hlist_head *head;
bf8f6e5b 2945 struct kprobe *p;
297f9233
JY
2946 unsigned int i, total = 0, errors = 0;
2947 int err, ret = 0;
bf8f6e5b
AM
2948
2949 mutex_lock(&kprobe_mutex);
2950
e579abeb 2951 /* If kprobes are already disarmed, just return */
6274de49
MH
2952 if (kprobes_all_disarmed) {
2953 mutex_unlock(&kprobe_mutex);
297f9233 2954 return 0;
6274de49 2955 }
bf8f6e5b 2956
e579abeb 2957 kprobes_all_disarmed = true;
afd66255 2958
bf8f6e5b
AM
2959 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2960 head = &kprobe_table[i];
297f9233 2961 /* Disarm all kprobes on a best-effort basis */
7e6a71d8 2962 hlist_for_each_entry(p, head, hlist) {
297f9233
JY
2963 if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
2964 err = disarm_kprobe(p, false);
2965 if (err) {
2966 errors++;
2967 ret = err;
2968 }
2969 total++;
2970 }
bf8f6e5b
AM
2971 }
2972 }
297f9233
JY
2973
2974 if (errors)
9c89bb8e 2975 pr_warn("Kprobes globally disabled, but failed to disable %d out of %d probes. Please check which kprobes are kept enabled via debugfs.\n",
297f9233
JY
2976 errors, total);
2977 else
2978 pr_info("Kprobes globally disabled\n");
2979
bf8f6e5b 2980 mutex_unlock(&kprobe_mutex);
bf8f6e5b 2981
6274de49
MH
2982 /* Wait for the optimizer to finish disarming all kprobes */
2983 wait_for_kprobe_optimizer();
297f9233
JY
2984
2985 return ret;
bf8f6e5b
AM
2986}
2987
2988/*
2989 * XXX: The debugfs bool file interface doesn't allow for callbacks
2990 * when the bool state is switched. We can reuse that facility when
2991 * it becomes available.
2992 */
2993static ssize_t read_enabled_file_bool(struct file *file,
2994 char __user *user_buf, size_t count, loff_t *ppos)
2995{
2996 char buf[3];
2997
e579abeb 2998 if (!kprobes_all_disarmed)
bf8f6e5b
AM
2999 buf[0] = '1';
3000 else
3001 buf[0] = '0';
3002 buf[1] = '\n';
3003 buf[2] = 0x00;
3004 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
3005}
3006
3007static ssize_t write_enabled_file_bool(struct file *file,
3008 const char __user *user_buf, size_t count, loff_t *ppos)
3009{
5d6de7d7
PA
3010 bool enable;
3011 int ret;
bf8f6e5b 3012
5d6de7d7
PA
3013 ret = kstrtobool_from_user(user_buf, count, &enable);
3014 if (ret)
3015 return ret;
bf8f6e5b 3016
5d6de7d7 3017 ret = enable ? arm_all_kprobes() : disarm_all_kprobes();
12310e34
JY
3018 if (ret)
3019 return ret;
3020
bf8f6e5b
AM
3021 return count;
3022}
3023
828c0950 3024static const struct file_operations fops_kp = {
bf8f6e5b
AM
3025 .read = read_enabled_file_bool,
3026 .write = write_enabled_file_bool,
6038f373 3027 .llseek = default_llseek,
bf8f6e5b
AM
3028};
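/*
 * Usage sketch: these fops back /sys/kernel/debug/kprobes/enabled
 * (created in debugfs_kprobe_init() below), so all probes can be
 * (dis)armed globally from user space:
 *
 *	# echo 0 > /sys/kernel/debug/kprobes/enabled	(forcibly disarm all)
 *	# echo 1 > /sys/kernel/debug/kprobes/enabled	(re-arm)
 *
 * kstrtobool_from_user() also accepts 'y'/'n' and 'on'/'off' spellings.
 */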
3029
55479f64 3030static int __init debugfs_kprobe_init(void)
346fd59b 3031{
8c0fd1fa 3032 struct dentry *dir;
346fd59b
SD
3033
3034 dir = debugfs_create_dir("kprobes", NULL);
346fd59b 3035
eac2cece 3036 debugfs_create_file("list", 0400, dir, NULL, &kprobes_fops);
346fd59b 3037
8f7262cd 3038 debugfs_create_file("enabled", 0600, dir, NULL, &fops_kp);
63724740 3039
8c0fd1fa 3040 debugfs_create_file("blacklist", 0400, dir, NULL,
eac2cece 3041 &kprobe_blacklist_fops);
bf8f6e5b 3042
346fd59b
SD
3043 return 0;
3044}
3045
3046late_initcall(debugfs_kprobe_init);
3047#endif /* CONFIG_DEBUG_FS */