kernel/kprobes.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Kernel Probes (KProbes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */

#define pr_fmt(fmt) "kprobes: " fmt

#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>
#include <linux/static_call.h>
#include <linux/perf_event.h>
#include <linux/execmem.h>

#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <linux/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

#if !defined(CONFIG_OPTPROBES) || !defined(CONFIG_SYSCTL)
#define kprobe_sysctls_init() do { } while (0)
#endif

static int kprobes_initialized;
/*
 * 'kprobe_table' can be accessed by:
 * - normal hlist traversal and RCU add/del while 'kprobe_mutex' is held, or
 * - RCU hlist traversal with preemption disabled (breakpoint handlers).
 */
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with 'kprobe_mutex' held */
static bool kprobes_all_disarmed;

/* This protects 'kprobe_table' and 'optimizing_list' */
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance);

kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
					    unsigned int __unused)
{
	return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
}

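/*
 * Illustrative use (a sketch, not part of this file): resolving a probe
 * point by name before falling back to a raw address:
 *
 *	kprobe_opcode_t *addr = kprobe_lookup_name("vfs_read", 0);
 *
 * A NULL result means kallsyms does not know the symbol (for example it
 * was inlined away); _kprobe_addr() below turns that into -ENOENT.
 */
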
/*
 * Blacklist -- list of 'struct kprobe_blacklist_entry' to store info on
 * areas which kprobes cannot probe.
 */
static LIST_HEAD(kprobe_blacklist);

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * 'kprobe::ainsn.insn' points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	struct kprobe_insn_cache *cache;
	int nused;
	int ngarbage;
	char slot_used[];
};

#define KPROBE_INSN_PAGE_SIZE(slots)			\
	(offsetof(struct kprobe_insn_page, slot_used) +	\
	 (sizeof(char) * (slots)))

static int slots_per_page(struct kprobe_insn_cache *c)
{
	return PAGE_SIZE / (c->insn_size * sizeof(kprobe_opcode_t));
}

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

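/*
 * Worked example (illustrative, assuming x86-64 values): 'kprobe_opcode_t'
 * is one byte and 'insn_size' is MAX_INSN_SIZE (16), so slots_per_page()
 * yields 4096 / (16 * 1) = 256 instruction slots per page.
 */
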
void __weak *alloc_insn_page(void)
{
	/*
	 * Use execmem_alloc() so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * for most of the architectures.
	 * (e.g. x86-64 needs this to handle the %rip-relative fixups.)
	 */
	return execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);
}

static void free_insn_page(void *page)
{
	execmem_free(page);
}

struct kprobe_insn_cache kprobe_insn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
	.alloc = alloc_insn_page,
	.free = free_insn_page,
	.sym = KPROBE_INSN_PAGE_SYM,
	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
	.nr_garbage = 0,
};
static int collect_garbage_slots(struct kprobe_insn_cache *c);

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip;
	kprobe_opcode_t *slot = NULL;

	/* Since the slot array is not protected by rcu, we need a mutex */
	mutex_lock(&c->mutex);
 retry:
	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if (kip->nused < slots_per_page(c)) {
			int i;

			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					slot = kip->insns + (i * c->insn_size);
					rcu_read_unlock();
					goto out;
				}
			}
			/* kip->nused is broken. Fix it. */
			kip->nused = slots_per_page(c);
			WARN_ON(1);
		}
	}
	rcu_read_unlock();

	/* If there are any garbage slots, collect them and try again. */
	if (c->nr_garbage && collect_garbage_slots(c) == 0)
		goto retry;

	/* All out of space. Need to allocate a new page. */
	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
	if (!kip)
		goto out;

	kip->insns = c->alloc();
	if (!kip->insns) {
		kfree(kip);
		goto out;
	}
	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	kip->cache = c;
	list_add_rcu(&kip->list, &c->pages);
	slot = kip->insns;

	/* Record the perf ksymbol register event after adding the page */
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns,
			   PAGE_SIZE, false, c->sym);
out:
	mutex_unlock(&c->mutex);
	return slot;
}

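/*
 * Usage sketch (illustrative): architectures normally reach this cache
 * through the get_insn_slot()/free_insn_slot() wrappers generated by
 * DEFINE_INSN_CACHE_OPS() in <linux/kprobes.h>, e.g. in an
 * arch_prepare_kprobe() implementation:
 *
 *	p->ainsn.insn = get_insn_slot();
 *	if (!p->ainsn.insn)
 *		return -ENOMEM;
 *	...
 *	free_insn_slot(p->ainsn.insn, dirty);	// on removal
 */
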
/* Return true if all garbage slots on this page were collected, otherwise false. */
static bool collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use. Free it unless
		 * it's the last one. We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kip->list)) {
			/*
			 * Record perf ksymbol unregister event before removing
			 * the page.
			 */
			perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
					   (unsigned long)kip->insns, PAGE_SIZE, true,
					   kip->cache->sym);
			list_del_rcu(&kip->list);
			synchronize_rcu();
			kip->cache->free(kip->insns);
			kfree(kip);
		}
		return true;
	}
	return false;
}

static int collect_garbage_slots(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no-one is interrupted on the garbage slots */
	synchronize_rcu();

	list_for_each_entry_safe(kip, next, &c->pages, list) {
		int i;

		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < slots_per_page(c); i++) {
			if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
				break;
		}
	}
	c->nr_garbage = 0;
	return 0;
}

void __free_insn_slot(struct kprobe_insn_cache *c,
		      kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	long idx;

	mutex_lock(&c->mutex);
	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		idx = ((long)slot - (long)kip->insns) /
			(c->insn_size * sizeof(kprobe_opcode_t));
		if (idx >= 0 && idx < slots_per_page(c))
			goto out;
	}
	/* Could not find this slot. */
	WARN_ON(1);
	kip = NULL;
out:
	rcu_read_unlock();
	/* Mark and sweep: this may sleep */
	if (kip) {
		/* Check double free */
		WARN_ON(kip->slot_used[idx] != SLOT_USED);
		if (dirty) {
			kip->slot_used[idx] = SLOT_DIRTY;
			kip->ngarbage++;
			if (++c->nr_garbage > slots_per_page(c))
				collect_garbage_slots(c);
		} else {
			collect_one_slot(kip, idx);
		}
	}
	mutex_unlock(&c->mutex);
}

/*
 * Check if the given address is on a page of kprobe instruction slots.
 * This will be used for checking whether the address on a stack
 * is on a text area or not.
 */
bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
{
	struct kprobe_insn_page *kip;
	bool ret = false;

	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if (addr >= (unsigned long)kip->insns &&
		    addr < (unsigned long)kip->insns + PAGE_SIZE) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
			     unsigned long *value, char *type, char *sym)
{
	struct kprobe_insn_page *kip;
	int ret = -ERANGE;

	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if ((*symnum)--)
			continue;
		strscpy(sym, c->sym, KSYM_NAME_LEN);
		*type = 't';
		*value = (unsigned long)kip->insns;
		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

#ifdef CONFIG_OPTPROBES
void __weak *alloc_optinsn_page(void)
{
	return alloc_insn_page();
}

void __weak free_optinsn_page(void *page)
{
	free_insn_page(page);
}

/* For optimized_kprobe buffer */
struct kprobe_insn_cache kprobe_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
	.alloc = alloc_optinsn_page,
	.free = free_optinsn_page,
	.sym = KPROBE_OPTINSN_PAGE_SYM,
	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
	/* .insn_size is initialized later */
	.nr_garbage = 0,
};
#endif /* CONFIG_OPTPROBES */
#endif /* __ARCH_WANT_KPROBES_INSN_SLOT */

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__this_cpu_write(kprobe_instance, kp);
}

static inline void reset_kprobe_instance(void)
{
	__this_cpu_write(kprobe_instance, NULL);
}

/*
 * This routine is called either:
 * - under the 'kprobe_mutex' - during kprobe_[un]register(), or
 * - with preemption disabled - from architecture specific code.
 */
struct kprobe *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, head, hlist,
				 lockdep_is_held(&kprobe_mutex)) {
		if (p->addr == addr)
			return p;
	}

	return NULL;
}
NOKPROBE_SYMBOL(get_kprobe);

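/*
 * Caller pattern (illustrative sketch): arch breakpoint handlers look up
 * the probe for the trapping address with preemption already disabled:
 *
 *	struct kprobe *p = get_kprobe((void *)instruction_pointer(regs));
 *	if (p)
 *		// dispatch to p->pre_handler ...
 */
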
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);

/* Return true if 'p' is an aggregator */
static inline bool kprobe_aggrprobe(struct kprobe *p)
{
	return p->pre_handler == aggr_pre_handler;
}

/* Return true if 'p' is unused */
static inline bool kprobe_unused(struct kprobe *p)
{
	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
	       list_empty(&p->list);
}

/* Keep all fields in the kprobe consistent. */
static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
{
	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
}

#ifdef CONFIG_OPTPROBES
/* NOTE: This is protected by 'kprobe_mutex'. */
static bool kprobes_allow_optimization;

/*
 * Call all 'kprobe::pre_handler' on the list, but ignore their return
 * values. This must be called from the arch-dependent optimized caller.
 */
void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->pre_handler(kp, regs);
		}
		reset_kprobe_instance();
	}
}
NOKPROBE_SYMBOL(opt_pre_handler);

/* Free optimized instructions and optimized_kprobe */
static void free_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_remove_optimized_kprobe(op);
	arch_remove_kprobe(p);
	kfree(op);
}

/* Return true if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		return arch_prepared_optinsn(&op->optinsn);
	}

	return 0;
}

/* Return true if the kprobe is disarmed. Note: 'p' must be on the hash list. */
bool kprobe_disarmed(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* If kprobe is not aggr/opt probe, just return kprobe is disabled */
	if (!kprobe_aggrprobe(p))
		return kprobe_disabled(p);

	op = container_of(p, struct optimized_kprobe, kp);

	return kprobe_disabled(p) && list_empty(&op->list);
}

/* Return true if the probe is queued on (un)optimizing lists */
static bool kprobe_queued(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (!list_empty(&op->list))
			return true;
	}
	return false;
}

/*
 * Return an optimized kprobe whose optimizing code replaces
 * instructions including 'addr' (excluding the breakpoint itself).
 */
static struct kprobe *get_optimized_kprobe(kprobe_opcode_t *addr)
{
	int i;
	struct kprobe *p = NULL;
	struct optimized_kprobe *op;

	/* Don't check i == 0, since that is a breakpoint case. */
	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH / sizeof(kprobe_opcode_t); i++)
		p = get_kprobe(addr - i);

	if (p && kprobe_optready(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (arch_within_optimized_kprobe(op, addr))
			return p;
	}

	return NULL;
}

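/*
 * Illustrative reading of the loop above: if an optimized probe at
 * 'addr - 2' was rewritten into a jump whose replaced region spans
 * 'addr', walking backwards one opcode at a time finds that probe, and
 * arch_within_optimized_kprobe() confirms 'addr' falls inside the
 * rewritten instructions.
 */
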
/* Optimization staging lists, protected by 'kprobe_mutex' */
static LIST_HEAD(optimizing_list);
static LIST_HEAD(unoptimizing_list);
static LIST_HEAD(freeing_list);

static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
#define OPTIMIZE_DELAY 5

/*
 * Optimize (replace a breakpoint with a jump) kprobes listed on
 * 'optimizing_list'.
 */
static void do_optimize_kprobes(void)
{
	lockdep_assert_held(&text_mutex);
	/*
	 * The optimization/unoptimization refers to 'online_cpus' via
	 * stop_machine(), and cpu-hotplug modifies 'online_cpus'. At the
	 * same time, 'text_mutex' will be held in cpu-hotplug and here.
	 * This combination can cause a deadlock (cpu-hotplug tries to lock
	 * 'text_mutex' but stop_machine() can not be done because
	 * 'online_cpus' has been changed).
	 * To avoid this deadlock, the caller must have locked cpu-hotplug,
	 * preventing cpu-hotplug outside of 'text_mutex' locking.
	 */
	lockdep_assert_cpus_held();

	/* Optimization is never done when disarmed */
	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
	    list_empty(&optimizing_list))
		return;

	arch_optimize_kprobes(&optimizing_list);
}

/*
 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 * if needed) kprobes listed on 'unoptimizing_list'.
 */
static void do_unoptimize_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	lockdep_assert_held(&text_mutex);
	/* See comment in do_optimize_kprobes() */
	lockdep_assert_cpus_held();

	if (!list_empty(&unoptimizing_list))
		arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);

	/* Loop on 'freeing_list' for disarming and removing from kprobe hash list */
	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		/* Switching from detour code to origin */
		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
		/* Disarm probes if marked disabled and not gone */
		if (kprobe_disabled(&op->kp) && !kprobe_gone(&op->kp))
			arch_disarm_kprobe(&op->kp);
		if (kprobe_unused(&op->kp)) {
			/*
			 * Remove unused probes from hash list. After waiting
			 * for synchronization, these probes are reclaimed.
			 * (reclaiming is done by do_free_cleaned_kprobes().)
			 */
			hlist_del_rcu(&op->kp.hlist);
		} else
			list_del_init(&op->list);
	}
}

/* Reclaim all kprobes on the 'freeing_list' */
static void do_free_cleaned_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		list_del_init(&op->list);
		if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
			/*
			 * This must not happen, but if there is a kprobe
			 * still in use, keep it on the kprobes hash list.
			 */
			continue;
		}
		free_aggr_kprobe(&op->kp);
	}
}

/* Start the optimizer after OPTIMIZE_DELAY has passed */
static void kick_kprobe_optimizer(void)
{
	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}

/* Kprobe jump optimizer */
static void kprobe_optimizer(struct work_struct *work)
{
	mutex_lock(&kprobe_mutex);
	cpus_read_lock();
	mutex_lock(&text_mutex);

	/*
	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
	 * kprobes before waiting for the quiescence period.
	 */
	do_unoptimize_kprobes();

	/*
	 * Step 2: Wait for a quiescence period to ensure all potentially
	 * preempted tasks have been scheduled normally. Because an optprobe
	 * may modify multiple instructions, there is a chance that the Nth
	 * instruction is preempted. In that case, such tasks can return
	 * to the 2nd-Nth byte of the jump instruction. This wait avoids that.
	 * Note that on a non-preemptive kernel, this is transparently converted
	 * to synchronize_sched() to wait for all interrupts to have completed.
	 */
	synchronize_rcu_tasks();

	/* Step 3: Optimize kprobes after the quiescence period */
	do_optimize_kprobes();

	/* Step 4: Free cleaned kprobes after the quiescence period */
	do_free_cleaned_kprobes();

	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	/* Step 5: Kick the optimizer again if needed */
	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
		kick_kprobe_optimizer();

	mutex_unlock(&kprobe_mutex);
}

/* Wait for completion of optimization and unoptimization */
void wait_for_kprobe_optimizer(void)
{
	mutex_lock(&kprobe_mutex);

	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
		mutex_unlock(&kprobe_mutex);

		/* This will also make 'optimizing_work' execute immediately */
		flush_delayed_work(&optimizing_work);
		/* 'optimizing_work' might not have been queued yet, relax */
		cpu_relax();

		mutex_lock(&kprobe_mutex);
	}

	mutex_unlock(&kprobe_mutex);
}

bool optprobe_queued_unopt(struct optimized_kprobe *op)
{
	struct optimized_kprobe *_op;

	list_for_each_entry(_op, &unoptimizing_list, list) {
		if (op == _op)
			return true;
	}

	return false;
}

/* Optimize the kprobe if it is ready to be optimized */
static void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* kprobes with a 'post_handler' cannot be optimized */
	if (p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check there are no other kprobes at the optimized instructions */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
		if (optprobe_queued_unopt(op)) {
			/* This is under unoptimizing. Just dequeue the probe */
			list_del_init(&op->list);
		}
		return;
	}
	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

	/*
	 * On the 'unoptimizing_list' and 'optimizing_list',
	 * 'op' must have the OPTIMIZED flag
	 */
	if (WARN_ON_ONCE(!list_empty(&op->list)))
		return;

	list_add(&op->list, &optimizing_list);
	kick_kprobe_optimizer();
}

/* Short cut to direct unoptimizing */
static void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
	lockdep_assert_cpus_held();
	arch_unoptimize_kprobe(op);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
}

/* Unoptimize a kprobe if it is optimized */
static void unoptimize_kprobe(struct kprobe *p, bool force)
{
	struct optimized_kprobe *op;

	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
		return; /* This is not an optprobe nor optimized */

	op = container_of(p, struct optimized_kprobe, kp);
	if (!kprobe_optimized(p))
		return;

	if (!list_empty(&op->list)) {
		if (optprobe_queued_unopt(op)) {
			/* Queued in the unoptimizing queue */
			if (force) {
				/*
				 * Forcibly unoptimize the kprobe here, and queue it
				 * in the freeing list for release afterwards.
				 */
				force_unoptimize_kprobe(op);
				list_move(&op->list, &freeing_list);
			}
		} else {
			/* Dequeue from the optimizing queue */
			list_del_init(&op->list);
			op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
		}
		return;
	}

	/* Optimized kprobe case */
	if (force) {
		/* Forcibly update the code: this is a special case */
		force_unoptimize_kprobe(op);
	} else {
		list_add(&op->list, &unoptimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Cancel unoptimizing for reusing */
static int reuse_unused_kprobe(struct kprobe *ap)
{
	struct optimized_kprobe *op;

	/*
	 * An unused kprobe MUST be on the way of delayed unoptimizing (which
	 * means there is still a relative jump) and disabled.
	 */
	op = container_of(ap, struct optimized_kprobe, kp);
	WARN_ON_ONCE(list_empty(&op->list));
	/* Enable the probe again */
	ap->flags &= ~KPROBE_FLAG_DISABLED;
	/* Optimize it again. (remove from 'op->list') */
	if (!kprobe_optready(ap))
		return -EINVAL;

	optimize_kprobe(ap);
	return 0;
}

/* Remove optimized instructions */
static void kill_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	if (!list_empty(&op->list))
		/* Dequeue from the (un)optimization queue */
		list_del_init(&op->list);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;

	if (kprobe_unused(p)) {
		/*
		 * An unused kprobe is on the unoptimizing or freeing list. We
		 * move it to 'freeing_list' and let kprobe_optimizer() remove
		 * it from the kprobe hash list and free it.
		 */
		if (optprobe_queued_unopt(op))
			list_move(&op->list, &freeing_list);
	}

	/* Don't touch the code, because it is already freed. */
	arch_remove_optimized_kprobe(op);
}

static inline
void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	if (!kprobe_ftrace(p))
		arch_prepare_optimized_kprobe(op, p);
}

/* Try to prepare optimized instructions */
static void prepare_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	__prepare_optimized_kprobe(op, p);
}

/* Allocate a new optimized_kprobe and try to prepare optimized instructions. */
static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
	if (!op)
		return NULL;

	INIT_LIST_HEAD(&op->list);
	op->kp.addr = p->addr;
	__prepare_optimized_kprobe(op, p);

	return &op->kp;
}

static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);

/*
 * Prepare an optimized_kprobe and optimize it.
 * NOTE: 'p' must be a normal registered kprobe.
 */
static void try_to_optimize_kprobe(struct kprobe *p)
{
	struct kprobe *ap;
	struct optimized_kprobe *op;

	/* Impossible to optimize an ftrace-based kprobe. */
	if (kprobe_ftrace(p))
		return;

	/* For preparing optimization, jump_label_text_reserved() is called. */
	cpus_read_lock();
	jump_label_lock();
	mutex_lock(&text_mutex);

	ap = alloc_aggr_kprobe(p);
	if (!ap)
		goto out;

	op = container_of(ap, struct optimized_kprobe, kp);
	if (!arch_prepared_optinsn(&op->optinsn)) {
		/* If we failed to set up optimizing, fall back to kprobe. */
		arch_remove_optimized_kprobe(op);
		kfree(op);
		goto out;
	}

	init_aggr_kprobe(ap, p);
	optimize_kprobe(ap);	/* This just kicks the optimizer thread. */

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
	cpus_read_unlock();
}

static void optimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already allowed, just return. */
	if (kprobes_allow_optimization)
		goto out;

	cpus_read_lock();
	kprobes_allow_optimization = true;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry(p, head, hlist)
			if (!kprobe_disabled(p))
				optimize_kprobe(p);
	}
	cpus_read_unlock();
	pr_info("kprobe jump-optimization is enabled. All kprobes are optimized if possible.\n");
out:
	mutex_unlock(&kprobe_mutex);
}

#ifdef CONFIG_SYSCTL
static void unoptimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already prohibited, just return. */
	if (!kprobes_allow_optimization) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	cpus_read_lock();
	kprobes_allow_optimization = false;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry(p, head, hlist) {
			if (!kprobe_disabled(p))
				unoptimize_kprobe(p, false);
		}
	}
	cpus_read_unlock();
	mutex_unlock(&kprobe_mutex);

	/* Wait for completion of unoptimizing. */
	wait_for_kprobe_optimizer();
	pr_info("kprobe jump-optimization is disabled. All kprobes are based on software breakpoint.\n");
}

static DEFINE_MUTEX(kprobe_sysctl_mutex);
static int sysctl_kprobes_optimization;
static int proc_kprobes_optimization_handler(struct ctl_table *table,
					     int write, void *buffer,
					     size_t *length, loff_t *ppos)
{
	int ret;

	mutex_lock(&kprobe_sysctl_mutex);
	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (sysctl_kprobes_optimization)
		optimize_all_kprobes();
	else
		unoptimize_all_kprobes();
	mutex_unlock(&kprobe_sysctl_mutex);

	return ret;
}

static struct ctl_table kprobe_sysctls[] = {
	{
		.procname	= "kprobes-optimization",
		.data		= &sysctl_kprobes_optimization,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_kprobes_optimization_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
};

static void __init kprobe_sysctls_init(void)
{
	register_sysctl_init("debug", kprobe_sysctls);
}
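
/*
 * Note (added for clarity): this registers the knob at
 * /proc/sys/debug/kprobes-optimization; writing 0 or 1 there flips
 * jump-optimization off or on via proc_kprobes_optimization_handler()
 * above.
 */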
#endif /* CONFIG_SYSCTL */

/* Put a breakpoint for a probe. */
static void __arm_kprobe(struct kprobe *p)
{
	struct kprobe *_p;

	lockdep_assert_held(&text_mutex);

	/* Find the overlapping optimized kprobes. */
	_p = get_optimized_kprobe(p->addr);
	if (unlikely(_p))
		/* Fall back to an unoptimized kprobe */
		unoptimize_kprobe(_p, true);

	arch_arm_kprobe(p);
	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
}

/* Remove the breakpoint of a probe. */
static void __disarm_kprobe(struct kprobe *p, bool reopt)
{
	struct kprobe *_p;

	lockdep_assert_held(&text_mutex);

	/* Try to unoptimize */
	unoptimize_kprobe(p, kprobes_all_disarmed);

	if (!kprobe_queued(p)) {
		arch_disarm_kprobe(p);
		/* If another kprobe was blocked, re-optimize it. */
		_p = get_optimized_kprobe(p->addr);
		if (unlikely(_p) && reopt)
			optimize_kprobe(_p);
	}
	/*
	 * TODO: Since unoptimization and real disarming will be done by
	 * the worker thread, we can not check here whether another probe
	 * was unoptimized because of this probe. It should be re-optimized
	 * by the worker thread.
	 */
}

#else /* !CONFIG_OPTPROBES */

#define optimize_kprobe(p)			do {} while (0)
#define unoptimize_kprobe(p, f)			do {} while (0)
#define kill_optimized_kprobe(p)		do {} while (0)
#define prepare_optimized_kprobe(p)		do {} while (0)
#define try_to_optimize_kprobe(p)		do {} while (0)
#define __arm_kprobe(p)				arch_arm_kprobe(p)
#define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
#define kprobe_disarmed(p)			kprobe_disabled(p)
#define wait_for_kprobe_optimizer()		do {} while (0)

static int reuse_unused_kprobe(struct kprobe *ap)
{
	/*
	 * If optimized kprobes are NOT supported, the aggr kprobe is
	 * released at the same time that the last aggregated kprobe is
	 * unregistered.
	 * Thus there should be no chance to reuse an unused kprobe.
	 */
	WARN_ON_ONCE(1);
	return -EINVAL;
}

static void free_aggr_kprobe(struct kprobe *p)
{
	arch_remove_kprobe(p);
	kfree(p);
}

static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */

#ifdef CONFIG_KPROBES_ON_FTRACE
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS,
};

static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
};

static int kprobe_ipmodify_enabled;
static int kprobe_ftrace_enabled;
bool kprobe_ftrace_disabled;

static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
			       int *cnt)
{
	int ret;

	lockdep_assert_held(&kprobe_mutex);

	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0);
	if (WARN_ONCE(ret < 0, "Failed to arm kprobe-ftrace at %pS (error %d)\n", p->addr, ret))
		return ret;

	if (*cnt == 0) {
		ret = register_ftrace_function(ops);
		if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret))
			goto err_ftrace;
	}

	(*cnt)++;
	return ret;

err_ftrace:
	/*
	 * At this point, since 'ops' is not registered, we should be safe
	 * from registering an empty filter.
	 */
	ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
	return ret;
}

static int arm_kprobe_ftrace(struct kprobe *p)
{
	bool ipmodify = (p->post_handler != NULL);

	return __arm_kprobe_ftrace(p,
		ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
		ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
}

static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
				  int *cnt)
{
	int ret;

	lockdep_assert_held(&kprobe_mutex);

	if (*cnt == 1) {
		ret = unregister_ftrace_function(ops);
		if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (error %d)\n", ret))
			return ret;
	}

	(*cnt)--;

	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
	WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (error %d)\n",
		  p->addr, ret);
	return ret;
}

static int disarm_kprobe_ftrace(struct kprobe *p)
{
	bool ipmodify = (p->post_handler != NULL);

	return __disarm_kprobe_ftrace(p,
		ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
		ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
}

void kprobe_ftrace_kill(void)
{
	kprobe_ftrace_disabled = true;
}
#else /* !CONFIG_KPROBES_ON_FTRACE */
static inline int arm_kprobe_ftrace(struct kprobe *p)
{
	return -ENODEV;
}

static inline int disarm_kprobe_ftrace(struct kprobe *p)
{
	return -ENODEV;
}
#endif

static int prepare_kprobe(struct kprobe *p)
{
	/* Must ensure p->addr is really on ftrace */
	if (kprobe_ftrace(p))
		return arch_prepare_kprobe_ftrace(p);

	return arch_prepare_kprobe(p);
}

static int arm_kprobe(struct kprobe *kp)
{
	if (unlikely(kprobe_ftrace(kp)))
		return arm_kprobe_ftrace(kp);

	cpus_read_lock();
	mutex_lock(&text_mutex);
	__arm_kprobe(kp);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	return 0;
}

static int disarm_kprobe(struct kprobe *kp, bool reopt)
{
	if (unlikely(kprobe_ftrace(kp)))
		return disarm_kprobe_ftrace(kp);

	cpus_read_lock();
	mutex_lock(&text_mutex);
	__disarm_kprobe(kp, reopt);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	return 0;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}
NOKPROBE_SYMBOL(aggr_pre_handler);

static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
			      unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}
NOKPROBE_SYMBOL(aggr_post_handler);

/* Walk the list and increment 'nmissed' if 'p' has child probes. */
void kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;

	if (!kprobe_aggrprobe(p)) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
}
NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);

static struct kprobe kprobe_busy = {
	.addr = (void *) get_kprobe,
};

void kprobe_busy_begin(void)
{
	struct kprobe_ctlblk *kcb;

	preempt_disable();
	__this_cpu_write(current_kprobe, &kprobe_busy);
	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
}

void kprobe_busy_end(void)
{
	__this_cpu_write(current_kprobe, NULL);
	preempt_enable();
}

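/*
 * Usage sketch (illustrative): code that must not be re-entered by kprobe
 * handlers on this CPU can mark itself busy, so the recursion check in
 * the breakpoint handler treats the CPU as already running a kprobe:
 *
 *	kprobe_busy_begin();
 *	... touch data shared with kprobe handlers ...
 *	kprobe_busy_end();
 */
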
/* Add the new probe to 'ap->list'. */
static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	if (p->post_handler)
		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */

	list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	return 0;
}

/*
 * Fill in the required fields of the aggregator kprobe. Replace the
 * earlier kprobe in the hlist with the aggregator kprobe.
 */
static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	/* Copy the insn slot of 'p' to 'ap'. */
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
	ap->pre_handler = aggr_pre_handler;
	/* We don't care about a kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;

	INIT_LIST_HEAD(&ap->list);
	INIT_HLIST_NODE(&ap->hlist);

	list_add_rcu(&p->list, &ap->list);
	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This registers the second or subsequent kprobe at the same address.
 */
static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = orig_p;

	cpus_read_lock();

	/* For preparing optimization, jump_label_text_reserved() is called */
	jump_label_lock();
	mutex_lock(&text_mutex);

	if (!kprobe_aggrprobe(orig_p)) {
		/* If 'orig_p' is not an 'aggr_kprobe', create a new one. */
		ap = alloc_aggr_kprobe(orig_p);
		if (!ap) {
			ret = -ENOMEM;
			goto out;
		}
		init_aggr_kprobe(ap, orig_p);
	} else if (kprobe_unused(ap)) {
		/* This probe is going to die. Rescue it. */
		ret = reuse_unused_kprobe(ap);
		if (ret)
			goto out;
	}

	if (kprobe_gone(ap)) {
		/*
		 * Attempting to insert a new probe at the same location that
		 * had a probe in the module vaddr area which has already been
		 * freed. So, the instruction slot has already been
		 * released. We need a new slot for the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free 'ap'. It will be used next time, or
			 * freed by unregister_kprobe().
			 */
			goto out;

		/* Prepare optimized instructions if possible. */
		prepare_optimized_kprobe(ap);

		/*
		 * Clear the gone flag to prevent allocating a new slot again,
		 * and set the disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	/* Copy the insn slot of 'p' to 'ap'. */
	copy_kprobe(ap, p);
	ret = add_new_kprobe(ap, p);

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
	cpus_read_unlock();

	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed) {
			/* Arm the breakpoint again. */
			ret = arm_kprobe(ap);
			if (ret) {
				ap->flags |= KPROBE_FLAG_DISABLED;
				list_del_rcu(&p->list);
				synchronize_rcu();
			}
		}
	}
	return ret;
}

bool __weak arch_within_kprobe_blacklist(unsigned long addr)
{
	/* The '__kprobes' functions and entry code must not be probed. */
	return addr >= (unsigned long)__kprobes_text_start &&
	       addr < (unsigned long)__kprobes_text_end;
}

static bool __within_kprobe_blacklist(unsigned long addr)
{
	struct kprobe_blacklist_entry *ent;

	if (arch_within_kprobe_blacklist(addr))
		return true;
	/*
	 * If 'kprobe_blacklist' is defined, check the address and
	 * reject any probe registration in the prohibited area.
	 */
	list_for_each_entry(ent, &kprobe_blacklist, list) {
		if (addr >= ent->start_addr && addr < ent->end_addr)
			return true;
	}
	return false;
}

bool within_kprobe_blacklist(unsigned long addr)
{
	char symname[KSYM_NAME_LEN], *p;

	if (__within_kprobe_blacklist(addr))
		return true;

	/* Check if the address is on a suffixed symbol */
	if (!lookup_symbol_name(addr, symname)) {
		p = strchr(symname, '.');
		if (!p)
			return false;
		*p = '\0';
		addr = (unsigned long)kprobe_lookup_name(symname, 0);
		if (addr)
			return __within_kprobe_blacklist(addr);
	}
	return false;
}

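/*
 * How code lands on this blacklist (illustrative; 'my_handler' and
 * 'my_helper' are hypothetical names): the '__kprobes' attribute places
 * a function in the '.kprobes.text' section checked by
 * arch_within_kprobe_blacklist(), while NOKPROBE_SYMBOL() records it in
 * the table used to populate 'kprobe_blacklist':
 *
 *	static int __kprobes my_handler(struct pt_regs *regs) { ... }
 *
 *	static void my_helper(void) { ... }
 *	NOKPROBE_SYMBOL(my_helper);
 */
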
/*
 * arch_adjust_kprobe_addr - adjust the address
 * @addr: symbol base address
 * @offset: offset within the symbol
 * @on_func_entry: was this @addr+@offset on the function entry
 *
 * Typically returns @addr + @offset, except for special cases where the
 * function might be prefixed by a CFI landing pad; in that case any offset
 * inside the landing pad is mapped to the first 'real' instruction of the
 * symbol.
 *
 * Specifically, for things like IBT/BTI, skip the resp. ENDBR/BTI.C
 * instruction at +0.
 */
kprobe_opcode_t *__weak arch_adjust_kprobe_addr(unsigned long addr,
						unsigned long offset,
						bool *on_func_entry)
{
	*on_func_entry = !offset;
	return (kprobe_opcode_t *)(addr + offset);
}

/*
 * If 'symbol_name' is specified, look it up and add the 'offset'
 * to it. This way, we can specify a relative address to a symbol.
 * This returns encoded errors if it fails to look up the symbol or if
 * an invalid combination of parameters is passed.
 */
static kprobe_opcode_t *
_kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name,
	     unsigned long offset, bool *on_func_entry)
{
	if ((symbol_name && addr) || (!symbol_name && !addr))
		goto invalid;

	if (symbol_name) {
		/*
		 * Input: @sym + @offset
		 * Output: @addr + @offset
		 *
		 * NOTE: kprobe_lookup_name() does *NOT* fold the offset
		 * argument into its output!
		 */
		addr = kprobe_lookup_name(symbol_name, offset);
		if (!addr)
			return ERR_PTR(-ENOENT);
	}

	/*
	 * So here we have @addr + @offset, displace it into a new
	 * @addr' + @offset' where @addr' is the symbol start address.
	 */
	addr = (void *)addr + offset;
	if (!kallsyms_lookup_size_offset((unsigned long)addr, NULL, &offset))
		return ERR_PTR(-ENOENT);
	addr = (void *)addr - offset;

	/*
	 * Then ask the architecture to re-combine them, taking care of
	 * magical function entry details while telling us if this was indeed
	 * at the start of the function.
	 */
	addr = arch_adjust_kprobe_addr((unsigned long)addr, offset, on_func_entry);
	if (addr)
		return addr;

invalid:
	return ERR_PTR(-EINVAL);
}

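/*
 * Worked example (illustrative): registering with .symbol_name = "vfs_read"
 * and .offset = 4 first resolves "vfs_read" via kallsyms, then the
 * displacement step re-expresses the result as symbol start + offset, and
 * arch_adjust_kprobe_addr() decides whether the location still counts as
 * the function entry (offset 0 normally does, offset 4 normally does not).
 */
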
static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
{
	bool on_func_entry;

	return _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
}

/*
 * Check that 'p' is valid and return the aggregator kprobe
 * at the same address.
 */
static struct kprobe *__get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	lockdep_assert_held(&kprobe_mutex);

	ap = get_kprobe(p->addr);
	if (unlikely(!ap))
		return NULL;

	if (p != ap) {
		list_for_each_entry(list_p, &ap->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return ap;
}

/*
 * Warn and return an error if the kprobe is being re-registered, since
 * that indicates a software bug.
 */
static inline int warn_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;

	mutex_lock(&kprobe_mutex);
	if (WARN_ON_ONCE(__get_valid_kprobe(p)))
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);

	return ret;
}

static int check_ftrace_location(struct kprobe *p)
{
	unsigned long addr = (unsigned long)p->addr;

	if (ftrace_location(addr) == addr) {
#ifdef CONFIG_KPROBES_ON_FTRACE
		p->flags |= KPROBE_FLAG_FTRACE;
#else	/* !CONFIG_KPROBES_ON_FTRACE */
		return -EINVAL;
#endif
	}
	return 0;
}

static bool is_cfi_preamble_symbol(unsigned long addr)
{
	char symbuf[KSYM_NAME_LEN];

	if (lookup_symbol_name(addr, symbuf))
		return false;

	/* str_has_prefix() takes the string first and the prefix second. */
	return str_has_prefix(symbuf, "__cfi_") ||
	       str_has_prefix(symbuf, "__pfx_");
}

static int check_kprobe_address_safe(struct kprobe *p,
				     struct module **probed_mod)
{
	int ret;

	ret = check_ftrace_location(p);
	if (ret)
		return ret;
	jump_label_lock();
	preempt_disable();

	/* Ensure the address is in a text area, and find a module if it exists. */
	*probed_mod = NULL;
	if (!core_kernel_text((unsigned long) p->addr)) {
		*probed_mod = __module_text_address((unsigned long) p->addr);
		if (!(*probed_mod)) {
			ret = -EINVAL;
			goto out;
		}
	}
	/* Ensure it is not in a reserved area. */
	if (in_gate_area_no_mm((unsigned long) p->addr) ||
	    within_kprobe_blacklist((unsigned long) p->addr) ||
	    jump_label_text_reserved(p->addr, p->addr) ||
	    static_call_text_reserved(p->addr, p->addr) ||
	    find_bug((unsigned long)p->addr) ||
	    is_cfi_preamble_symbol((unsigned long)p->addr)) {
		ret = -EINVAL;
		goto out;
	}

	/* Get a module refcount and reject __init functions for loaded modules. */
	if (IS_ENABLED(CONFIG_MODULES) && *probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(*probed_mod))) {
			ret = -ENOENT;
			goto out;
		}

		/*
		 * If the module freed '.init.text', we couldn't insert
		 * kprobes in there.
		 */
		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
		    !module_is_coming(*probed_mod)) {
			module_put(*probed_mod);
			*probed_mod = NULL;
			ret = -ENOENT;
		}
	}

out:
	preempt_enable();
	jump_label_unlock();

	return ret;
}

1624
55479f64 1625int register_kprobe(struct kprobe *p)
f7fa6ef0
MH
1626{
1627 int ret;
1628 struct kprobe *old_p;
1629 struct module *probed_mod;
1630 kprobe_opcode_t *addr;
bf7a87f1 1631 bool on_func_entry;
f7fa6ef0
MH
1632
1633 /* Adjust probe address from symbol */
bf7a87f1 1634 addr = _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
f7fa6ef0
MH
1635 if (IS_ERR(addr))
1636 return PTR_ERR(addr);
1637 p->addr = addr;
1638
33b1d146 1639 ret = warn_kprobe_rereg(p);
f7fa6ef0
MH
1640 if (ret)
1641 return ret;
1642
1643 /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
1644 p->flags &= KPROBE_FLAG_DISABLED;
3516a460 1645 p->nmissed = 0;
9861668f 1646 INIT_LIST_HEAD(&p->list);
afd66255 1647
f7fa6ef0
MH
1648 ret = check_kprobe_address_safe(p, &probed_mod);
1649 if (ret)
1650 return ret;
1651
1652 mutex_lock(&kprobe_mutex);
afd66255 1653
bf7a87f1
JO
1654 if (on_func_entry)
1655 p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY;
1656
64f562c6
AM
1657 old_p = get_kprobe(p->addr);
1658 if (old_p) {
223a76b2 1659 /* Since this may unoptimize 'old_p', locking 'text_mutex'. */
64f562c6 1660 ret = register_aggr_kprobe(old_p, p);
1da177e4
LT
1661 goto out;
1662 }
1da177e4 1663
2d1e38f5
TG
1664 cpus_read_lock();
1665 /* Prevent text modification */
1666 mutex_lock(&text_mutex);
ae6aa16f 1667 ret = prepare_kprobe(p);
25764288 1668 mutex_unlock(&text_mutex);
2d1e38f5 1669 cpus_read_unlock();
6f716acd 1670 if (ret)
afd66255 1671 goto out;
49a2a1b8 1672
64f562c6 1673 INIT_HLIST_NODE(&p->hlist);
3516a460 1674 hlist_add_head_rcu(&p->hlist,
1da177e4
LT
1675 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
1676
12310e34
JY
1677 if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
1678 ret = arm_kprobe(p);
1679 if (ret) {
1680 hlist_del_rcu(&p->hlist);
ae8b7ce7 1681 synchronize_rcu();
12310e34
JY
1682 goto out;
1683 }
1684 }
afd66255
MH
1685
1686 /* Try to optimize kprobe */
1687 try_to_optimize_kprobe(p);
1da177e4 1688out:
7a7d1cf9 1689 mutex_unlock(&kprobe_mutex);
49a2a1b8 1690
e8386a0c 1691 if (probed_mod)
df019b1d 1692 module_put(probed_mod);
e8386a0c 1693
1da177e4
LT
1694 return ret;
1695}
99081ab5 1696EXPORT_SYMBOL_GPL(register_kprobe);
1da177e4 1697
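/*
 * Usage sketch (illustrative; modeled on samples/kprobes/kprobe_example.c,
 * names here are hypothetical): a module registers a probe by symbol name
 * and its pre-handler runs whenever the probed function is hit:
 *
 *	static int sample_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("hit %s at %px\n", p->symbol_name, p->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe sample_kp = {
 *		.symbol_name	= "kernel_clone",
 *		.pre_handler	= sample_pre,
 *	};
 *
 *	// module init: register_kprobe(&sample_kp);
 *	// module exit: unregister_kprobe(&sample_kp);
 */
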
/* Check if all probes on the 'ap' are disabled. */
static bool aggr_kprobe_disabled(struct kprobe *ap)
{
	struct kprobe *kp;

	lockdep_assert_held(&kprobe_mutex);

	list_for_each_entry(kp, &ap->list, list)
		if (!kprobe_disabled(kp))
			/*
			 * Since there is an active probe on the list,
			 * we can't disable this 'ap'.
			 */
			return false;

	return true;
}

55479f64 1716static struct kprobe *__disable_kprobe(struct kprobe *p)
6f0f1dd7
MH
1717{
1718 struct kprobe *orig_p;
297f9233 1719 int ret;
6f0f1dd7 1720
57d4e317
MH
1721 lockdep_assert_held(&kprobe_mutex);
1722
6f0f1dd7
MH
1723 /* Get an original kprobe for return */
1724 orig_p = __get_valid_kprobe(p);
1725 if (unlikely(orig_p == NULL))
297f9233 1726 return ERR_PTR(-EINVAL);
6f0f1dd7
MH
1727
1728 if (!kprobe_disabled(p)) {
1729 /* Disable probe if it is a child probe */
1730 if (p != orig_p)
1731 p->flags |= KPROBE_FLAG_DISABLED;
1732
1733 /* Try to disarm and disable this/parent probe */
1734 if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
69d54b91 1735 /*
9c80e799
KI
1736 * Don't be lazy here. Even if 'kprobes_all_disarmed'
1737 * is false, 'orig_p' might not have been armed yet.
1738 * Note arm_all_kprobes() __tries__ to arm all kprobes
 1739 * on a best-effort basis.
69d54b91 1740 */
9c80e799 1741 if (!kprobes_all_disarmed && !kprobe_disabled(orig_p)) {
297f9233
JY
1742 ret = disarm_kprobe(orig_p, true);
1743 if (ret) {
1744 p->flags &= ~KPROBE_FLAG_DISABLED;
1745 return ERR_PTR(ret);
1746 }
1747 }
6f0f1dd7
MH
1748 orig_p->flags |= KPROBE_FLAG_DISABLED;
1749 }
1750 }
1751
1752 return orig_p;
1753}
1754
de5bd88d
MH
1755/*
 1756 * Unregister a kprobe without scheduler synchronization.
1757 */
55479f64 1758static int __unregister_kprobe_top(struct kprobe *p)
de5bd88d 1759{
6d8e40a8 1760 struct kprobe *ap, *list_p;
de5bd88d 1761
6f0f1dd7
MH
1762 /* Disable kprobe. This will disarm it if needed. */
1763 ap = __disable_kprobe(p);
297f9233
JY
1764 if (IS_ERR(ap))
1765 return PTR_ERR(ap);
de5bd88d 1766
6f0f1dd7 1767 if (ap == p)
bf8f6e5b 1768 /*
6f0f1dd7
MH
 1769 * This probe is an independent (and non-optimized) kprobe
1770 * (not an aggrprobe). Remove from the hash list.
bf8f6e5b 1771 */
6f0f1dd7
MH
1772 goto disarmed;
1773
 1774 /* The following process expects this probe to be an aggrprobe. */
1775 WARN_ON(!kprobe_aggrprobe(ap));
1776
6274de49
MH
1777 if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
1778 /*
 1779 * !disarmed can happen if the probe is under delayed
1780 * unoptimizing.
1781 */
6f0f1dd7
MH
1782 goto disarmed;
1783 else {
 1784 /* If the probe being disabled has special handlers, update the aggrprobe */
e8386a0c 1785 if (p->post_handler && !kprobe_gone(p)) {
7e6a71d8 1786 list_for_each_entry(list_p, &ap->list, list) {
9861668f
MH
1787 if ((list_p != p) && (list_p->post_handler))
1788 goto noclean;
1789 }
5dd7caf0
LH
1790 /*
1791 * For the kprobe-on-ftrace case, we keep the
1792 * post_handler setting to identify this aggrprobe
1793 * armed with kprobe_ipmodify_ops.
1794 */
1795 if (!kprobe_ftrace(ap))
1796 ap->post_handler = NULL;
9861668f
MH
1797 }
1798noclean:
6f0f1dd7
MH
1799 /*
1800 * Remove from the aggrprobe: this path will do nothing in
1801 * __unregister_kprobe_bottom().
1802 */
49a2a1b8 1803 list_del_rcu(&p->list);
6f0f1dd7
MH
1804 if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
1805 /*
 1806 * Try to optimize this probe again, because the post
 1807 * handler may have been changed.
1808 */
1809 optimize_kprobe(ap);
49a2a1b8 1810 }
9861668f 1811 return 0;
6f0f1dd7
MH
1812
1813disarmed:
1814 hlist_del_rcu(&ap->hlist);
1815 return 0;
9861668f 1816}
3516a460 1817
55479f64 1818static void __unregister_kprobe_bottom(struct kprobe *p)
9861668f 1819{
6d8e40a8 1820 struct kprobe *ap;
b3e55c72 1821
e8386a0c 1822 if (list_empty(&p->list))
6274de49 1823 /* This is an independent kprobe */
0498b635 1824 arch_remove_kprobe(p);
e8386a0c 1825 else if (list_is_singular(&p->list)) {
6274de49 1826 /* This is the last child of an aggrprobe */
6d8e40a8 1827 ap = list_entry(p->list.next, struct kprobe, list);
e8386a0c 1828 list_del(&p->list);
6d8e40a8 1829 free_aggr_kprobe(ap);
9861668f 1830 }
6274de49 1831 /* Otherwise, do nothing. */
9861668f
MH
1832}
1833
55479f64 1834int register_kprobes(struct kprobe **kps, int num)
9861668f
MH
1835{
1836 int i, ret = 0;
1837
1838 if (num <= 0)
1839 return -EINVAL;
1840 for (i = 0; i < num; i++) {
49ad2fd7 1841 ret = register_kprobe(kps[i]);
67dddaad
MH
1842 if (ret < 0) {
1843 if (i > 0)
1844 unregister_kprobes(kps, i);
9861668f 1845 break;
36721656 1846 }
49a2a1b8 1847 }
9861668f
MH
1848 return ret;
1849}
99081ab5 1850EXPORT_SYMBOL_GPL(register_kprobes);
9861668f 1851
55479f64 1852void unregister_kprobe(struct kprobe *p)
9861668f
MH
1853{
1854 unregister_kprobes(&p, 1);
1855}
99081ab5 1856EXPORT_SYMBOL_GPL(unregister_kprobe);
9861668f 1857
55479f64 1858void unregister_kprobes(struct kprobe **kps, int num)
9861668f
MH
1859{
1860 int i;
1861
1862 if (num <= 0)
1863 return;
1864 mutex_lock(&kprobe_mutex);
1865 for (i = 0; i < num; i++)
1866 if (__unregister_kprobe_top(kps[i]) < 0)
1867 kps[i]->addr = NULL;
1868 mutex_unlock(&kprobe_mutex);
1869
ae8b7ce7 1870 synchronize_rcu();
9861668f
MH
1871 for (i = 0; i < num; i++)
1872 if (kps[i]->addr)
1873 __unregister_kprobe_bottom(kps[i]);
1da177e4 1874}
99081ab5 1875EXPORT_SYMBOL_GPL(unregister_kprobes);
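/*
 * Illustrative sketch (editor's example, not part of kprobes.c): batch
 * registration with register_kprobes()/unregister_kprobes(). If any probe
 * fails to register, register_kprobes() unregisters those registered so
 * far. The symbols and 'example_*' names are assumptions for illustration.
 */
#if 0 /* example only */
static struct kprobe example_kp_a = { .symbol_name = "kernel_clone" };
static struct kprobe example_kp_b = { .symbol_name = "do_exit" };
static struct kprobe *example_kps[] = { &example_kp_a, &example_kp_b };

static int __init example_batch_init(void)
{
	/* Registers both probes, or none if either registration fails. */
	return register_kprobes(example_kps, ARRAY_SIZE(example_kps));
}

static void __exit example_batch_exit(void)
{
	unregister_kprobes(example_kps, ARRAY_SIZE(example_kps));
}
#endif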
1da177e4 1876
5f6bee34
NR
1877int __weak kprobe_exceptions_notify(struct notifier_block *self,
1878 unsigned long val, void *data)
fc62d020
NR
1879{
1880 return NOTIFY_DONE;
1881}
5f6bee34 1882NOKPROBE_SYMBOL(kprobe_exceptions_notify);
fc62d020 1883
1da177e4 1884static struct notifier_block kprobe_exceptions_nb = {
3d5631e0
AK
1885 .notifier_call = kprobe_exceptions_notify,
1886 .priority = 0x7fffffff /* we need to be notified first */
1887};
1888
9edddaa2 1889#ifdef CONFIG_KRETPROBES
66ada2cc 1890
73f9b911 1891#if !defined(CONFIG_KRETPROBE_ON_RETHOOK)
4bbd9345 1892
1893/* callbacks for objpool of kretprobe instances */
1894static int kretprobe_init_inst(void *nod, void *context)
1895{
1896 struct kretprobe_instance *ri = nod;
1897
1898 ri->rph = context;
1899 return 0;
1900}
1901static int kretprobe_fini_pool(struct objpool_head *head, void *context)
1902{
1903 kfree(context);
1904 return 0;
1905}
1906
43994049
MH
1907static void free_rp_inst_rcu(struct rcu_head *head)
1908{
1909 struct kretprobe_instance *ri = container_of(head, struct kretprobe_instance, rcu);
4bbd9345 1910 struct kretprobe_holder *rph = ri->rph;
43994049 1911
4bbd9345 1912 objpool_drop(ri, &rph->pool);
43994049
MH
1913}
1914NOKPROBE_SYMBOL(free_rp_inst_rcu);
1915
1916static void recycle_rp_inst(struct kretprobe_instance *ri)
1917{
1918 struct kretprobe *rp = get_kretprobe(ri);
1919
1920 if (likely(rp))
4bbd9345 1921 objpool_push(ri, &rp->rph->pool);
43994049
MH
1922 else
1923 call_rcu(&ri->rcu, free_rp_inst_rcu);
1924}
1925NOKPROBE_SYMBOL(recycle_rp_inst);
1926
1927/*
1928 * This function is called from delayed_put_task_struct() when a task is
1929 * dead and cleaned up to recycle any kretprobe instances associated with
 1930 * this task. These leftover instances represent probed functions that
1931 * have been called but will never return.
1932 */
1933void kprobe_flush_task(struct task_struct *tk)
1934{
1935 struct kretprobe_instance *ri;
1936 struct llist_node *node;
1937
1938 /* Early boot, not yet initialized. */
1939 if (unlikely(!kprobes_initialized))
1940 return;
1941
1942 kprobe_busy_begin();
1943
1944 node = __llist_del_all(&tk->kretprobe_instances);
1945 while (node) {
1946 ri = container_of(node, struct kretprobe_instance, llist);
1947 node = node->next;
1948
1949 recycle_rp_inst(ri);
1950 }
1951
1952 kprobe_busy_end();
1953}
1954NOKPROBE_SYMBOL(kprobe_flush_task);
1955
1956static inline void free_rp_inst(struct kretprobe *rp)
1957{
4bbd9345 1958 struct kretprobe_holder *rph = rp->rph;
43994049 1959
4bbd9345 1960 if (!rph)
1961 return;
1962 rp->rph = NULL;
1963 objpool_fini(&rph->pool);
43994049
MH
1964}
1965
03bac0df
MH
 1966 /* This assumes 'tsk' is the current task or a task that is not running. */
1967static kprobe_opcode_t *__kretprobe_find_ret_addr(struct task_struct *tsk,
1968 struct llist_node **cur)
3d7e3382 1969{
d741bf41 1970 struct kretprobe_instance *ri = NULL;
03bac0df
MH
1971 struct llist_node *node = *cur;
1972
1973 if (!node)
1974 node = tsk->kretprobe_instances.first;
1975 else
1976 node = node->next;
66ada2cc 1977
d741bf41
PZ
1978 while (node) {
1979 ri = container_of(node, struct kretprobe_instance, llist);
96fed8ac 1980 if (ri->ret_addr != kretprobe_trampoline_addr()) {
03bac0df
MH
1981 *cur = node;
1982 return ri->ret_addr;
d741bf41 1983 }
d741bf41 1984 node = node->next;
66ada2cc 1985 }
03bac0df 1986 return NULL;
3d7e3382 1987}
03bac0df 1988NOKPROBE_SYMBOL(__kretprobe_find_ret_addr);
1da177e4 1989
03bac0df
MH
1990/**
1991 * kretprobe_find_ret_addr -- Find correct return address modified by kretprobe
1992 * @tsk: Target task
1993 * @fp: A frame pointer
1994 * @cur: a storage of the loop cursor llist_node pointer for next call
1995 *
 1996 * Find the correct return address modified by a kretprobe on @tsk and
 1997 * return it as an unsigned long. If it finds the return address, this
 1998 * returns that address value, otherwise it returns 0.
 1999 * The @tsk must be 'current' or a task which is not running. @fp is a hint
 2000 * used to find the correct return address - it is compared with the
 2001 * 'kretprobe_instance::fp' field. The @cur is a loop cursor for searching
 2002 * the kretprobe return addresses on @tsk. The '*@cur' should be NULL at
 2003 * the first call, but '@cur' itself must NOT be NULL.
2004 */
2005unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
2006 struct llist_node **cur)
2007{
9efd24ec 2008 struct kretprobe_instance *ri;
03bac0df
MH
2009 kprobe_opcode_t *ret;
2010
2011 if (WARN_ON_ONCE(!cur))
2012 return 0;
66ada2cc 2013
03bac0df
MH
2014 do {
2015 ret = __kretprobe_find_ret_addr(tsk, cur);
2016 if (!ret)
2017 break;
2018 ri = container_of(*cur, struct kretprobe_instance, llist);
2019 } while (ri->fp != fp);
2020
2021 return (unsigned long)ret;
2022}
2023NOKPROBE_SYMBOL(kretprobe_find_ret_addr);
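/*
 * Illustrative sketch (editor's example, not part of kprobes.c): how an
 * arch stack unwinder might use kretprobe_find_ret_addr() to replace the
 * trampoline address with the real return address. The function and
 * parameter names are assumptions; a real unwinder keeps 'cur' across
 * frames so that nested kretprobes resolve in order.
 */
#if 0 /* example only */
static unsigned long example_unwind_fixup(struct task_struct *task,
					  unsigned long addr, void *fp)
{
	struct llist_node *cur = NULL;	/* cursor must be NULL on first call */
	unsigned long real;

	if (addr != (unsigned long)kretprobe_trampoline_addr())
		return addr;		/* not a kretprobe'd frame */

	real = kretprobe_find_ret_addr(task, fp, &cur);
	return real ? : addr;		/* fall back if no match was found */
}
#endif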
2024
bf094cff
MH
2025void __weak arch_kretprobe_fixup_return(struct pt_regs *regs,
2026 kprobe_opcode_t *correct_ret_addr)
2027{
2028 /*
 2029 * Do nothing by default. Each arch should implement this, if possible,
 2030 * to update the fake return address on the stack with the correct one.
2031 */
2032}
66ada2cc
MH
2033
2034unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
66ada2cc
MH
2035 void *frame_pointer)
2036{
d741bf41 2037 struct kretprobe_instance *ri = NULL;
03bac0df 2038 struct llist_node *first, *node = NULL;
e1164787 2039 kprobe_opcode_t *correct_ret_addr;
d741bf41 2040 struct kretprobe *rp;
66ada2cc 2041
03bac0df
MH
 2042 /* Find the correct address and all nodes for this frame. */
2043 correct_ret_addr = __kretprobe_find_ret_addr(current, &node);
2044 if (!correct_ret_addr) {
2045 pr_err("kretprobe: Return address not found, not execute handler. Maybe there is a bug in the kernel.\n");
2046 BUG_ON(1);
66ada2cc
MH
2047 }
2048
df91c5bc
MH
2049 /*
2050 * Set the return address as the instruction pointer, because if the
2051 * user handler calls stack_trace_save_regs() with this 'regs',
2052 * the stack trace will start from the instruction pointer.
2053 */
2054 instruction_pointer_set(regs, (unsigned long)correct_ret_addr);
66ada2cc 2055
03bac0df
MH
 2056 /* Run the user handlers of the nodes. */
2057 first = current->kretprobe_instances.first;
d741bf41
PZ
2058 while (first) {
2059 ri = container_of(first, struct kretprobe_instance, llist);
03bac0df
MH
2060
2061 if (WARN_ON_ONCE(ri->fp != frame_pointer))
2062 break;
66ada2cc 2063
d741bf41
PZ
2064 rp = get_kretprobe(ri);
2065 if (rp && rp->handler) {
66ada2cc
MH
2066 struct kprobe *prev = kprobe_running();
2067
d741bf41 2068 __this_cpu_write(current_kprobe, &rp->kp);
66ada2cc 2069 ri->ret_addr = correct_ret_addr;
d741bf41 2070 rp->handler(ri, regs);
66ada2cc
MH
2071 __this_cpu_write(current_kprobe, prev);
2072 }
03bac0df
MH
2073 if (first == node)
2074 break;
2075
2076 first = first->next;
2077 }
2078
bf094cff
MH
2079 arch_kretprobe_fixup_return(regs, correct_ret_addr);
2080
03bac0df
MH
2081 /* Unlink all nodes for this frame. */
2082 first = current->kretprobe_instances.first;
2083 current->kretprobe_instances.first = node->next;
2084 node->next = NULL;
2085
2086 /* Recycle free instances. */
2087 while (first) {
2088 ri = container_of(first, struct kretprobe_instance, llist);
2089 first = first->next;
66ada2cc 2090
b3388178 2091 recycle_rp_inst(ri);
66ada2cc
MH
2092 }
2093
66ada2cc
MH
2094 return (unsigned long)correct_ret_addr;
2095}
2096NOKPROBE_SYMBOL(__kretprobe_trampoline_handler)
2097
e65cefe8
AB
2098/*
 2099 * This kprobe pre_handler is registered with every kretprobe. When the
 2100 * probe hits, it will set up the return probe.
2101 */
820aede0 2102static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
e65cefe8
AB
2103{
2104 struct kretprobe *rp = container_of(p, struct kretprobe, kp);
4bbd9345 2105 struct kretprobe_holder *rph = rp->rph;
ef53d9c5 2106 struct kretprobe_instance *ri;
e65cefe8 2107
4bbd9345 2108 ri = objpool_pop(&rph->pool);
2109 if (!ri) {
6e426e0f
PZ
2110 rp->nmissed++;
2111 return 0;
2112 }
4c4308cb 2113
6e426e0f 2114 if (rp->entry_handler && rp->entry_handler(ri, regs)) {
4bbd9345 2115 objpool_push(ri, &rph->pool);
6e426e0f 2116 return 0;
ef53d9c5 2117 }
6e426e0f
PZ
2118
2119 arch_prepare_kretprobe(ri, regs);
2120
2121 __llist_add(&ri->llist, &current->kretprobe_instances);
2122
e65cefe8
AB
2123 return 0;
2124}
820aede0 2125NOKPROBE_SYMBOL(pre_handler_kretprobe);
73f9b911
MH
2126#else /* CONFIG_KRETPROBE_ON_RETHOOK */
2127/*
 2128 * This kprobe pre_handler is registered with every kretprobe. When the
 2129 * probe hits, it will set up the return probe.
2130 */
2131static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2132{
2133 struct kretprobe *rp = container_of(p, struct kretprobe, kp);
2134 struct kretprobe_instance *ri;
2135 struct rethook_node *rhn;
2136
2137 rhn = rethook_try_get(rp->rh);
2138 if (!rhn) {
2139 rp->nmissed++;
2140 return 0;
2141 }
2142
2143 ri = container_of(rhn, struct kretprobe_instance, node);
2144
2145 if (rp->entry_handler && rp->entry_handler(ri, regs))
2146 rethook_recycle(rhn);
2147 else
2148 rethook_hook(rhn, regs, kprobe_ftrace(p));
2149
2150 return 0;
2151}
2152NOKPROBE_SYMBOL(pre_handler_kretprobe);
2153
2154static void kretprobe_rethook_handler(struct rethook_node *rh, void *data,
cb16330d 2155 unsigned long ret_addr,
73f9b911
MH
2156 struct pt_regs *regs)
2157{
2158 struct kretprobe *rp = (struct kretprobe *)data;
2159 struct kretprobe_instance *ri;
2160 struct kprobe_ctlblk *kcb;
2161
 2162 /* The data must NOT be NULL; if it is, the rethook data structure is broken. */
1d661ed5 2163 if (WARN_ON_ONCE(!data) || !rp->handler)
73f9b911
MH
2164 return;
2165
2166 __this_cpu_write(current_kprobe, &rp->kp);
2167 kcb = get_kprobe_ctlblk();
2168 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
2169
2170 ri = container_of(rh, struct kretprobe_instance, node);
2171 rp->handler(ri, regs);
2172
2173 __this_cpu_write(current_kprobe, NULL);
2174}
2175NOKPROBE_SYMBOL(kretprobe_rethook_handler);
2176
2177#endif /* !CONFIG_KRETPROBE_ON_RETHOOK */
e65cefe8 2178
97c753e6
MH
2179/**
2180 * kprobe_on_func_entry() -- check whether given address is function entry
2181 * @addr: Target address
2182 * @sym: Target symbol name
2183 * @offset: The offset from the symbol or the address
2184 *
 2185 * This checks whether the given @addr+@offset or @sym+@offset is the
 2186 * function entry address or not.
 2187 * This returns 0 if it is the function entry, or -EINVAL if it is not.
 2188 * It also returns -ENOENT if the symbol or address lookup fails.
 2189 * The caller must pass either @addr or @sym (the other must be NULL),
 2190 * or this returns -EINVAL.
2191 */
2192int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
1d585e70 2193{
cc66bb91
PZ
2194 bool on_func_entry;
2195 kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset, &on_func_entry);
1d585e70
NR
2196
2197 if (IS_ERR(kp_addr))
97c753e6 2198 return PTR_ERR(kp_addr);
1d585e70 2199
cc66bb91 2200 if (!on_func_entry)
97c753e6
MH
2201 return -EINVAL;
2202
2203 return 0;
1d585e70
NR
2204}
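/*
 * Illustrative sketch (editor's example, not part of kprobes.c): vetting a
 * symbol+offset spec with kprobe_on_func_entry() before using it for a
 * kretprobe. The symbol 'vfs_read' and the offsets are assumptions.
 */
#if 0 /* example only */
static void example_check_entry(void)
{
	/* The symbol entry itself is expected to return 0. */
	if (!kprobe_on_func_entry(NULL, "vfs_read", 0))
		pr_info("vfs_read+0x0 is a function entry\n");

	/* An offset inside the function body typically gives -EINVAL. */
	if (kprobe_on_func_entry(NULL, "vfs_read", 0x10) == -EINVAL)
		pr_info("vfs_read+0x10 is not a function entry\n");
}
#endif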
2205
55479f64 2206int register_kretprobe(struct kretprobe *rp)
b94cce92 2207{
97c753e6 2208 int ret;
b94cce92 2209 int i;
b2a5cd69 2210 void *addr;
90ec5e89 2211
97c753e6
MH
2212 ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset);
2213 if (ret)
2214 return ret;
f438d914 2215
223a76b2 2216 /* If only 'rp->kp.addr' is specified, check for kprobe re-registration. */
33b1d146 2217 if (rp->kp.addr && warn_kprobe_rereg(&rp->kp))
0188b878
WS
2218 return -EINVAL;
2219
f438d914 2220 if (kretprobe_blacklist_size) {
b2a5cd69 2221 addr = kprobe_addr(&rp->kp);
bc81d48d
MH
2222 if (IS_ERR(addr))
2223 return PTR_ERR(addr);
f438d914
MH
2224
2225 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2226 if (kretprobe_blacklist[i].addr == addr)
2227 return -EINVAL;
2228 }
2229 }
b94cce92 2230
6bbfa441
MH
2231 if (rp->data_size > KRETPROBE_MAX_DATA_SIZE)
2232 return -E2BIG;
2233
b94cce92 2234 rp->kp.pre_handler = pre_handler_kretprobe;
7522a842 2235 rp->kp.post_handler = NULL;
b94cce92
HN
2236
2237 /* Pre-allocate memory for max kretprobe instances */
3b7ddab8 2238 if (rp->maxactive <= 0)
c2ef6661 2239 rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
3b7ddab8 2240
73f9b911 2241#ifdef CONFIG_KRETPROBE_ON_RETHOOK
4bbd9345 2242 rp->rh = rethook_alloc((void *)rp, kretprobe_rethook_handler,
2243 sizeof(struct kretprobe_instance) +
2244 rp->data_size, rp->maxactive);
2245 if (IS_ERR(rp->rh))
2246 return PTR_ERR(rp->rh);
73f9b911 2247
73f9b911
MH
2248 rp->nmissed = 0;
2249 /* Establish function entry probe point */
2250 ret = register_kprobe(&rp->kp);
2251 if (ret != 0) {
2252 rethook_free(rp->rh);
2253 rp->rh = NULL;
2254 }
2255#else /* !CONFIG_KRETPROBE_ON_RETHOOK */
d741bf41
PZ
2256 rp->rph = kzalloc(sizeof(struct kretprobe_holder), GFP_KERNEL);
2257 if (!rp->rph)
2258 return -ENOMEM;
2259
4bbd9345 2260 if (objpool_init(&rp->rph->pool, rp->maxactive, rp->data_size +
2261 sizeof(struct kretprobe_instance), GFP_KERNEL,
2262 rp->rph, kretprobe_init_inst, kretprobe_fini_pool)) {
2263 kfree(rp->rph);
2264 rp->rph = NULL;
2265 return -ENOMEM;
b94cce92 2266 }
d839a656 2267 rcu_assign_pointer(rp->rph->rp, rp);
b94cce92
HN
2268 rp->nmissed = 0;
2269 /* Establish function entry probe point */
49ad2fd7 2270 ret = register_kprobe(&rp->kp);
4a296e07 2271 if (ret != 0)
b94cce92 2272 free_rp_inst(rp);
73f9b911 2273#endif
b94cce92
HN
2274 return ret;
2275}
99081ab5 2276EXPORT_SYMBOL_GPL(register_kretprobe);
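/*
 * Illustrative sketch (editor's example, not part of kprobes.c): measuring
 * a function's duration with register_kretprobe(), stashing a timestamp in
 * the per-instance data area. The target symbol and 'example_*' names are
 * assumptions for illustration only.
 */
#if 0 /* example only */
static int example_entry_handler(struct kretprobe_instance *ri,
				 struct pt_regs *regs)
{
	*(u64 *)ri->data = ktime_get_ns();	/* record entry time */
	return 0;	/* 0 = track this instance until the function returns */
}

static int example_ret_handler(struct kretprobe_instance *ri,
			       struct pt_regs *regs)
{
	u64 delta = ktime_get_ns() - *(u64 *)ri->data;

	pr_info("took %llu ns\n", (unsigned long long)delta);
	return 0;
}

static struct kretprobe example_rp = {
	.kp.symbol_name	= "kernel_clone",	/* assumed target */
	.entry_handler	= example_entry_handler,
	.handler	= example_ret_handler,
	.data_size	= sizeof(u64),	/* bytes reserved at ri->data */
	.maxactive	= 20,	/* concurrent instances; <= 0 picks a default */
};
/* register_kretprobe(&example_rp); ... unregister_kretprobe(&example_rp); */
#endif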
b94cce92 2277
55479f64 2278int register_kretprobes(struct kretprobe **rps, int num)
4a296e07
MH
2279{
2280 int ret = 0, i;
2281
2282 if (num <= 0)
2283 return -EINVAL;
2284 for (i = 0; i < num; i++) {
49ad2fd7 2285 ret = register_kretprobe(rps[i]);
67dddaad
MH
2286 if (ret < 0) {
2287 if (i > 0)
2288 unregister_kretprobes(rps, i);
4a296e07
MH
2289 break;
2290 }
2291 }
2292 return ret;
2293}
99081ab5 2294EXPORT_SYMBOL_GPL(register_kretprobes);
4a296e07 2295
55479f64 2296void unregister_kretprobe(struct kretprobe *rp)
4a296e07
MH
2297{
2298 unregister_kretprobes(&rp, 1);
2299}
99081ab5 2300EXPORT_SYMBOL_GPL(unregister_kretprobe);
4a296e07 2301
55479f64 2302void unregister_kretprobes(struct kretprobe **rps, int num)
4a296e07
MH
2303{
2304 int i;
2305
2306 if (num <= 0)
2307 return;
2308 mutex_lock(&kprobe_mutex);
d741bf41 2309 for (i = 0; i < num; i++) {
4a296e07
MH
2310 if (__unregister_kprobe_top(&rps[i]->kp) < 0)
2311 rps[i]->kp.addr = NULL;
73f9b911
MH
2312#ifdef CONFIG_KRETPROBE_ON_RETHOOK
2313 rethook_free(rps[i]->rh);
2314#else
d839a656 2315 rcu_assign_pointer(rps[i]->rph->rp, NULL);
73f9b911 2316#endif
d741bf41 2317 }
4a296e07
MH
2318 mutex_unlock(&kprobe_mutex);
2319
ae8b7ce7 2320 synchronize_rcu();
4a296e07
MH
2321 for (i = 0; i < num; i++) {
2322 if (rps[i]->kp.addr) {
2323 __unregister_kprobe_bottom(&rps[i]->kp);
73f9b911 2324#ifndef CONFIG_KRETPROBE_ON_RETHOOK
d741bf41 2325 free_rp_inst(rps[i]);
73f9b911 2326#endif
4a296e07
MH
2327 }
2328 }
2329}
99081ab5 2330EXPORT_SYMBOL_GPL(unregister_kretprobes);
4a296e07 2331
9edddaa2 2332#else /* CONFIG_KRETPROBES */
55479f64 2333int register_kretprobe(struct kretprobe *rp)
b94cce92 2334{
223a76b2 2335 return -EOPNOTSUPP;
b94cce92 2336}
99081ab5 2337EXPORT_SYMBOL_GPL(register_kretprobe);
b94cce92 2338
55479f64 2339int register_kretprobes(struct kretprobe **rps, int num)
346fd59b 2340{
223a76b2 2341 return -EOPNOTSUPP;
346fd59b 2342}
99081ab5
MH
2343EXPORT_SYMBOL_GPL(register_kretprobes);
2344
55479f64 2345void unregister_kretprobe(struct kretprobe *rp)
b94cce92 2346{
4a296e07 2347}
99081ab5 2348EXPORT_SYMBOL_GPL(unregister_kretprobe);
b94cce92 2349
55479f64 2350void unregister_kretprobes(struct kretprobe **rps, int num)
4a296e07
MH
2351{
2352}
99081ab5 2353EXPORT_SYMBOL_GPL(unregister_kretprobes);
4c4308cb 2354
820aede0 2355static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
4a296e07
MH
2356{
2357 return 0;
b94cce92 2358}
820aede0 2359NOKPROBE_SYMBOL(pre_handler_kretprobe);
b94cce92 2360
4a296e07
MH
2361#endif /* CONFIG_KRETPROBES */
2362
e8386a0c 2363/* Set the kprobe gone and remove its instruction buffer. */
55479f64 2364static void kill_kprobe(struct kprobe *p)
e8386a0c
MH
2365{
2366 struct kprobe *kp;
de5bd88d 2367
7e6a71d8
MH
2368 lockdep_assert_held(&kprobe_mutex);
2369
0c76ef3f
LH
2370 /*
2371 * The module is going away. We should disarm the kprobe which
 2372 * is using ftrace, because the ftrace framework is still available at
 2373 * the 'MODULE_STATE_GOING' notification.
2374 */
2375 if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
2376 disarm_kprobe_ftrace(p);
2377
e8386a0c 2378 p->flags |= KPROBE_FLAG_GONE;
afd66255 2379 if (kprobe_aggrprobe(p)) {
e8386a0c
MH
2380 /*
2381 * If this is an aggr_kprobe, we have to list all the
2382 * chained probes and mark them GONE.
2383 */
7e6a71d8 2384 list_for_each_entry(kp, &p->list, list)
e8386a0c
MH
2385 kp->flags |= KPROBE_FLAG_GONE;
2386 p->post_handler = NULL;
afd66255 2387 kill_optimized_kprobe(p);
e8386a0c
MH
2388 }
2389 /*
2390 * Here, we can remove insn_slot safely, because no thread calls
2391 * the original probed function (which will be freed soon) any more.
2392 */
2393 arch_remove_kprobe(p);
2394}
2395
c0614829 2396/* Disable one kprobe */
55479f64 2397int disable_kprobe(struct kprobe *kp)
c0614829
MH
2398{
2399 int ret = 0;
297f9233 2400 struct kprobe *p;
c0614829
MH
2401
2402 mutex_lock(&kprobe_mutex);
2403
6f0f1dd7 2404 /* Disable this kprobe */
297f9233
JY
2405 p = __disable_kprobe(kp);
2406 if (IS_ERR(p))
2407 ret = PTR_ERR(p);
c0614829 2408
c0614829
MH
2409 mutex_unlock(&kprobe_mutex);
2410 return ret;
2411}
2412EXPORT_SYMBOL_GPL(disable_kprobe);
2413
2414/* Enable one kprobe */
55479f64 2415int enable_kprobe(struct kprobe *kp)
c0614829
MH
2416{
2417 int ret = 0;
2418 struct kprobe *p;
2419
2420 mutex_lock(&kprobe_mutex);
2421
2422 /* Check whether specified probe is valid. */
2423 p = __get_valid_kprobe(kp);
2424 if (unlikely(p == NULL)) {
2425 ret = -EINVAL;
2426 goto out;
2427 }
2428
2429 if (kprobe_gone(kp)) {
 2430 /* This kprobe has gone; we cannot enable it. */
2431 ret = -EINVAL;
2432 goto out;
2433 }
2434
2435 if (p != kp)
2436 kp->flags &= ~KPROBE_FLAG_DISABLED;
2437
2438 if (!kprobes_all_disarmed && kprobe_disabled(p)) {
2439 p->flags &= ~KPROBE_FLAG_DISABLED;
12310e34 2440 ret = arm_kprobe(p);
4a6f316d 2441 if (ret) {
12310e34 2442 p->flags |= KPROBE_FLAG_DISABLED;
4a6f316d
LQ
2443 if (p != kp)
2444 kp->flags |= KPROBE_FLAG_DISABLED;
2445 }
c0614829
MH
2446 }
2447out:
2448 mutex_unlock(&kprobe_mutex);
2449 return ret;
2450}
2451EXPORT_SYMBOL_GPL(enable_kprobe);
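/*
 * Illustrative sketch (editor's example, not part of kprobes.c):
 * registering a probe in the disabled state and arming it on demand with
 * enable_kprobe()/disable_kprobe(). The symbol is an assumption; the probe
 * must have been registered with register_kprobe() first.
 */
#if 0 /* example only */
static struct kprobe example_kp = {
	.symbol_name	= "kernel_clone",
	.flags		= KPROBE_FLAG_DISABLED,	/* registered but not armed */
};

static int example_toggle(bool on)
{
	return on ? enable_kprobe(&example_kp) : disable_kprobe(&example_kp);
}
#endif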
2452
4458515b 2453/* Caller must NOT call this in usual path. This is only for critical case */
820aede0 2454void dump_kprobe(struct kprobe *kp)
24851d24 2455{
9c89bb8e 2456 pr_err("Dump kprobe:\n.symbol_name = %s, .offset = %x, .addr = %pS\n",
4458515b 2457 kp->symbol_name, kp->offset, kp->addr);
24851d24 2458}
820aede0 2459NOKPROBE_SYMBOL(dump_kprobe);
24851d24 2460
fb1a59fa
MH
2461int kprobe_add_ksym_blacklist(unsigned long entry)
2462{
2463 struct kprobe_blacklist_entry *ent;
2464 unsigned long offset = 0, size = 0;
2465
2466 if (!kernel_text_address(entry) ||
2467 !kallsyms_lookup_size_offset(entry, &size, &offset))
2468 return -EINVAL;
2469
2470 ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2471 if (!ent)
2472 return -ENOMEM;
2473 ent->start_addr = entry;
2474 ent->end_addr = entry + size;
2475 INIT_LIST_HEAD(&ent->list);
2476 list_add_tail(&ent->list, &kprobe_blacklist);
2477
2478 return (int)size;
2479}
2480
2481/* Add all symbols in given area into kprobe blacklist */
2482int kprobe_add_area_blacklist(unsigned long start, unsigned long end)
2483{
2484 unsigned long entry;
2485 int ret = 0;
2486
2487 for (entry = start; entry < end; entry += ret) {
2488 ret = kprobe_add_ksym_blacklist(entry);
2489 if (ret < 0)
2490 return ret;
2491 if (ret == 0) /* In case of alias symbol */
2492 ret = 1;
2493 }
2494 return 0;
2495}
2496
d002b8bc
AH
2497int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
2498 char *type, char *sym)
2499{
2500 return -ERANGE;
2501}
2502
2503int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
2504 char *sym)
2505{
2506#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
2507 if (!kprobe_cache_get_kallsym(&kprobe_insn_slots, &symnum, value, type, sym))
2508 return 0;
2509#ifdef CONFIG_OPTPROBES
2510 if (!kprobe_cache_get_kallsym(&kprobe_optinsn_slots, &symnum, value, type, sym))
2511 return 0;
2512#endif
2513#endif
2514 if (!arch_kprobe_get_kallsym(&symnum, value, type, sym))
2515 return 0;
2516 return -ERANGE;
2517}
2518
fb1a59fa
MH
2519int __init __weak arch_populate_kprobe_blacklist(void)
2520{
2521 return 0;
2522}
2523
376e2424
MH
2524/*
2525 * Lookup and populate the kprobe_blacklist.
2526 *
2527 * Unlike the kretprobe blacklist, we'll need to determine
2528 * the range of addresses that belong to the said functions,
2529 * since a kprobe need not necessarily be at the beginning
2530 * of a function.
2531 */
2532static int __init populate_kprobe_blacklist(unsigned long *start,
2533 unsigned long *end)
2534{
fb1a59fa 2535 unsigned long entry;
376e2424 2536 unsigned long *iter;
fb1a59fa 2537 int ret;
376e2424
MH
2538
2539 for (iter = start; iter < end; iter++) {
f2ec8d9a 2540 entry = (unsigned long)dereference_symbol_descriptor((void *)*iter);
fb1a59fa
MH
2541 ret = kprobe_add_ksym_blacklist(entry);
2542 if (ret == -EINVAL)
376e2424 2543 continue;
fb1a59fa
MH
2544 if (ret < 0)
2545 return ret;
376e2424 2546 }
fb1a59fa 2547
223a76b2 2548 /* Symbols in '__kprobes_text' are blacklisted */
fb1a59fa
MH
2549 ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start,
2550 (unsigned long)__kprobes_text_end);
66e9b071
TG
2551 if (ret)
2552 return ret;
2553
223a76b2 2554 /* Symbols in 'noinstr' section are blacklisted */
66e9b071
TG
2555 ret = kprobe_add_area_blacklist((unsigned long)__noinstr_text_start,
2556 (unsigned long)__noinstr_text_end);
fb1a59fa
MH
2557
2558 return ret ? : arch_populate_kprobe_blacklist();
376e2424
MH
2559}
2560
7582b7be
MRI
2561#ifdef CONFIG_MODULES
2562/* Remove all symbols in given area from kprobe blacklist */
2563static void kprobe_remove_area_blacklist(unsigned long start, unsigned long end)
2564{
2565 struct kprobe_blacklist_entry *ent, *n;
2566
2567 list_for_each_entry_safe(ent, n, &kprobe_blacklist, list) {
2568 if (ent->start_addr < start || ent->start_addr >= end)
2569 continue;
2570 list_del(&ent->list);
2571 kfree(ent);
2572 }
2573}
2574
2575static void kprobe_remove_ksym_blacklist(unsigned long entry)
2576{
2577 kprobe_remove_area_blacklist(entry, entry + 1);
2578}
2579
1e6769b0
MH
2580static void add_module_kprobe_blacklist(struct module *mod)
2581{
2582 unsigned long start, end;
16db6264
MH
2583 int i;
2584
2585 if (mod->kprobe_blacklist) {
2586 for (i = 0; i < mod->num_kprobe_blacklist; i++)
2587 kprobe_add_ksym_blacklist(mod->kprobe_blacklist[i]);
2588 }
1e6769b0
MH
2589
2590 start = (unsigned long)mod->kprobes_text_start;
2591 if (start) {
2592 end = start + mod->kprobes_text_size;
2593 kprobe_add_area_blacklist(start, end);
2594 }
66e9b071
TG
2595
2596 start = (unsigned long)mod->noinstr_text_start;
2597 if (start) {
2598 end = start + mod->noinstr_text_size;
2599 kprobe_add_area_blacklist(start, end);
2600 }
1e6769b0
MH
2601}
2602
2603static void remove_module_kprobe_blacklist(struct module *mod)
2604{
2605 unsigned long start, end;
16db6264
MH
2606 int i;
2607
2608 if (mod->kprobe_blacklist) {
2609 for (i = 0; i < mod->num_kprobe_blacklist; i++)
2610 kprobe_remove_ksym_blacklist(mod->kprobe_blacklist[i]);
2611 }
1e6769b0
MH
2612
2613 start = (unsigned long)mod->kprobes_text_start;
2614 if (start) {
2615 end = start + mod->kprobes_text_size;
2616 kprobe_remove_area_blacklist(start, end);
2617 }
66e9b071
TG
2618
2619 start = (unsigned long)mod->noinstr_text_start;
2620 if (start) {
2621 end = start + mod->noinstr_text_size;
2622 kprobe_remove_area_blacklist(start, end);
2623 }
1e6769b0
MH
2624}
2625
e8386a0c 2626/* Module notifier call back, checking kprobes on the module */
55479f64
MH
2627static int kprobes_module_callback(struct notifier_block *nb,
2628 unsigned long val, void *data)
e8386a0c
MH
2629{
2630 struct module *mod = data;
2631 struct hlist_head *head;
e8386a0c
MH
2632 struct kprobe *p;
2633 unsigned int i;
f24659d9 2634 int checkcore = (val == MODULE_STATE_GOING);
e8386a0c 2635
1e6769b0
MH
2636 if (val == MODULE_STATE_COMING) {
2637 mutex_lock(&kprobe_mutex);
2638 add_module_kprobe_blacklist(mod);
2639 mutex_unlock(&kprobe_mutex);
2640 }
f24659d9 2641 if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
e8386a0c
MH
2642 return NOTIFY_DONE;
2643
2644 /*
223a76b2
MH
 2645 * When 'MODULE_STATE_GOING' is notified, both the module '.text' and
 2646 * '.init.text' sections will be freed. When 'MODULE_STATE_LIVE' is
 2647 * notified, only the '.init.text' section will be freed. We need to
f24659d9 2648 * disable kprobes which have been inserted in those sections.
e8386a0c
MH
2649 */
2650 mutex_lock(&kprobe_mutex);
2651 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2652 head = &kprobe_table[i];
7e6a71d8 2653 hlist_for_each_entry(p, head, hlist)
f24659d9
MH
2654 if (within_module_init((unsigned long)p->addr, mod) ||
2655 (checkcore &&
2656 within_module_core((unsigned long)p->addr, mod))) {
e8386a0c
MH
2657 /*
 2658 * The vaddr this probe is installed at will soon
 2659 * be vfreed but not synced to disk. Hence,
2660 * disarming the breakpoint isn't needed.
545a0281
SRV
2661 *
2662 * Note, this will also move any optimized probes
2663 * that are pending to be removed from their
223a76b2 2664 * corresponding lists to the 'freeing_list' and
545a0281 2665 * will not be touched by the delayed
223a76b2 2666 * kprobe_optimizer() work handler.
e8386a0c
MH
2667 */
2668 kill_kprobe(p);
2669 }
2670 }
1e6769b0
MH
2671 if (val == MODULE_STATE_GOING)
2672 remove_module_kprobe_blacklist(mod);
e8386a0c
MH
2673 mutex_unlock(&kprobe_mutex);
2674 return NOTIFY_DONE;
2675}
2676
2677static struct notifier_block kprobe_module_nb = {
2678 .notifier_call = kprobes_module_callback,
2679 .priority = 0
2680};
2681
7582b7be
MRI
2682static int kprobe_register_module_notifier(void)
2683{
2684 return register_module_notifier(&kprobe_module_nb);
2685}
2686#else
2687static int kprobe_register_module_notifier(void)
2688{
2689 return 0;
2690}
2691#endif /* CONFIG_MODULES */
2692
82d083ab
MH
2693void kprobe_free_init_mem(void)
2694{
2695 void *start = (void *)(&__init_begin);
2696 void *end = (void *)(&__init_end);
2697 struct hlist_head *head;
2698 struct kprobe *p;
2699 int i;
2700
2701 mutex_lock(&kprobe_mutex);
2702
223a76b2 2703 /* Kill all kprobes on initmem because the target code has been freed. */
82d083ab
MH
2704 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2705 head = &kprobe_table[i];
2706 hlist_for_each_entry(p, head, hlist) {
2707 if (start <= (void *)p->addr && (void *)p->addr < end)
2708 kill_kprobe(p);
2709 }
2710 }
2711
2712 mutex_unlock(&kprobe_mutex);
2713}
2714
1da177e4
LT
2715static int __init init_kprobes(void)
2716{
ed9492df 2717 int i, err;
1da177e4
LT
2718
2719 /* FIXME allocate the probe table, currently defined statically */
2720 /* initialize all list heads */
d741bf41 2721 for (i = 0; i < KPROBE_TABLE_SIZE; i++)
1da177e4
LT
2722 INIT_HLIST_HEAD(&kprobe_table[i]);
2723
376e2424
MH
2724 err = populate_kprobe_blacklist(__start_kprobe_blacklist,
2725 __stop_kprobe_blacklist);
223a76b2 2726 if (err)
9c89bb8e 2727 pr_err("Failed to populate blacklist (error %d), kprobes not restricted, be careful using them!\n", err);
3d8d996e 2728
f438d914
MH
2729 if (kretprobe_blacklist_size) {
2730 /* lookup the function address from its name */
2731 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
49e0b465 2732 kretprobe_blacklist[i].addr =
290e3070 2733 kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
f438d914 2734 if (!kretprobe_blacklist[i].addr)
9c89bb8e 2735 pr_err("Failed to lookup symbol '%s' for kretprobe blacklist. Maybe the target function is removed or renamed.\n",
f438d914
MH
2736 kretprobe_blacklist[i].name);
2737 }
2738 }
2739
e579abeb
MH
2740 /* By default, kprobes are armed */
2741 kprobes_all_disarmed = false;
bf8f6e5b 2742
c85c9a2c 2743#if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
223a76b2 2744 /* Init 'kprobe_optinsn_slots' for allocation */
c85c9a2c
MH
2745 kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
2746#endif
2747
6772926b 2748 err = arch_init_kprobes();
802eae7c
RL
2749 if (!err)
2750 err = register_die_notifier(&kprobe_exceptions_nb);
e8386a0c 2751 if (!err)
7582b7be 2752 err = kprobe_register_module_notifier();
e8386a0c 2753
ef53d9c5 2754 kprobes_initialized = (err == 0);
a737a3c6 2755 kprobe_sysctls_init();
1da177e4
LT
2756 return err;
2757}
36dadef2 2758early_initcall(init_kprobes);
1da177e4 2759
c85c9a2c
MH
2760#if defined(CONFIG_OPTPROBES)
2761static int __init init_optprobes(void)
2762{
2763 /*
2764 * Enable kprobe optimization - this kicks the optimizer which
 2765 * depends on synchronize_rcu_tasks() and ksoftirqd, which are
 2766 * not available at early initcall time. So delay the optimization.
2767 */
2768 optimize_all_kprobes();
2769
2770 return 0;
2771}
2772subsys_initcall(init_optprobes);
2773#endif
2774
346fd59b 2775#ifdef CONFIG_DEBUG_FS
55479f64 2776static void report_probe(struct seq_file *pi, struct kprobe *p,
afd66255 2777 const char *sym, int offset, char *modname, struct kprobe *pp)
346fd59b
SD
2778{
2779 char *kprobe_type;
81365a94 2780 void *addr = p->addr;
346fd59b
SD
2781
2782 if (p->pre_handler == pre_handler_kretprobe)
2783 kprobe_type = "r";
346fd59b
SD
2784 else
2785 kprobe_type = "k";
afd66255 2786
60f7bb66 2787 if (!kallsyms_show_value(pi->file->f_cred))
81365a94
MH
2788 addr = NULL;
2789
346fd59b 2790 if (sym)
81365a94
MH
2791 seq_printf(pi, "%px %s %s+0x%x %s ",
2792 addr, kprobe_type, sym, offset,
afd66255 2793 (modname ? modname : " "));
81365a94
MH
2794 else /* try to use %pS */
2795 seq_printf(pi, "%px %s %pS ",
2796 addr, kprobe_type, p->addr);
afd66255
MH
2797
2798 if (!pp)
2799 pp = p;
ae6aa16f 2800 seq_printf(pi, "%s%s%s%s\n",
afd66255
MH
2801 (kprobe_gone(p) ? "[GONE]" : ""),
2802 ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
ae6aa16f
MH
2803 (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
2804 (kprobe_ftrace(pp) ? "[FTRACE]" : ""));
346fd59b
SD
2805}
2806
55479f64 2807static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
346fd59b
SD
2808{
2809 return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
2810}
2811
55479f64 2812static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
346fd59b
SD
2813{
2814 (*pos)++;
2815 if (*pos >= KPROBE_TABLE_SIZE)
2816 return NULL;
2817 return pos;
2818}
2819
55479f64 2820static void kprobe_seq_stop(struct seq_file *f, void *v)
346fd59b
SD
2821{
2822 /* Nothing to do */
2823}
2824
55479f64 2825static int show_kprobe_addr(struct seq_file *pi, void *v)
346fd59b
SD
2826{
2827 struct hlist_head *head;
346fd59b 2828 struct kprobe *p, *kp;
9efd24ec 2829 const char *sym;
346fd59b 2830 unsigned int i = *(loff_t *) v;
ffb45122 2831 unsigned long offset = 0;
ab767865 2832 char *modname, namebuf[KSYM_NAME_LEN];
346fd59b
SD
2833
2834 head = &kprobe_table[i];
2835 preempt_disable();
b67bfe0d 2836 hlist_for_each_entry_rcu(p, head, hlist) {
ffb45122 2837 sym = kallsyms_lookup((unsigned long)p->addr, NULL,
346fd59b 2838 &offset, &modname, namebuf);
afd66255 2839 if (kprobe_aggrprobe(p)) {
346fd59b 2840 list_for_each_entry_rcu(kp, &p->list, list)
afd66255 2841 report_probe(pi, kp, sym, offset, modname, p);
346fd59b 2842 } else
afd66255 2843 report_probe(pi, p, sym, offset, modname, NULL);
346fd59b
SD
2844 }
2845 preempt_enable();
2846 return 0;
2847}
2848
eac2cece 2849static const struct seq_operations kprobes_sops = {
346fd59b
SD
2850 .start = kprobe_seq_start,
2851 .next = kprobe_seq_next,
2852 .stop = kprobe_seq_stop,
2853 .show = show_kprobe_addr
2854};
2855
eac2cece 2856DEFINE_SEQ_ATTRIBUTE(kprobes);
346fd59b 2857
63724740
MH
 2858 /* kprobes/blacklist -- shows which functions cannot be probed */
2859static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
2860{
4fdd8887 2861 mutex_lock(&kprobe_mutex);
63724740
MH
2862 return seq_list_start(&kprobe_blacklist, *pos);
2863}
2864
2865static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
2866{
2867 return seq_list_next(v, &kprobe_blacklist, pos);
2868}
2869
2870static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
2871{
2872 struct kprobe_blacklist_entry *ent =
2873 list_entry(v, struct kprobe_blacklist_entry, list);
2874
ffb9bd68 2875 /*
223a76b2 2876 * If '/proc/kallsyms' is not showing kernel addresses, we won't
ffb9bd68
MH
2877 * show them here either.
2878 */
60f7bb66 2879 if (!kallsyms_show_value(m->file->f_cred))
ffb9bd68
MH
2880 seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
2881 (void *)ent->start_addr);
2882 else
2883 seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
2884 (void *)ent->end_addr, (void *)ent->start_addr);
63724740
MH
2885 return 0;
2886}
2887
4fdd8887
MH
2888static void kprobe_blacklist_seq_stop(struct seq_file *f, void *v)
2889{
2890 mutex_unlock(&kprobe_mutex);
2891}
2892
eac2cece 2893static const struct seq_operations kprobe_blacklist_sops = {
63724740
MH
2894 .start = kprobe_blacklist_seq_start,
2895 .next = kprobe_blacklist_seq_next,
4fdd8887 2896 .stop = kprobe_blacklist_seq_stop,
63724740
MH
2897 .show = kprobe_blacklist_seq_show,
2898};
eac2cece 2899DEFINE_SEQ_ATTRIBUTE(kprobe_blacklist);
63724740 2900
12310e34 2901static int arm_all_kprobes(void)
bf8f6e5b
AM
2902{
2903 struct hlist_head *head;
bf8f6e5b 2904 struct kprobe *p;
12310e34
JY
2905 unsigned int i, total = 0, errors = 0;
2906 int err, ret = 0;
bf8f6e5b
AM
2907
2908 mutex_lock(&kprobe_mutex);
2909
e579abeb
MH
2910 /* If kprobes are armed, just return */
2911 if (!kprobes_all_disarmed)
bf8f6e5b
AM
2912 goto already_enabled;
2913
977ad481
WN
2914 /*
 2915 * optimize_kprobe(), called by arm_kprobe(), checks
 2916 * 'kprobes_all_disarmed', so update 'kprobes_all_disarmed' before
 2917 * calling arm_kprobe().
2918 */
2919 kprobes_all_disarmed = false;
afd66255 2920 /* Arming kprobes doesn't optimize the kprobe itself. */
bf8f6e5b
AM
2921 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2922 head = &kprobe_table[i];
12310e34 2923 /* Arm all kprobes on a best-effort basis */
7e6a71d8 2924 hlist_for_each_entry(p, head, hlist) {
12310e34
JY
2925 if (!kprobe_disabled(p)) {
2926 err = arm_kprobe(p);
2927 if (err) {
2928 errors++;
2929 ret = err;
2930 }
2931 total++;
2932 }
2933 }
bf8f6e5b
AM
2934 }
2935
12310e34 2936 if (errors)
9c89bb8e 2937 pr_warn("Kprobes globally enabled, but failed to enable %d out of %d probes. Please check which kprobes are kept disabled via debugfs.\n",
12310e34
JY
2938 errors, total);
2939 else
2940 pr_info("Kprobes globally enabled\n");
bf8f6e5b
AM
2941
2942already_enabled:
2943 mutex_unlock(&kprobe_mutex);
12310e34 2944 return ret;
bf8f6e5b
AM
2945}
2946
297f9233 2947static int disarm_all_kprobes(void)
bf8f6e5b
AM
2948{
2949 struct hlist_head *head;
bf8f6e5b 2950 struct kprobe *p;
297f9233
JY
2951 unsigned int i, total = 0, errors = 0;
2952 int err, ret = 0;
bf8f6e5b
AM
2953
2954 mutex_lock(&kprobe_mutex);
2955
e579abeb 2956 /* If kprobes are already disarmed, just return */
6274de49
MH
2957 if (kprobes_all_disarmed) {
2958 mutex_unlock(&kprobe_mutex);
297f9233 2959 return 0;
6274de49 2960 }
bf8f6e5b 2961
e579abeb 2962 kprobes_all_disarmed = true;
afd66255 2963
bf8f6e5b
AM
2964 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2965 head = &kprobe_table[i];
297f9233 2966 /* Disarm all kprobes on a best-effort basis */
7e6a71d8 2967 hlist_for_each_entry(p, head, hlist) {
297f9233
JY
2968 if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
2969 err = disarm_kprobe(p, false);
2970 if (err) {
2971 errors++;
2972 ret = err;
2973 }
2974 total++;
2975 }
bf8f6e5b
AM
2976 }
2977 }
297f9233
JY
2978
2979 if (errors)
9c89bb8e 2980 pr_warn("Kprobes globally disabled, but failed to disable %d out of %d probes. Please check which kprobes are kept enabled via debugfs.\n",
297f9233
JY
2981 errors, total);
2982 else
2983 pr_info("Kprobes globally disabled\n");
2984
bf8f6e5b 2985 mutex_unlock(&kprobe_mutex);
bf8f6e5b 2986
6274de49
MH
 2987 /* Wait for the optimizer to finish disarming all kprobes. */
2988 wait_for_kprobe_optimizer();
297f9233
JY
2989
2990 return ret;
bf8f6e5b
AM
2991}
2992
2993/*
2994 * XXX: The debugfs bool file interface doesn't allow for callbacks
2995 * when the bool state is switched. We can reuse that facility when
 2996 * available.
2997 */
2998static ssize_t read_enabled_file_bool(struct file *file,
2999 char __user *user_buf, size_t count, loff_t *ppos)
3000{
3001 char buf[3];
3002
e579abeb 3003 if (!kprobes_all_disarmed)
bf8f6e5b
AM
3004 buf[0] = '1';
3005 else
3006 buf[0] = '0';
3007 buf[1] = '\n';
3008 buf[2] = 0x00;
3009 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
3010}
3011
3012static ssize_t write_enabled_file_bool(struct file *file,
3013 const char __user *user_buf, size_t count, loff_t *ppos)
3014{
5d6de7d7
PA
3015 bool enable;
3016 int ret;
bf8f6e5b 3017
5d6de7d7
PA
3018 ret = kstrtobool_from_user(user_buf, count, &enable);
3019 if (ret)
3020 return ret;
bf8f6e5b 3021
5d6de7d7 3022 ret = enable ? arm_all_kprobes() : disarm_all_kprobes();
12310e34
JY
3023 if (ret)
3024 return ret;
3025
bf8f6e5b
AM
3026 return count;
3027}
3028
828c0950 3029static const struct file_operations fops_kp = {
bf8f6e5b
AM
3030 .read = read_enabled_file_bool,
3031 .write = write_enabled_file_bool,
6038f373 3032 .llseek = default_llseek,
bf8f6e5b
AM
3033};
3034
55479f64 3035static int __init debugfs_kprobe_init(void)
346fd59b 3036{
8c0fd1fa 3037 struct dentry *dir;
346fd59b
SD
3038
3039 dir = debugfs_create_dir("kprobes", NULL);
346fd59b 3040
eac2cece 3041 debugfs_create_file("list", 0400, dir, NULL, &kprobes_fops);
346fd59b 3042
8f7262cd 3043 debugfs_create_file("enabled", 0600, dir, NULL, &fops_kp);
63724740 3044
8c0fd1fa 3045 debugfs_create_file("blacklist", 0400, dir, NULL,
eac2cece 3046 &kprobe_blacklist_fops);
bf8f6e5b 3047
346fd59b
SD
3048 return 0;
3049}
3050
3051late_initcall(debugfs_kprobe_init);
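/*
 * Usage sketch for the debugfs interface created above (assuming debugfs
 * is mounted at /sys/kernel/debug):
 *
 *   # cat /sys/kernel/debug/kprobes/list         - registered probes
 *   # cat /sys/kernel/debug/kprobes/blacklist    - unprobeable address ranges
 *   # echo 0 > /sys/kernel/debug/kprobes/enabled - disarm all kprobes
 *   # echo 1 > /sys/kernel/debug/kprobes/enabled - re-arm them
 */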
3052#endif /* CONFIG_DEBUG_FS */