kernel/events/uprobes.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * User-space Probes (UProbes)
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/export.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
#include <linux/swap.h>		/* folio_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include "../../mm/internal.h"	/* munlock_vma_page */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>
#include <linux/khugepaged.h>

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE

static struct rb_root uprobes_tree = RB_ROOT;
/*
 * allows us to skip the uprobe_mmap if there are no uprobe events active
 * at this time.  Probably a fine grained per inode count is better?
 */
#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */

#define UPROBES_HASH_SZ	13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem);

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN	0

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	refcount_t		ref;
	struct rw_semaphore	register_rwsem;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct uprobe_consumer	*consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	loff_t			offset;
	loff_t			ref_ctr_offset;
	unsigned long		flags;

	/*
	 * The generic code assumes that it has two members of unknown type
	 * owned by the arch-specific code:
	 *
	 * insn -	copy_insn() saves the original instruction here for
	 *		arch_uprobe_analyze_insn().
	 *
	 * ixol -	potentially modified instruction to execute out of
	 *		line, copied to xol_area by xol_get_insn_slot().
	 */
	struct arch_uprobe	arch;
};

struct delayed_uprobe {
	struct list_head list;
	struct uprobe *uprobe;
	struct mm_struct *mm;
};

static DEFINE_MUTEX(delayed_uprobe_lock);
static LIST_HEAD(delayed_uprobe_list);

/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, the thread contends for a slot.  It frees the
 * slot after singlestep.  Currently a fixed number of slots are
 * allocated.
 */
struct xol_area {
	wait_queue_head_t		wq;		/* if all slots are busy */
	atomic_t			slot_count;	/* number of in-use slots */
	unsigned long			*bitmap;	/* 0 = free slot */

	struct vm_special_mapping	xol_mapping;
	struct page			*pages[2];
	/*
	 * We keep the vma's vm_start rather than a pointer to the vma
	 * itself.  The probed process or a naughty kernel module could make
	 * the vma go away, and we must handle that reasonably gracefully.
	 */
	unsigned long			vaddr;		/* Page(s) of instruction slots */
};

/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return 1 if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

	if (is_register)
		flags |= VM_WRITE;

	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}

/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @page is mapped at
 * @old_page: the page we are replacing by new_page
 * @new_page: the modified page we replace page by
 *
 * If @new_page is NULL, only unmap @old_page.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *old_page, struct page *new_page)
{
	struct folio *old_folio = page_folio(old_page);
	struct folio *new_folio;
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);
	int err;
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
				addr + PAGE_SIZE);

	if (new_page) {
		new_folio = page_folio(new_page);
		err = mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL);
		if (err)
			return err;
	}

	/* For folio_free_swap() below */
	folio_lock(old_folio);

	mmu_notifier_invalidate_range_start(&range);
	err = -EAGAIN;
	if (!page_vma_mapped_walk(&pvmw))
		goto unlock;
	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);

	if (new_page) {
		folio_get(new_folio);
		page_add_new_anon_rmap(new_page, vma, addr);
		folio_add_lru_vma(new_folio, vma);
	} else
		/* no new page, just dec_mm_counter for old_page */
		dec_mm_counter(mm, MM_ANONPAGES);

	if (!folio_test_anon(old_folio)) {
		dec_mm_counter(mm, mm_counter_file(old_page));
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
	ptep_clear_flush_notify(vma, addr, pvmw.pte);
	if (new_page)
		set_pte_at_notify(mm, addr, pvmw.pte,
				  mk_pte(new_page, vma->vm_page_prot));

	page_remove_rmap(old_page, vma, false);
	if (!folio_mapped(old_folio))
		folio_free_swap(old_folio);
	page_vma_mapped_walk_done(&pvmw);
	folio_put(old_folio);

	err = 0;
 unlock:
	mmu_notifier_invalidate_range_end(&range);
	folio_unlock(old_folio);
	return err;
}

/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}

/**
 * is_trap_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_trap_insn
 * Returns true if @insn is a breakpoint instruction.
 *
 * This function is needed for the case where an architecture has multiple
 * trap instructions (like powerpc).
 */
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
	return is_swbp_insn(insn);
}

static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
	kunmap_atomic(kaddr);
}

static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
	kunmap_atomic(kaddr);
}

static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
	uprobe_opcode_t old_opcode;
	bool is_swbp;

	/*
	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
	 * We do not check if it is any other 'trap variant' which could
	 * be conditional trap instruction such as the one powerpc supports.
	 *
	 * The logic is that we do not care if the underlying instruction
	 * is a trap variant; uprobes always wins over any other (gdb)
	 * breakpoint.
	 */
	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
	is_swbp = is_swbp_insn(&old_opcode);

	if (is_swbp_insn(new_opcode)) {
		if (is_swbp)		/* register: already installed? */
			return 0;
	} else {
		if (!is_swbp)		/* unregister: was it changed by us? */
			return 0;
	}

	return 1;
}

static struct delayed_uprobe *
delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	list_for_each_entry(du, &delayed_uprobe_list, list)
		if (du->uprobe == uprobe && du->mm == mm)
			return du;
	return NULL;
}

static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	if (delayed_uprobe_check(uprobe, mm))
		return 0;

	du = kzalloc(sizeof(*du), GFP_KERNEL);
	if (!du)
		return -ENOMEM;

	du->uprobe = uprobe;
	du->mm = mm;
	list_add(&du->list, &delayed_uprobe_list);
	return 0;
}

static void delayed_uprobe_delete(struct delayed_uprobe *du)
{
	if (WARN_ON(!du))
		return;
	list_del(&du->list);
	kfree(du);
}

static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct list_head *pos, *q;
	struct delayed_uprobe *du;

	if (!uprobe && !mm)
		return;

	list_for_each_safe(pos, q, &delayed_uprobe_list) {
		du = list_entry(pos, struct delayed_uprobe, list);

		if (uprobe && du->uprobe != uprobe)
			continue;
		if (mm && du->mm != mm)
			continue;

		delayed_uprobe_delete(du);
	}
}

static bool valid_ref_ctr_vma(struct uprobe *uprobe,
			      struct vm_area_struct *vma)
{
	unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset);

	return uprobe->ref_ctr_offset &&
		vma->vm_file &&
		file_inode(vma->vm_file) == uprobe->inode &&
		(vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
		vma->vm_start <= vaddr &&
		vma->vm_end > vaddr;
}

static struct vm_area_struct *
find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *tmp;

	for_each_vma(vmi, tmp)
		if (valid_ref_ctr_vma(uprobe, tmp))
			return tmp;

	return NULL;
}

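/*
 * Add delta @d to the short reference counter that userspace keeps at
 * @vaddr in @mm (e.g. an SDT semaphore).
 */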
static int
__update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
{
	void *kaddr;
	struct page *page;
	struct vm_area_struct *vma;
	int ret;
	short *ptr;

	if (!vaddr || !d)
		return -EINVAL;

	ret = get_user_pages_remote(mm, vaddr, 1,
				    FOLL_WRITE, &page, &vma, NULL);
	if (unlikely(ret <= 0)) {
		/*
		 * We are asking for 1 page. If get_user_pages_remote() fails,
		 * it may return 0, in that case we have to return error.
		 */
		return ret == 0 ? -EBUSY : ret;
	}

	kaddr = kmap_atomic(page);
	ptr = kaddr + (vaddr & ~PAGE_MASK);

	if (unlikely(*ptr + d < 0)) {
		pr_warn("ref_ctr going negative. vaddr: 0x%lx, "
			"curr val: %d, delta: %d\n", vaddr, *ptr, d);
		ret = -EINVAL;
		goto out;
	}

	*ptr += d;
	ret = 0;
out:
	kunmap_atomic(kaddr);
	put_page(page);
	return ret;
}

static void update_ref_ctr_warn(struct uprobe *uprobe,
				struct mm_struct *mm, short d)
{
	pr_warn("ref_ctr %s failed for inode: 0x%lx offset: "
		"0x%llx ref_ctr_offset: 0x%llx of mm: 0x%pK\n",
		d > 0 ? "increment" : "decrement", uprobe->inode->i_ino,
		(unsigned long long) uprobe->offset,
		(unsigned long long) uprobe->ref_ctr_offset, mm);
}

static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
			  short d)
{
	struct vm_area_struct *rc_vma;
	unsigned long rc_vaddr;
	int ret = 0;

	rc_vma = find_ref_ctr_vma(uprobe, mm);

	if (rc_vma) {
		rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset);
		ret = __update_ref_ctr(mm, rc_vaddr, d);
		if (ret)
			update_ref_ctr_warn(uprobe, mm, d);

		if (d > 0)
			return ret;
	}

	mutex_lock(&delayed_uprobe_lock);
	if (d > 0)
		ret = delayed_uprobe_add(uprobe, mm);
	else
		delayed_uprobe_remove(uprobe, mm);
	mutex_unlock(&delayed_uprobe_lock);

	return ret;
}

/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable length instruction and the
 * breakpoint instruction is not of the smallest length instruction
 * supported by that architecture then we need to modify is_trap_at_addr and
 * uprobe_write_opcode accordingly. This would never be a problem for archs
 * that have fixed length instructions.
 *
 * uprobe_write_opcode - write the opcode at a given virtual address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_lock held for write.
 * Return 0 (success) or a negative errno.
 */
int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
			unsigned long vaddr, uprobe_opcode_t opcode)
{
	struct uprobe *uprobe;
	struct page *old_page, *new_page;
	struct vm_area_struct *vma;
	int ret, is_register, ref_ctr_updated = 0;
	bool orig_page_huge = false;
	unsigned int gup_flags = FOLL_FORCE;

	is_register = is_swbp_insn(&opcode);
	uprobe = container_of(auprobe, struct uprobe, arch);

retry:
	if (is_register)
		gup_flags |= FOLL_SPLIT_PMD;
	/* Read the page with vaddr into memory */
	ret = get_user_pages_remote(mm, vaddr, 1, gup_flags,
				    &old_page, &vma, NULL);
	if (ret <= 0)
		return ret;

	ret = verify_opcode(old_page, vaddr, &opcode);
	if (ret <= 0)
		goto put_old;

	if (WARN(!is_register && PageCompound(old_page),
		 "uprobe unregister should never work on compound page\n")) {
		ret = -EINVAL;
		goto put_old;
	}

	/* We are going to replace instruction, update ref_ctr. */
	if (!ref_ctr_updated && uprobe->ref_ctr_offset) {
		ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
		if (ret)
			goto put_old;

		ref_ctr_updated = 1;
	}

	ret = 0;
	if (!is_register && !PageAnon(old_page))
		goto put_old;

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_old;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);
	copy_highpage(new_page, old_page);
	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

	if (!is_register) {
		struct page *orig_page;
		pgoff_t index;

		VM_BUG_ON_PAGE(!PageAnon(old_page), old_page);

		index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
		orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
					  index);

		if (orig_page) {
			if (PageUptodate(orig_page) &&
			    pages_identical(new_page, orig_page)) {
				/* let go new_page */
				put_page(new_page);
				new_page = NULL;

				if (PageCompound(orig_page))
					orig_page_huge = true;
			}
			put_page(orig_page);
		}
	}

	ret = __replace_page(vma, vaddr, old_page, new_page);
	if (new_page)
		put_page(new_page);
put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;

	/* Revert back reference counter if instruction update failed. */
	if (ret && is_register && ref_ctr_updated)
		update_ref_ctr(uprobe, mm, -1);

	/* try collapse pmd for compound page */
	if (!ret && orig_page_huge)
		collapse_pte_mapped_thp(mm, vaddr, false);

	return ret;
}

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr,
			*(uprobe_opcode_t *)&auprobe->insn);
}

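/* Take an extra reference on @uprobe; paired with put_uprobe(). */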
static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
	refcount_inc(&uprobe->ref);
	return uprobe;
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (refcount_dec_and_test(&uprobe->ref)) {
		/*
		 * If application munmap(exec_vma) before uprobe_unregister()
		 * gets called, we don't get a chance to remove uprobe from
		 * delayed_uprobe_list from remove_breakpoint(). Do it here.
		 */
		mutex_lock(&delayed_uprobe_lock);
		delayed_uprobe_remove(uprobe, NULL);
		mutex_unlock(&delayed_uprobe_lock);
		kfree(uprobe);
	}
}

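/*
 * uprobes are ordered in uprobes_tree by inode first and then by offset
 * within that inode; uprobe_cmp() implements this ordering for the
 * rb_find()/rb_find_add() helpers below.
 */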
static __always_inline
int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset,
	       const struct uprobe *r)
{
	if (l_inode < r->inode)
		return -1;

	if (l_inode > r->inode)
		return 1;

	if (l_offset < r->offset)
		return -1;

	if (l_offset > r->offset)
		return 1;

	return 0;
}

#define __node_2_uprobe(node) \
	rb_entry((node), struct uprobe, rb_node)

struct __uprobe_key {
	struct inode *inode;
	loff_t offset;
};

static inline int __uprobe_cmp_key(const void *key, const struct rb_node *b)
{
	const struct __uprobe_key *a = key;
	return uprobe_cmp(a->inode, a->offset, __node_2_uprobe(b));
}

static inline int __uprobe_cmp(struct rb_node *a, const struct rb_node *b)
{
	struct uprobe *u = __node_2_uprobe(a);
	return uprobe_cmp(u->inode, u->offset, __node_2_uprobe(b));
}

static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
	struct __uprobe_key key = {
		.inode = inode,
		.offset = offset,
	};
	struct rb_node *node = rb_find(&key, &uprobes_tree, __uprobe_cmp_key);

	if (node)
		return get_uprobe(__node_2_uprobe(node));

	return NULL;
}

/*
 * Find a uprobe corresponding to a given inode:offset
 * Acquires uprobes_treelock
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe;

	spin_lock(&uprobes_treelock);
	uprobe = __find_uprobe(inode, offset);
	spin_unlock(&uprobes_treelock);

	return uprobe;
}

static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node *node;

	node = rb_find_add(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp);
	if (node)
		return get_uprobe(__node_2_uprobe(node));

	/* get access + creation ref */
	refcount_set(&uprobe->ref, 2);
	return NULL;
}

/*
 * Acquire uprobes_treelock.
 * Matching uprobe already exists in rbtree;
 *	increment (access refcount) and return the matching uprobe.
 *
 * No matching uprobe; insert the uprobe in rb_tree;
 *	get a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	spin_lock(&uprobes_treelock);
	u = __insert_uprobe(uprobe);
	spin_unlock(&uprobes_treelock);

	return u;
}

static void
ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe)
{
	pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx "
		"ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n",
		uprobe->inode->i_ino, (unsigned long long) uprobe->offset,
		(unsigned long long) cur_uprobe->ref_ctr_offset,
		(unsigned long long) uprobe->ref_ctr_offset);
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
				   loff_t ref_ctr_offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return NULL;

	uprobe->inode = inode;
	uprobe->offset = offset;
	uprobe->ref_ctr_offset = ref_ctr_offset;
	init_rwsem(&uprobe->register_rwsem);
	init_rwsem(&uprobe->consumer_rwsem);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);
	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe) {
		if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) {
			ref_ctr_mismatch_warn(cur_uprobe, uprobe);
			put_uprobe(cur_uprobe);
			kfree(uprobe);
			return ERR_PTR(-EINVAL);
		}
		kfree(uprobe);
		uprobe = cur_uprobe;
	}

	return uprobe;
}

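/* Add @uc to @uprobe's consumer list; called with register_rwsem held. */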
static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	uc->next = uprobe->consumers;
	uprobe->consumers = uc;
	up_write(&uprobe->consumer_rwsem);
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if @uc was deleted, false if it was not found.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	struct uprobe_consumer **con;
	bool ret = false;

	down_write(&uprobe->consumer_rwsem);
	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
		if (*con == uc) {
			*con = uc->next;
			ret = true;
			break;
		}
	}
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static int __copy_insn(struct address_space *mapping, struct file *filp,
			void *insn, int nbytes, loff_t offset)
{
	struct page *page;
	/*
	 * Ensure that the page that has the original instruction is populated
	 * and in page-cache. If ->read_folio == NULL it must be shmem_mapping(),
	 * see uprobe_register().
	 */
	if (mapping->a_ops->read_folio)
		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
	else
		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
	if (IS_ERR(page))
		return PTR_ERR(page);

	copy_from_page(page, offset, insn, nbytes);
	put_page(page);

	return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping = uprobe->inode->i_mapping;
	loff_t offs = uprobe->offset;
	void *insn = &uprobe->arch.insn;
	int size = sizeof(uprobe->arch.insn);
	int len, err = -EIO;

	/* Copy only available bytes, -EIO if nothing was read */
	do {
		if (offs >= i_size_read(uprobe->inode))
			break;

		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
		err = __copy_insn(mapping, filp, insn, len, offs);
		if (err)
			break;

		insn += len;
		offs += len;
		size -= len;
	} while (size);

	return err;
}

static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
				struct mm_struct *mm, unsigned long vaddr)
{
	int ret = 0;

	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		return ret;

	/* TODO: move this into _register, until then we abuse this sem. */
	down_write(&uprobe->consumer_rwsem);
	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		goto out;

	ret = copy_insn(uprobe, file);
	if (ret)
		goto out;

	ret = -ENOTSUPP;
	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
		goto out;

	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
	if (ret)
		goto out;

	smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
	set_bit(UPROBE_COPY_INSN, &uprobe->flags);

 out:
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

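/* Does consumer @uc want to trace @mm?  A consumer without ->filter means "yes". */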
static inline bool consumer_filter(struct uprobe_consumer *uc,
				   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	return !uc->filter || uc->filter(uc, ctx, mm);
}

static bool filter_chain(struct uprobe *uprobe,
			 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct uprobe_consumer *uc;
	bool ret = false;

	down_read(&uprobe->consumer_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		ret = consumer_filter(uc, ctx, mm);
		if (ret)
			break;
	}
	up_read(&uprobe->consumer_rwsem);

	return ret;
}

static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	bool first_uprobe;
	int ret;

	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
	if (ret)
		return ret;

	/*
	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
	 * the task can hit this breakpoint right after __replace_page().
	 */
	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
	if (first_uprobe)
		set_bit(MMF_HAS_UPROBES, &mm->flags);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		clear_bit(MMF_HAS_UPROBES, &mm->flags);

	return ret;
}

static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
	set_bit(MMF_RECALC_UPROBES, &mm->flags);
	return set_orig_insn(&uprobe->arch, mm, vaddr);
}

static inline bool uprobe_is_active(struct uprobe *uprobe)
{
	return !RB_EMPTY_NODE(&uprobe->rb_node);
}

/*
 * There could be threads that have already hit the breakpoint. They
 * will recheck the current insn and restart if find_uprobe() fails.
 * See find_active_uprobe().
 */
static void delete_uprobe(struct uprobe *uprobe)
{
	if (WARN_ON(!uprobe_is_active(uprobe)))
		return;

	spin_lock(&uprobes_treelock);
	rb_erase(&uprobe->rb_node, &uprobes_tree);
	spin_unlock(&uprobes_treelock);
	RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
	put_uprobe(uprobe);
}

struct map_info {
	struct map_info *next;
	struct mm_struct *mm;
	unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
	struct map_info *next = info->next;
	kfree(info);
	return next;
}

static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct map_info *curr = NULL;
	struct map_info *prev = NULL;
	struct map_info *info;
	int more = 0;

 again:
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		if (!prev && !more) {
			/*
			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
			 * reclaim. This is optimistic, no harm done if it fails.
			 */
			prev = kmalloc(sizeof(struct map_info),
					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (prev)
				prev->next = NULL;
		}
		if (!prev) {
			more++;
			continue;
		}

		if (!mmget_not_zero(vma->vm_mm))
			continue;

		info = prev;
		prev = prev->next;
		info->next = curr;
		curr = info;

		info->mm = vma->vm_mm;
		info->vaddr = offset_to_vaddr(vma, offset);
	}
	i_mmap_unlock_read(mapping);

	if (!more)
		goto out;

	prev = curr;
	while (curr) {
		mmput(curr->mm);
		curr = curr->next;
	}

	do {
		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
		if (!info) {
			curr = ERR_PTR(-ENOMEM);
			goto out;
		}
		info->next = prev;
		prev = info;
	} while (--more);

	goto again;
 out:
	while (prev)
		prev = free_map_info(prev);
	return curr;
}

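/*
 * Install (@new != NULL) or remove (@new == NULL) the breakpoint in every
 * mm that currently maps uprobe->inode at the probed offset.
 */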
static int
register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
{
	bool is_register = !!new;
	struct map_info *info;
	int err = 0;

	percpu_down_write(&dup_mmap_sem);
	info = build_map_info(uprobe->inode->i_mapping,
					uprobe->offset, is_register);
	if (IS_ERR(info)) {
		err = PTR_ERR(info);
		goto out;
	}

	while (info) {
		struct mm_struct *mm = info->mm;
		struct vm_area_struct *vma;

		if (err && is_register)
			goto free;

		mmap_write_lock(mm);
		vma = find_vma(mm, info->vaddr);
		if (!vma || !valid_vma(vma, is_register) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			goto unlock;

		if (vma->vm_start > info->vaddr ||
		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
			goto unlock;

		if (is_register) {
			/* consult only the "caller", new consumer. */
			if (consumer_filter(new,
					UPROBE_FILTER_REGISTER, mm))
				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
			if (!filter_chain(uprobe,
					UPROBE_FILTER_UNREGISTER, mm))
				err |= remove_breakpoint(uprobe, mm, info->vaddr);
		}

 unlock:
		mmap_write_unlock(mm);
 free:
		mmput(mm);
		info = free_map_info(info);
	}
 out:
	percpu_up_write(&dup_mmap_sem);
	return err;
}

static void
__uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	int err;

	if (WARN_ON(!consumer_del(uprobe, uc)))
		return;

	err = register_for_each_vma(uprobe, NULL);
	/* TODO : can't unregister? schedule a worker thread */
	if (!uprobe->consumers && !err)
		delete_uprobe(uprobe);
}

/*
 * uprobe_unregister - unregister an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;

	uprobe = find_uprobe(inode, offset);
	if (WARN_ON(!uprobe))
		return;

	down_write(&uprobe->register_rwsem);
	__uprobe_unregister(uprobe, uc);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister);

/*
 * __uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, __uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e. first consumer for a @inode:@offset
 * tuple). Creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. Creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters. Caller of __uprobe_register() is required to keep @inode
 * (and the containing mount) referenced.
 *
 * Return errno if it cannot successfully install probes
 * else return 0 (success)
 */
static int __uprobe_register(struct inode *inode, loff_t offset,
			     loff_t ref_ctr_offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	/* Uprobe must have at least one set consumer */
	if (!uc->handler && !uc->ret_handler)
		return -EINVAL;

	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
	if (!inode->i_mapping->a_ops->read_folio &&
	    !shmem_mapping(inode->i_mapping))
		return -EIO;
	/* Racy, just to catch the obvious mistakes */
	if (offset > i_size_read(inode))
		return -EINVAL;

	/*
	 * This ensures that copy_from_page(), copy_to_page() and
	 * __update_ref_ctr() can't cross page boundary.
	 */
	if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE))
		return -EINVAL;
	if (!IS_ALIGNED(ref_ctr_offset, sizeof(short)))
		return -EINVAL;

 retry:
	uprobe = alloc_uprobe(inode, offset, ref_ctr_offset);
	if (!uprobe)
		return -ENOMEM;
	if (IS_ERR(uprobe))
		return PTR_ERR(uprobe);

	/*
	 * We can race with uprobe_unregister()->delete_uprobe().
	 * Check uprobe_is_active() and retry if it is false.
	 */
	down_write(&uprobe->register_rwsem);
	ret = -EAGAIN;
	if (likely(uprobe_is_active(uprobe))) {
		consumer_add(uprobe, uc);
		ret = register_for_each_vma(uprobe, uc);
		if (ret)
			__uprobe_unregister(uprobe, uc);
	}
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}

int uprobe_register(struct inode *inode, loff_t offset,
		    struct uprobe_consumer *uc)
{
	return __uprobe_register(inode, offset, 0, uc);
}
EXPORT_SYMBOL_GPL(uprobe_register);

int uprobe_register_refctr(struct inode *inode, loff_t offset,
			   loff_t ref_ctr_offset, struct uprobe_consumer *uc)
{
	return __uprobe_register(inode, offset, ref_ctr_offset, uc);
}
EXPORT_SYMBOL_GPL(uprobe_register_refctr);

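/*
 * Usage sketch (illustrative only, not part of this file): a kernel
 * module with a hypothetical consumer "my_uc" would typically do
 *
 *	static int my_handler(struct uprobe_consumer *self,
 *			      struct pt_regs *regs)
 *	{
 *		return 0;	// inspect regs, then let the task continue
 *	}
 *	static struct uprobe_consumer my_uc = { .handler = my_handler };
 *
 *	err = uprobe_register(inode, offset, &my_uc);
 *	...
 *	uprobe_unregister(inode, offset, &my_uc);
 *
 * where @inode and @offset identify the probed instruction and the
 * caller keeps @inode referenced, as documented above.
 */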
/*
 * uprobe_apply - add or remove the breakpoints of an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: consumer which wants to add more or remove some breakpoints
 * @add: add or remove the breakpoints
 */
int uprobe_apply(struct inode *inode, loff_t offset,
			struct uprobe_consumer *uc, bool add)
{
	struct uprobe *uprobe;
	struct uprobe_consumer *con;
	int ret = -ENOENT;

	uprobe = find_uprobe(inode, offset);
	if (WARN_ON(!uprobe))
		return ret;

	down_write(&uprobe->register_rwsem);
	for (con = uprobe->consumers; con && con != uc ; con = con->next)
		;
	if (con)
		ret = register_for_each_vma(uprobe, add ? uc : NULL);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);

	return ret;
}

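/* Remove the breakpoints from every vma of @mm that maps this uprobe. */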
static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	int err = 0;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		unsigned long vaddr;
		loff_t offset;

		if (!valid_vma(vma, false) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			continue;

		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
		if (uprobe->offset < offset ||
		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
			continue;

		vaddr = offset_to_vaddr(vma, uprobe->offset);
		err |= remove_breakpoint(uprobe, mm, vaddr);
	}
	mmap_read_unlock(mm);

	return err;
}

static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
	struct rb_node *n = uprobes_tree.rb_node;

	while (n) {
		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);

		if (inode < u->inode) {
			n = n->rb_left;
		} else if (inode > u->inode) {
			n = n->rb_right;
		} else {
			if (max < u->offset)
				n = n->rb_left;
			else if (min > u->offset)
				n = n->rb_right;
			else
				break;
		}
	}

	return n;
}

/*
 * For a given range in vma, build a list of probes that need to be inserted.
 */
static void build_probe_list(struct inode *inode,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct list_head *head)
{
	loff_t min, max;
	struct rb_node *n, *t;
	struct uprobe *u;

	INIT_LIST_HEAD(head);
	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	if (n) {
		for (t = n; t; t = rb_prev(t)) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset < min)
				break;
			list_add(&u->pending_list, head);
			get_uprobe(u);
		}
		for (t = n; (t = rb_next(t)); ) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset > max)
				break;
			list_add(&u->pending_list, head);
			get_uprobe(u);
		}
	}
	spin_unlock(&uprobes_treelock);
}

/* @vma contains reference counter, not the probed instruction. */
static int delayed_ref_ctr_inc(struct vm_area_struct *vma)
{
	struct list_head *pos, *q;
	struct delayed_uprobe *du;
	unsigned long vaddr;
	int ret = 0, err = 0;

	mutex_lock(&delayed_uprobe_lock);
	list_for_each_safe(pos, q, &delayed_uprobe_list) {
		du = list_entry(pos, struct delayed_uprobe, list);

		if (du->mm != vma->vm_mm ||
		    !valid_ref_ctr_vma(du->uprobe, vma))
			continue;

		vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset);
		ret = __update_ref_ctr(vma->vm_mm, vaddr, 1);
		if (ret) {
			update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1);
			if (!err)
				err = ret;
		}
		delayed_uprobe_delete(du);
	}
	mutex_unlock(&delayed_uprobe_lock);
	return err;
}

/*
 * Called from mmap_region/vma_adjust with mm->mmap_lock acquired.
 *
 * Currently we ignore all errors and always return 0, the callers
 * can't handle the failure anyway.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;

	if (no_uprobe_events())
		return 0;

	if (vma->vm_file &&
	    (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
	    test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags))
		delayed_ref_ctr_inc(vma);

	if (!valid_vma(vma, true))
		return 0;

	inode = file_inode(vma->vm_file);
	if (!inode)
		return 0;

	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
	/*
	 * We can race with uprobe_unregister(), this uprobe can be already
	 * removed. But in this case filter_chain() must return false, all
	 * consumers have gone away.
	 */
	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		if (!fatal_signal_pending(current) &&
		    filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
		}
		put_uprobe(uprobe);
	}
	mutex_unlock(uprobes_mmap_hash(inode));

	return 0;
}

static bool
vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	loff_t min, max;
	struct inode *inode;
	struct rb_node *n;

	inode = file_inode(vma->vm_file);

	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	spin_unlock(&uprobes_treelock);

	return !!n;
}

/*
 * Called in context of a munmap of a vma.
 */
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (no_uprobe_events() || !valid_vma(vma, false))
		return;

	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
		return;

	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
		return;

	if (vma_has_uprobes(vma, start, end))
		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
}

/* Slot allocation for XOL */
static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
{
	struct vm_area_struct *vma;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (mm->uprobes_state.xol_area) {
		ret = -EALREADY;
		goto fail;
	}

	if (!area->vaddr) {
		/* Try to map as high as possible, this is only a hint. */
		area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
						PAGE_SIZE, 0, 0);
		if (IS_ERR_VALUE(area->vaddr)) {
			ret = area->vaddr;
			goto fail;
		}
	}

	vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
				&area->xol_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto fail;
	}

	ret = 0;
	/* pairs with get_xol_area() */
	smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
 fail:
	mmap_write_unlock(mm);

	return ret;
}

static struct xol_area *__create_xol_area(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	uprobe_opcode_t insn = UPROBE_SWBP_INSN;
	struct xol_area *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		goto out;

	area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long),
			       GFP_KERNEL);
	if (!area->bitmap)
		goto free_area;

	area->xol_mapping.name = "[uprobes]";
	area->xol_mapping.fault = NULL;
	area->xol_mapping.pages = area->pages;
	area->pages[0] = alloc_page(GFP_HIGHUSER);
	if (!area->pages[0])
		goto free_bitmap;
	area->pages[1] = NULL;

	area->vaddr = vaddr;
	init_waitqueue_head(&area->wq);
	/* Reserve the 1st slot for get_trampoline_vaddr() */
	set_bit(0, area->bitmap);
	atomic_set(&area->slot_count, 1);
	arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);

	if (!xol_add_vma(mm, area))
		return area;

	__free_page(area->pages[0]);
 free_bitmap:
	kfree(area->bitmap);
 free_area:
	kfree(area);
 out:
	return NULL;
}

/*
 * get_xol_area - Allocate process's xol_area if necessary.
 * This area will be used for storing instructions for execution out of line.
 *
 * Returns the allocated area or NULL.
 */
static struct xol_area *get_xol_area(void)
{
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	if (!mm->uprobes_state.xol_area)
		__create_xol_area(0);

	/* Pairs with xol_add_vma() smp_store_release() */
	area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */
	return area;
}

/*
 * uprobe_clear_state - Free the area allocated for slots.
 */
void uprobe_clear_state(struct mm_struct *mm)
{
	struct xol_area *area = mm->uprobes_state.xol_area;

	mutex_lock(&delayed_uprobe_lock);
	delayed_uprobe_remove(NULL, mm);
	mutex_unlock(&delayed_uprobe_lock);

	if (!area)
		return;

	put_page(area->pages[0]);
	kfree(area->bitmap);
	kfree(area);
}

void uprobe_start_dup_mmap(void)
{
	percpu_down_read(&dup_mmap_sem);
}

void uprobe_end_dup_mmap(void)
{
	percpu_up_read(&dup_mmap_sem);
}

void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
		set_bit(MMF_HAS_UPROBES, &newmm->flags);
		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
	}
}

/*
 * Search for a free slot, sleeping on area->wq if all slots are busy.
 */
static unsigned long xol_take_insn_slot(struct xol_area *area)
{
	unsigned long slot_addr;
	int slot_nr;

	do {
		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
		if (slot_nr < UINSNS_PER_PAGE) {
			if (!test_and_set_bit(slot_nr, area->bitmap))
				break;

			slot_nr = UINSNS_PER_PAGE;
			continue;
		}
		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
	} while (slot_nr >= UINSNS_PER_PAGE);

	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
	atomic_inc(&area->slot_count);

	return slot_addr;
}

/*
 * xol_get_insn_slot - allocate a slot for xol.
 * Returns the allocated slot address or 0.
 */
static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
{
	struct xol_area *area;
	unsigned long xol_vaddr;

	area = get_xol_area();
	if (!area)
		return 0;

	xol_vaddr = xol_take_insn_slot(area);
	if (unlikely(!xol_vaddr))
		return 0;

	arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));

	return xol_vaddr;
}

/*
 * xol_free_insn_slot - If slot was earlier allocated by
 * @xol_get_insn_slot(), make the slot available for
 * subsequent requests.
 */
static void xol_free_insn_slot(struct task_struct *tsk)
{
	struct xol_area *area;
	unsigned long vma_end;
	unsigned long slot_addr;

	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
		return;

	slot_addr = tsk->utask->xol_vaddr;
	if (unlikely(!slot_addr))
		return;

	area = tsk->mm->uprobes_state.xol_area;
	vma_end = area->vaddr + PAGE_SIZE;
	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
		unsigned long offset;
		int slot_nr;

		offset = slot_addr - area->vaddr;
		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
		if (slot_nr >= UINSNS_PER_PAGE)
			return;

		clear_bit(slot_nr, area->bitmap);
		atomic_dec(&area->slot_count);
		smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
		if (waitqueue_active(&area->wq))
			wake_up(&area->wq);

		tsk->utask->xol_vaddr = 0;
	}
}

void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
				  void *src, unsigned long len)
{
	/* Initialize the slot */
	copy_to_page(page, vaddr, src, len);

	/*
	 * We probably need flush_icache_user_page() but it needs vma.
	 * This should work on most of architectures by default. If
	 * architecture needs to do something different it can define
	 * its own version of the function.
	 */
	flush_dcache_page(page);
}

/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */
unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
}

unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (unlikely(utask && utask->active_uprobe))
		return utask->vaddr;

	return instruction_pointer(regs);
}

static struct return_instance *free_ret_instance(struct return_instance *ri)
{
	struct return_instance *next = ri->next;
	put_uprobe(ri->uprobe);
	kfree(ri);
	return next;
}

/*
 * Called with no locks held.
 * Called in context of an exiting or an exec-ing thread.
 */
void uprobe_free_utask(struct task_struct *t)
{
	struct uprobe_task *utask = t->utask;
	struct return_instance *ri;

	if (!utask)
		return;

	if (utask->active_uprobe)
		put_uprobe(utask->active_uprobe);

	ri = utask->return_instances;
	while (ri)
		ri = free_ret_instance(ri);

	xol_free_insn_slot(t);
	kfree(utask);
	t->utask = NULL;
}

/*
 * Allocate a uprobe_task object for the task if necessary.
 * Called when the thread hits a breakpoint.
 *
 * Returns:
 * - pointer to new uprobe_task on success
 * - NULL otherwise
 */
static struct uprobe_task *get_utask(void)
{
	if (!current->utask)
		current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
	return current->utask;
}

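/* Duplicate the parent's return_instance chain into the new child @t. */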
static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
{
	struct uprobe_task *n_utask;
	struct return_instance **p, *o, *n;

	n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
	if (!n_utask)
		return -ENOMEM;
	t->utask = n_utask;

	p = &n_utask->return_instances;
	for (o = o_utask->return_instances; o; o = o->next) {
		n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
		if (!n)
			return -ENOMEM;

		*n = *o;
		get_uprobe(n->uprobe);
		n->next = NULL;

		*p = n;
		p = &n->next;
		n_utask->depth++;
	}

	return 0;
}

static void uprobe_warn(struct task_struct *t, const char *msg)
{
	pr_warn("uprobe: %s:%d failed to %s\n",
			current->comm, current->pid, msg);
}

static void dup_xol_work(struct callback_head *work)
{
	if (current->flags & PF_EXITING)
		return;

	if (!__create_xol_area(current->utask->dup_xol_addr) &&
			!fatal_signal_pending(current))
		uprobe_warn(current, "dup xol area");
}

b68e0749
ON
1794/*
1795 * Called in context of a new clone/fork from copy_process.
1796 */
3ab67966 1797void uprobe_copy_process(struct task_struct *t, unsigned long flags)
b68e0749 1798{
248d3a7b
ON
1799 struct uprobe_task *utask = current->utask;
1800 struct mm_struct *mm = current->mm;
aa59c53f 1801 struct xol_area *area;
248d3a7b 1802
b68e0749 1803 t->utask = NULL;
248d3a7b 1804
3ab67966
ON
1805 if (!utask || !utask->return_instances)
1806 return;
1807
1808 if (mm == t->mm && !(flags & CLONE_VFORK))
248d3a7b
ON
1809 return;
1810
1811 if (dup_utask(t, utask))
1812 return uprobe_warn(t, "dup ret instances");
aa59c53f
ON
1813
1814 /* The task can fork() after dup_xol_work() fails */
1815 area = mm->uprobes_state.xol_area;
1816 if (!area)
1817 return uprobe_warn(t, "dup xol area");
1818
3ab67966
ON
1819 if (mm == t->mm)
1820 return;
1821
32473431
ON
1822 t->utask->dup_xol_addr = area->vaddr;
1823 init_task_work(&t->utask->dup_xol_work, dup_xol_work);
91989c70 1824 task_work_add(t, &t->utask->dup_xol_work, TWA_RESUME);
b68e0749
ON
1825}
1826
e78aebfd
AA
1827/*
1828 * Current area->vaddr notion assume the trampoline address is always
1829 * equal area->vaddr.
1830 *
1831 * Returns -1 in case the xol_area is not allocated.
1832 */
1833static unsigned long get_trampoline_vaddr(void)
1834{
1835 struct xol_area *area;
1836 unsigned long trampoline_vaddr = -1;
1837
5c6338b4
PM
1838 /* Pairs with xol_add_vma() smp_store_release() */
1839 area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */
e78aebfd
AA
1840 if (area)
1841 trampoline_vaddr = area->vaddr;
1842
1843 return trampoline_vaddr;
1844}
1845
db087ef6
ON
1846static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
1847 struct pt_regs *regs)
a5b7e1a8
ON
1848{
1849 struct return_instance *ri = utask->return_instances;
db087ef6 1850 enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;
86dcb702
ON
1851
1852 while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
a5b7e1a8
ON
1853 ri = free_ret_instance(ri);
1854 utask->depth--;
1855 }
1856 utask->return_instances = ri;
1857}
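
/*
 * Example: if the probed function longjmp()s over its caller, the next
 * uretprobe event sees a user stack pointer above the ri->stack recorded
 * for the skipped frames (on stack-grows-down architectures), so
 * arch_uretprobe_is_alive() reports them dead and they are freed here.
 */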

static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct return_instance *ri;
	struct uprobe_task *utask;
	unsigned long orig_ret_vaddr, trampoline_vaddr;
	bool chained;

	if (!get_xol_area())
		return;

	utask = get_utask();
	if (!utask)
		return;

	if (utask->depth >= MAX_URETPROBE_DEPTH) {
		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
				" nestedness limit pid/tgid=%d/%d\n",
				current->pid, current->tgid);
		return;
	}

	ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
	if (!ri)
		return;

	trampoline_vaddr = get_trampoline_vaddr();
	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
	if (orig_ret_vaddr == -1)
		goto fail;

	/* drop the entries invalidated by longjmp() */
	chained = (orig_ret_vaddr == trampoline_vaddr);
	cleanup_return_instances(utask, chained, regs);

	/*
	 * We don't want to keep the trampoline address on the stack; rather,
	 * keep the original return address of the first caller through all
	 * the subsequent instances. This also makes breakpoint unwrapping
	 * easier.
	 */
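	/*
	 * Example: when uretprobed f() tail-calls uretprobed g(), the
	 * "return address" seen on entry to g() is already the trampoline
	 * planted for f()'s instance; g()'s instance is then "chained"
	 * and records f()'s original return address instead.
	 */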
	if (chained) {
		if (!utask->return_instances) {
			/*
			 * This situation is not possible. Likely we have an
			 * attack from user-space.
			 */
			uprobe_warn(current, "handle tail call");
			goto fail;
		}
		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
	}

	ri->uprobe = get_uprobe(uprobe);
	ri->func = instruction_pointer(regs);
	ri->stack = user_stack_pointer(regs);
	ri->orig_ret_vaddr = orig_ret_vaddr;
	ri->chained = chained;

	utask->depth++;
	ri->next = utask->return_instances;
	utask->return_instances = ri;

	return;
 fail:
	kfree(ri);
}

/* Prepare to single-step probed instruction out of line. */
static int
pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
{
	struct uprobe_task *utask;
	unsigned long xol_vaddr;
	int err;

	utask = get_utask();
	if (!utask)
		return -ENOMEM;

	xol_vaddr = xol_get_insn_slot(uprobe);
	if (!xol_vaddr)
		return -ENOMEM;

	utask->xol_vaddr = xol_vaddr;
	utask->vaddr = bp_vaddr;

	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
	if (unlikely(err)) {
		xol_free_insn_slot(current);
		return err;
	}

	utask->active_uprobe = uprobe;
	utask->state = UTASK_SSTEP;
	return 0;
}

/*
 * If we are singlestepping, then ensure this thread is not connected to
 * non-fatal signals until completion of singlestep. When the xol insn
 * itself triggers the signal, restart the original insn even if the task
 * is already SIGKILL'ed (since coredump should report the correct ip).
 * This is even more important if the task has a handler for SIGSEGV/etc:
 * the _same_ instruction should be repeated again after return from the
 * signal handler, and SSTEP can never finish in this case.
 */
bool uprobe_deny_signal(void)
{
	struct task_struct *t = current;
	struct uprobe_task *utask = t->utask;

	if (likely(!utask || !utask->active_uprobe))
		return false;

	WARN_ON_ONCE(utask->state != UTASK_SSTEP);

	if (task_sigpending(t)) {
		spin_lock_irq(&t->sighand->siglock);
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
		spin_unlock_irq(&t->sighand->siglock);

		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
			utask->state = UTASK_SSTEP_TRAPPED;
			set_tsk_thread_flag(t, TIF_UPROBE);
		}
	}

	return true;
}

static void mmf_recalc_uprobes(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;

	for_each_vma(vmi, vma) {
		if (!valid_vma(vma, false))
			continue;
		/*
		 * This is not strictly accurate: we can race with
		 * uprobe_unregister() and see the already removed
		 * uprobe if delete_uprobe() was not yet called.
		 * Or this uprobe can be filtered out.
		 */
		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
			return;
	}

	clear_bit(MMF_HAS_UPROBES, &mm->flags);
}

static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
	struct page *page;
	uprobe_opcode_t opcode;
	int result;

	if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE)))
		return -EINVAL;

	pagefault_disable();
	result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
	pagefault_enable();

	if (likely(result == 0))
		goto out;

	/*
	 * The NULL 'tsk' here ensures that any faults that occur here
	 * will not be accounted to the task. 'mm' *is* current->mm,
	 * but we treat this as a 'remote' access since it is
	 * essentially a kernel access to the memory.
	 */
	result = get_user_pages_remote(mm, vaddr, 1, FOLL_FORCE, &page,
			NULL, NULL);
	if (result < 0)
		return result;

	copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
	put_page(page);
 out:
	/* This needs to return true for any variant of the trap insn */
	return is_trap_insn(&opcode);
}

static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
{
	struct mm_struct *mm = current->mm;
	struct uprobe *uprobe = NULL;
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	vma = vma_lookup(mm, bp_vaddr);
	if (vma) {
		if (valid_vma(vma, false)) {
			struct inode *inode = file_inode(vma->vm_file);
			loff_t offset = vaddr_to_offset(vma, bp_vaddr);

			uprobe = find_uprobe(inode, offset);
		}

		if (!uprobe)
			*is_swbp = is_trap_at_addr(mm, bp_vaddr);
	} else {
		*is_swbp = -EFAULT;
	}

	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
		mmf_recalc_uprobes(mm);
	mmap_read_unlock(mm);

	return uprobe;
}

static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct uprobe_consumer *uc;
	int remove = UPROBE_HANDLER_REMOVE;
	bool need_prep = false; /* prepare return uprobe, when needed */

	down_read(&uprobe->register_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		int rc = 0;

		if (uc->handler) {
			rc = uc->handler(uc, regs);
			WARN(rc & ~UPROBE_HANDLER_MASK,
				"bad rc=0x%x from %ps()\n", rc, uc->handler);
		}

		if (uc->ret_handler)
			need_prep = true;

		remove &= rc;
	}

	if (need_prep && !remove)
		prepare_uretprobe(uprobe, regs); /* put bp at return */

	if (remove && uprobe->consumers) {
		WARN_ON(!uprobe_is_active(uprobe));
		unapply_uprobe(uprobe, current->mm);
	}
	up_read(&uprobe->register_rwsem);
}
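
/*
 * Editor's sketch (hypothetical, not part of this file): a minimal
 * consumer pair as handler_chain() sees it, assuming the uprobe_consumer
 * callbacks declared in <linux/uprobes.h>. A non-NULL ->ret_handler is
 * what makes handler_chain() call prepare_uretprobe() above.
 */
#if 0
static int demo_handler(struct uprobe_consumer *self, struct pt_regs *regs)
{
	/* 0 keeps the probe; UPROBE_HANDLER_REMOVE asks to unapply it */
	return 0;
}

static int demo_ret_handler(struct uprobe_consumer *self, unsigned long func,
			    struct pt_regs *regs)
{
	/* invoked from handle_uretprobe_chain() when the probed function returns */
	return 0;
}

static struct uprobe_consumer demo_consumer = {
	.handler	= demo_handler,
	.ret_handler	= demo_ret_handler,
};
#endif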

static void
handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
{
	struct uprobe *uprobe = ri->uprobe;
	struct uprobe_consumer *uc;

	down_read(&uprobe->register_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		if (uc->ret_handler)
			uc->ret_handler(uc, ri->func, regs);
	}
	up_read(&uprobe->register_rwsem);
}

static struct return_instance *find_next_ret_chain(struct return_instance *ri)
{
	bool chained;

	do {
		chained = ri->chained;
		ri = ri->next;	/* can't be NULL if chained */
	} while (chained);

	return ri;
}
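
/*
 * Example: with return_instances h -> g -> f -> outer, where h and g are
 * chained (tail calls) and f is not, starting from h this walk stops
 * after f and returns "outer": h, g and f share the single trampoline
 * frame that f's hijack created.
 */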

static void handle_trampoline(struct pt_regs *regs)
{
	struct uprobe_task *utask;
	struct return_instance *ri, *next;
	bool valid;

	utask = current->utask;
	if (!utask)
		goto sigill;

	ri = utask->return_instances;
	if (!ri)
		goto sigill;

	do {
		/*
		 * We should throw out the frames invalidated by longjmp().
		 * If this chain is valid, then the next one should be alive
		 * or NULL; the latter case means that nobody but ri->func
		 * could hit this trampoline on return. TODO: sigaltstack().
		 */
		next = find_next_ret_chain(ri);
		valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);

		instruction_pointer_set(regs, ri->orig_ret_vaddr);
		do {
			if (valid)
				handle_uretprobe_chain(ri, regs);
			ri = free_ret_instance(ri);
			utask->depth--;
		} while (ri != next);
	} while (!valid);

	utask->return_instances = ri;
	return;

 sigill:
	uprobe_warn(current, "handle uretprobe, sending SIGILL.");
	force_sig(SIGILL);
}

bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
{
	return false;
}

bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
				struct pt_regs *regs)
{
	return true;
}
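
/*
 * Editor's sketch (not part of this file): an arch override typically
 * decides liveness by comparing the current user stack pointer against
 * the value recorded in prepare_uretprobe(); x86 does roughly this.
 */
#if 0
bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
			     struct pt_regs *regs)
{
	if (ctx == RP_CHECK_CALL) /* sp was just decremented by the call insn */
		return user_stack_pointer(regs) < ret->stack;

	return user_stack_pointer(regs) <= ret->stack;
}
#endif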

/*
 * Run handler and ask thread to singlestep.
 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
 */
static void handle_swbp(struct pt_regs *regs)
{
	struct uprobe *uprobe;
	unsigned long bp_vaddr;
	int is_swbp;

	bp_vaddr = uprobe_get_swbp_addr(regs);
	if (bp_vaddr == get_trampoline_vaddr())
		return handle_trampoline(regs);

	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
	if (!uprobe) {
		if (is_swbp > 0) {
			/* No matching uprobe; signal SIGTRAP. */
			force_sig(SIGTRAP);
		} else {
			/*
			 * Either we raced with uprobe_unregister() or we can't
			 * access this memory. The latter is only possible if
			 * another thread plays with our ->mm. In both cases
			 * we can simply restart. If this vma was unmapped we
			 * can pretend this insn was not executed yet and get
			 * the (correct) SIGSEGV after restart.
			 */
			instruction_pointer_set(regs, bp_vaddr);
		}
		return;
	}

	/* change it in advance for ->handler() and restart */
	instruction_pointer_set(regs, bp_vaddr);

	/*
	 * TODO: move copy_insn/etc into _register and remove this hack.
	 * After we hit the bp, _unregister + _register can install the
	 * new and not-yet-analyzed uprobe at the same address, restart.
	 */
	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
		goto out;

	/*
	 * Pairs with the smp_wmb() in prepare_uprobe().
	 *
	 * Guarantees that if we see the UPROBE_COPY_INSN bit set, then
	 * we must also see the stores to &uprobe->arch performed by the
	 * prepare_uprobe() call.
	 */
	smp_rmb();

	/* Tracing handlers use ->utask to communicate with fetch methods */
	if (!get_utask())
		goto out;

	if (arch_uprobe_ignore(&uprobe->arch, regs))
		goto out;

	handler_chain(uprobe, regs);

	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
		goto out;

	if (!pre_ssout(uprobe, regs, bp_vaddr))
		return;

	/* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */
out:
	put_uprobe(uprobe);
}

/*
 * Perform required fix-ups and disable singlestep.
 * Allow pending signals to take effect.
 */
static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
{
	struct uprobe *uprobe;
	int err = 0;

	uprobe = utask->active_uprobe;
	if (utask->state == UTASK_SSTEP_ACK)
		err = arch_uprobe_post_xol(&uprobe->arch, regs);
	else if (utask->state == UTASK_SSTEP_TRAPPED)
		arch_uprobe_abort_xol(&uprobe->arch, regs);
	else
		WARN_ON_ONCE(1);

	put_uprobe(uprobe);
	utask->active_uprobe = NULL;
	utask->state = UTASK_RUNNING;
	xol_free_insn_slot(current);

	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending(); /* see uprobe_deny_signal() */
	spin_unlock_irq(&current->sighand->siglock);

	if (unlikely(err)) {
		uprobe_warn(current, "execute the probed insn, sending SIGILL.");
		force_sig(SIGILL);
	}
}

/*
 * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
 * allows the thread to return from interrupt. After that handle_swbp()
 * sets utask->active_uprobe.
 *
 * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
 * and allows the thread to return from interrupt.
 *
 * While returning to userspace, thread notices the TIF_UPROBE flag and calls
 * uprobe_notify_resume().
 */
void uprobe_notify_resume(struct pt_regs *regs)
{
	struct uprobe_task *utask;

	clear_thread_flag(TIF_UPROBE);

	utask = current->utask;
	if (utask && utask->active_uprobe)
		handle_singlestep(utask, regs);
	else
		handle_swbp(regs);
}

/*
 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
 * the notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
 */
int uprobe_pre_sstep_notifier(struct pt_regs *regs)
{
	if (!current->mm)
		return 0;

	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
	    (!current->utask || !current->utask->return_instances))
		return 0;

	set_thread_flag(TIF_UPROBE);
	return 1;
}

/*
 * uprobe_post_sstep_notifier gets called in interrupt context as part of
 * the notifier mechanism. Set TIF_UPROBE flag and indicate completion of
 * singlestep.
 */
int uprobe_post_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (!current->mm || !utask || !utask->active_uprobe)
		/* task is currently not uprobed */
		return 0;

	utask->state = UTASK_SSTEP_ACK;
	set_thread_flag(TIF_UPROBE);
	return 1;
}

static struct notifier_block uprobe_exception_nb = {
	.notifier_call		= arch_uprobe_exception_notify,
	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
};

void __init uprobes_init(void)
{
	int i;

	for (i = 0; i < UPROBES_HASH_SZ; i++)
		mutex_init(&uprobes_mmap_mutex[i]);

	BUG_ON(register_die_notifier(&uprobe_exception_nb));
}