/*
 * User-space Probes (UProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/export.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
#include <linux/swap.h>		/* try_to_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include "../../mm/internal.h"	/* munlock_vma_page */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE		(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS	UINSNS_PER_PAGE

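/*
 * For example (assuming 4KiB pages and the x86 value of
 * UPROBE_XOL_SLOT_BYTES, 128), UINSNS_PER_PAGE works out to
 * 4096 / 128 = 32 out-of-line slots per process.
 */
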
static struct rb_root uprobes_tree = RB_ROOT;
/*
 * allows us to skip the uprobe_mmap if there are no uprobe events active
 * at this time. Probably a fine grained per inode count is better?
 */
#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */

#define UPROBES_HASH_SZ	13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

static struct percpu_rw_semaphore dup_mmap_sem;

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN	0

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	atomic_t		ref;
	struct rw_semaphore	register_rwsem;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct uprobe_consumer	*consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	loff_t			offset;
	unsigned long		flags;

	/*
	 * The generic code assumes that it has two members of unknown type
	 * owned by the arch-specific code:
	 *
	 * insn -	copy_insn() saves the original instruction here for
	 *		arch_uprobe_analyze_insn().
	 *
	 * ixol -	potentially modified instruction to execute out of
	 *		line, copied to xol_area by xol_get_insn_slot().
	 */
	struct arch_uprobe	arch;
};

/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, the thread contends for a slot. It frees the
 * slot after singlestep. Currently a fixed number of slots are
 * allocated.
 */
struct xol_area {
	wait_queue_head_t		wq;		/* if all slots are busy */
	atomic_t			slot_count;	/* number of in-use slots */
	unsigned long			*bitmap;	/* 0 = free slot */

	struct vm_special_mapping	xol_mapping;
	struct page			*pages[2];
	/*
	 * We keep the vma's vm_start rather than a pointer to the vma
	 * itself. The probed process or a naughty kernel module could make
	 * the vma go away, and we must handle that reasonably gracefully.
	 */
	unsigned long			vaddr;		/* Page(s) of instruction slots */
};

/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return true if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

	if (is_register)
		flags |= VM_WRITE;

	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}

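/*
 * Helpers converting between a file offset and the user virtual address
 * inside a vma. A worked example with made-up numbers: for
 * vm_start == 0x400000, vm_pgoff == 2 and 4KiB pages, file offset
 * 0x3100 maps to 0x400000 + 0x3100 - 0x2000 == 0x401100, and
 * vaddr_to_offset() computes the exact inverse.
 */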
static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}

/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @old_page is mapped at
 * @old_page: the page we are replacing by new_page
 * @new_page: the modified page we replace old_page by
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *old_page, struct page *new_page)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page_vma_mapped_walk pvmw = {
		.page = old_page,
		.vma = vma,
		.address = addr,
	};
	int err;
	/* For mmu_notifiers */
	const unsigned long mmun_start = addr;
	const unsigned long mmun_end = addr + PAGE_SIZE;
	struct mem_cgroup *memcg;

	VM_BUG_ON_PAGE(PageTransHuge(old_page), old_page);

	err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
			false);
	if (err)
		return err;

	/* For try_to_free_swap() and munlock_vma_page() below */
	lock_page(old_page);

	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	err = -EAGAIN;
	if (!page_vma_mapped_walk(&pvmw)) {
		mem_cgroup_cancel_charge(new_page, memcg, false);
		goto unlock;
	}
	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);

	get_page(new_page);
	page_add_new_anon_rmap(new_page, vma, addr, false);
	mem_cgroup_commit_charge(new_page, memcg, false, false);
	lru_cache_add_active_or_unevictable(new_page, vma);

	if (!PageAnon(old_page)) {
		dec_mm_counter(mm, mm_counter_file(old_page));
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
	ptep_clear_flush_notify(vma, addr, pvmw.pte);
	set_pte_at_notify(mm, addr, pvmw.pte,
			mk_pte(new_page, vma->vm_page_prot));

	page_remove_rmap(old_page, false);
	if (!page_mapped(old_page))
		try_to_free_swap(old_page);
	page_vma_mapped_walk_done(&pvmw);

	if (vma->vm_flags & VM_LOCKED)
		munlock_vma_page(old_page);
	put_page(old_page);

	err = 0;
 unlock:
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	unlock_page(old_page);
	return err;
}

/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}

/**
 * is_trap_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_trap_insn
 * Returns true if @insn is a breakpoint instruction.
 *
 * This function is needed for the case where an architecture has multiple
 * trap instructions (like powerpc).
 */
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
	return is_swbp_insn(insn);
}

static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
	kunmap_atomic(kaddr);
}

static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
	kunmap_atomic(kaddr);
}

static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
	uprobe_opcode_t old_opcode;
	bool is_swbp;

	/*
	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
	 * We do not check if it is any other 'trap variant' which could
	 * be conditional trap instruction such as the one powerpc supports.
	 *
	 * The logic is that we do not care if the underlying instruction
	 * is a trap variant; uprobes always wins over any other (gdb)
	 * breakpoint.
	 */
	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
	is_swbp = is_swbp_insn(&old_opcode);

	if (is_swbp_insn(new_opcode)) {
		if (is_swbp)		/* register: already installed? */
			return 0;
	} else {
		if (!is_swbp)		/* unregister: was it changed by us? */
			return 0;
	}

	return 1;
}

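/*
 * In short: verify_opcode() returns 1 when the write should go ahead
 * and 0 when the page already holds the desired state (breakpoint
 * already installed on register, or already restored on unregister);
 * uprobe_write_opcode() below skips the write for return values <= 0.
 */
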
/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable length instructions and the
 * breakpoint instruction is not of the smallest length supported by that
 * architecture, then we need to modify is_trap_at_addr() and
 * uprobe_write_opcode() accordingly. This would never be a problem for archs
 * that have fixed length instructions.
 *
 * uprobe_write_opcode - write the opcode at a given virtual address.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_sem held for write.
 * Return 0 (success) or a negative errno.
 */
int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
			uprobe_opcode_t opcode)
{
	struct page *old_page, *new_page;
	struct vm_area_struct *vma;
	int ret;

retry:
	/* Read the page with vaddr into memory */
	ret = get_user_pages_remote(NULL, mm, vaddr, 1,
			FOLL_FORCE | FOLL_SPLIT, &old_page, &vma, NULL);
	if (ret <= 0)
		return ret;

	ret = verify_opcode(old_page, vaddr, &opcode);
	if (ret <= 0)
		goto put_old;

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_old;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);
	copy_highpage(new_page, old_page);
	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

	ret = __replace_page(vma, vaddr, old_page, new_page);
	put_page(new_page);
put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}

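/*
 * Note that the sequence above never dirties the page cache: the
 * breakpoint is written by allocating a fresh anonymous page, copying
 * the original page into it, patching the opcode there and swapping
 * the pte via __replace_page() - effectively a manual copy-on-write.
 */
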
/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)&auprobe->insn);
}

static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
	atomic_inc(&uprobe->ref);
	return uprobe;
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (atomic_dec_and_test(&uprobe->ref))
		kfree(uprobe);
}

static int match_uprobe(struct uprobe *l, struct uprobe *r)
{
	if (l->inode < r->inode)
		return -1;

	if (l->inode > r->inode)
		return 1;

	if (l->offset < r->offset)
		return -1;

	if (l->offset > r->offset)
		return 1;

	return 0;
}

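/*
 * match_uprobe() thus orders uprobes by (inode, offset), compared
 * lexicographically; __find_uprobe() and __insert_uprobe() below rely
 * on this ordering for their rbtree walks.
 */
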
static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe u = { .inode = inode, .offset = offset };
	struct rb_node *n = uprobes_tree.rb_node;
	struct uprobe *uprobe;
	int match;

	while (n) {
		uprobe = rb_entry(n, struct uprobe, rb_node);
		match = match_uprobe(&u, uprobe);
		if (!match)
			return get_uprobe(uprobe);

		if (match < 0)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	return NULL;
}

/*
 * Find a uprobe corresponding to a given inode:offset
 * Acquires uprobes_treelock
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe;

	spin_lock(&uprobes_treelock);
	uprobe = __find_uprobe(inode, offset);
	spin_unlock(&uprobes_treelock);

	return uprobe;
}

static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node **p = &uprobes_tree.rb_node;
	struct rb_node *parent = NULL;
	struct uprobe *u;
	int match;

	while (*p) {
		parent = *p;
		u = rb_entry(parent, struct uprobe, rb_node);
		match = match_uprobe(uprobe, u);
		if (!match)
			return get_uprobe(u);

		if (match < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;

	}

	u = NULL;
	rb_link_node(&uprobe->rb_node, parent, p);
	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
	/* get access + creation ref */
	atomic_set(&uprobe->ref, 2);

	return u;
}

/*
 * Acquire uprobes_treelock.
 * Matching uprobe already exists in rbtree;
 *	increment (access refcount) and return the matching uprobe.
 *
 * No matching uprobe; insert the uprobe in rb_tree;
 *	get a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	spin_lock(&uprobes_treelock);
	u = __insert_uprobe(uprobe);
	spin_unlock(&uprobes_treelock);

	return u;
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return NULL;

	uprobe->inode = inode;
	uprobe->offset = offset;
	init_rwsem(&uprobe->register_rwsem);
	init_rwsem(&uprobe->consumer_rwsem);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);
	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe) {
		kfree(uprobe);
		uprobe = cur_uprobe;
	}

	return uprobe;
}

static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	uc->next = uprobe->consumers;
	uprobe->consumers = uc;
	up_write(&uprobe->consumer_rwsem);
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if @uc was deleted successfully, false otherwise.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	struct uprobe_consumer **con;
	bool ret = false;

	down_write(&uprobe->consumer_rwsem);
	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
		if (*con == uc) {
			*con = uc->next;
			ret = true;
			break;
		}
	}
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static int __copy_insn(struct address_space *mapping, struct file *filp,
			void *insn, int nbytes, loff_t offset)
{
	struct page *page;
	/*
	 * Ensure that the page that has the original instruction is populated
	 * and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
	 * see uprobe_register().
	 */
	if (mapping->a_ops->readpage)
		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
	else
		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
	if (IS_ERR(page))
		return PTR_ERR(page);

	copy_from_page(page, offset, insn, nbytes);
	put_page(page);

	return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping = uprobe->inode->i_mapping;
	loff_t offs = uprobe->offset;
	void *insn = &uprobe->arch.insn;
	int size = sizeof(uprobe->arch.insn);
	int len, err = -EIO;

	/* Copy only available bytes, -EIO if nothing was read */
	do {
		if (offs >= i_size_read(uprobe->inode))
			break;

		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
		err = __copy_insn(mapping, filp, insn, len, offs);
		if (err)
			break;

		insn += len;
		offs += len;
		size -= len;
	} while (size);

	return err;
}

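/*
 * Note that copy_insn() copies in at-most-page-sized pieces: each pass
 * of the loop stops at the end of the current page, so an instruction
 * image that straddles a page boundary is still read correctly.
 */
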
static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
				struct mm_struct *mm, unsigned long vaddr)
{
	int ret = 0;

	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		return ret;

	/* TODO: move this into _register, until then we abuse this sem. */
	down_write(&uprobe->consumer_rwsem);
	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		goto out;

	ret = copy_insn(uprobe, file);
	if (ret)
		goto out;

	ret = -ENOTSUPP;
	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
		goto out;

	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
	if (ret)
		goto out;

	/* uprobe_write_opcode() assumes we don't cross page boundary */
	BUG_ON((uprobe->offset & ~PAGE_MASK) +
			UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);

	smp_wmb(); /* pairs with rmb() in find_active_uprobe() */
	set_bit(UPROBE_COPY_INSN, &uprobe->flags);

 out:
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static inline bool consumer_filter(struct uprobe_consumer *uc,
				   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	return !uc->filter || uc->filter(uc, ctx, mm);
}

static bool filter_chain(struct uprobe *uprobe,
			 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct uprobe_consumer *uc;
	bool ret = false;

	down_read(&uprobe->consumer_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		ret = consumer_filter(uc, ctx, mm);
		if (ret)
			break;
	}
	up_read(&uprobe->consumer_rwsem);

	return ret;
}

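/*
 * filter_chain() answers "does any consumer care about this mm?".
 * A consumer with a NULL ->filter matches unconditionally, so the loop
 * short-circuits on the first interested consumer.
 */
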
static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	bool first_uprobe;
	int ret;

	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
	if (ret)
		return ret;

	/*
	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
	 * the task can hit this breakpoint right after __replace_page().
	 */
	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
	if (first_uprobe)
		set_bit(MMF_HAS_UPROBES, &mm->flags);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		clear_bit(MMF_HAS_UPROBES, &mm->flags);

	return ret;
}

static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
	set_bit(MMF_RECALC_UPROBES, &mm->flags);
	return set_orig_insn(&uprobe->arch, mm, vaddr);
}

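/*
 * MMF_HAS_UPROBES is a (possibly stale) hint that the mm may contain
 * breakpoints; MMF_RECALC_UPROBES, set on removal above, makes
 * mmf_recalc_uprobes() re-validate and possibly clear that hint the
 * next time an unclaimed breakpoint is hit.
 */
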
static inline bool uprobe_is_active(struct uprobe *uprobe)
{
	return !RB_EMPTY_NODE(&uprobe->rb_node);
}
/*
 * There could be threads that have already hit the breakpoint. They
 * will recheck the current insn and restart if find_uprobe() fails.
 * See find_active_uprobe().
 */
static void delete_uprobe(struct uprobe *uprobe)
{
	if (WARN_ON(!uprobe_is_active(uprobe)))
		return;

	spin_lock(&uprobes_treelock);
	rb_erase(&uprobe->rb_node, &uprobes_tree);
	spin_unlock(&uprobes_treelock);
	RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
	put_uprobe(uprobe);
}

struct map_info {
	struct map_info *next;
	struct mm_struct *mm;
	unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
	struct map_info *next = info->next;
	kfree(info);
	return next;
}

static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct map_info *curr = NULL;
	struct map_info *prev = NULL;
	struct map_info *info;
	int more = 0;

 again:
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		if (!prev && !more) {
			/*
			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
			 * reclaim. This is optimistic, no harm done if it fails.
			 */
			prev = kmalloc(sizeof(struct map_info),
					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (prev)
				prev->next = NULL;
		}
		if (!prev) {
			more++;
			continue;
		}

		if (!mmget_not_zero(vma->vm_mm))
			continue;

		info = prev;
		prev = prev->next;
		info->next = curr;
		curr = info;

		info->mm = vma->vm_mm;
		info->vaddr = offset_to_vaddr(vma, offset);
	}
	i_mmap_unlock_read(mapping);

	if (!more)
		goto out;

	prev = curr;
	while (curr) {
		mmput(curr->mm);
		curr = curr->next;
	}

	do {
		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
		if (!info) {
			curr = ERR_PTR(-ENOMEM);
			goto out;
		}
		info->next = prev;
		prev = info;
	} while (--more);

	goto again;
 out:
	while (prev)
		prev = free_map_info(prev);
	return curr;
}

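/*
 * Why the two-phase allocation above: kmalloc(GFP_KERNEL) may recurse
 * into reclaim and thus into i_mmap_rwsem, so it cannot be used while
 * the lock is held. The walk therefore tries GFP_NOWAIT in place,
 * counts the vmas it could not cover in 'more', then drops the lock,
 * allocates the shortfall with GFP_KERNEL and restarts from scratch.
 */
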
static int
register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
{
	bool is_register = !!new;
	struct map_info *info;
	int err = 0;

	percpu_down_write(&dup_mmap_sem);
	info = build_map_info(uprobe->inode->i_mapping,
					uprobe->offset, is_register);
	if (IS_ERR(info)) {
		err = PTR_ERR(info);
		goto out;
	}

	while (info) {
		struct mm_struct *mm = info->mm;
		struct vm_area_struct *vma;

		if (err && is_register)
			goto free;

		down_write(&mm->mmap_sem);
		vma = find_vma(mm, info->vaddr);
		if (!vma || !valid_vma(vma, is_register) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			goto unlock;

		if (vma->vm_start > info->vaddr ||
		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
			goto unlock;

		if (is_register) {
			/* consult only the "caller", new consumer. */
			if (consumer_filter(new,
					UPROBE_FILTER_REGISTER, mm))
				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
			if (!filter_chain(uprobe,
					UPROBE_FILTER_UNREGISTER, mm))
				err |= remove_breakpoint(uprobe, mm, info->vaddr);
		}

 unlock:
		up_write(&mm->mmap_sem);
 free:
		mmput(mm);
		info = free_map_info(info);
	}
 out:
	percpu_up_write(&dup_mmap_sem);
	return err;
}

static int __uprobe_register(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	consumer_add(uprobe, uc);
	return register_for_each_vma(uprobe, uc);
}

static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	int err;

	if (WARN_ON(!consumer_del(uprobe, uc)))
		return;

	err = register_for_each_vma(uprobe, NULL);
	/* TODO: can't unregister? schedule a worker thread */
	if (!uprobe->consumers && !err)
		delete_uprobe(uprobe);
}

/*
 * uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e. first consumer for a @inode:@offset
 * tuple). Creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. Creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters. Caller of uprobe_register() is required to keep @inode
 * (and the containing mount) referenced.
 *
 * Return errno if it cannot successfully install probes
 * else return 0 (success)
 */
int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	/* Uprobe must have at least one set consumer */
	if (!uc->handler && !uc->ret_handler)
		return -EINVAL;

	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
	if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping))
		return -EIO;
	/* Racy, just to catch the obvious mistakes */
	if (offset > i_size_read(inode))
		return -EINVAL;

 retry:
	uprobe = alloc_uprobe(inode, offset);
	if (!uprobe)
		return -ENOMEM;
	/*
	 * We can race with uprobe_unregister()->delete_uprobe().
	 * Check uprobe_is_active() and retry if it is false.
	 */
	down_write(&uprobe->register_rwsem);
	ret = -EAGAIN;
	if (likely(uprobe_is_active(uprobe))) {
		ret = __uprobe_register(uprobe, uc);
		if (ret)
			__uprobe_unregister(uprobe, uc);
	}
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}
EXPORT_SYMBOL_GPL(uprobe_register);

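/*
 * A minimal usage sketch (hypothetical consumer, not part of this
 * file):
 *
 *	static int my_handler(struct uprobe_consumer *self,
 *			      struct pt_regs *regs)
 *	{
 *		return 0;	// 0 keeps the breakpoint installed
 *	}
 *
 *	static struct uprobe_consumer my_uc = { .handler = my_handler };
 *
 *	err = uprobe_register(inode, offset, &my_uc);
 *	...
 *	uprobe_unregister(inode, offset, &my_uc);
 *
 * where the caller keeps @inode (and its mount) referenced in between.
 */
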
/*
 * uprobe_apply - add or remove the breakpoints of an already registered probe.
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: consumer which wants to add more or remove some breakpoints
 * @add: add or remove the breakpoints
 */
int uprobe_apply(struct inode *inode, loff_t offset,
			struct uprobe_consumer *uc, bool add)
{
	struct uprobe *uprobe;
	struct uprobe_consumer *con;
	int ret = -ENOENT;

	uprobe = find_uprobe(inode, offset);
	if (WARN_ON(!uprobe))
		return ret;

	down_write(&uprobe->register_rwsem);
	for (con = uprobe->consumers; con && con != uc ; con = con->next)
		;
	if (con)
		ret = register_for_each_vma(uprobe, add ? uc : NULL);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);

	return ret;
}

/*
 * uprobe_unregister - unregister an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;

	uprobe = find_uprobe(inode, offset);
	if (WARN_ON(!uprobe))
		return;

	down_write(&uprobe->register_rwsem);
	__uprobe_unregister(uprobe, uc);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister);

static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	int err = 0;

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long vaddr;
		loff_t offset;

		if (!valid_vma(vma, false) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			continue;

		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
		if (uprobe->offset < offset ||
		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
			continue;

		vaddr = offset_to_vaddr(vma, uprobe->offset);
		err |= remove_breakpoint(uprobe, mm, vaddr);
	}
	up_read(&mm->mmap_sem);

	return err;
}

996
891c3970
ON
997static struct rb_node *
998find_node_in_range(struct inode *inode, loff_t min, loff_t max)
2b144498 999{
2b144498 1000 struct rb_node *n = uprobes_tree.rb_node;
2b144498
SD
1001
1002 while (n) {
891c3970 1003 struct uprobe *u = rb_entry(n, struct uprobe, rb_node);
2b144498 1004
891c3970 1005 if (inode < u->inode) {
2b144498 1006 n = n->rb_left;
891c3970 1007 } else if (inode > u->inode) {
2b144498 1008 n = n->rb_right;
891c3970
ON
1009 } else {
1010 if (max < u->offset)
1011 n = n->rb_left;
1012 else if (min > u->offset)
1013 n = n->rb_right;
1014 else
1015 break;
1016 }
2b144498 1017 }
7b2d81d4 1018
891c3970 1019 return n;
2b144498
SD
1020}
1021
/*
 * For a given range in vma, build a list of probes that need to be inserted.
 */
static void build_probe_list(struct inode *inode,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct list_head *head)
{
	loff_t min, max;
	struct rb_node *n, *t;
	struct uprobe *u;

	INIT_LIST_HEAD(head);
	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	if (n) {
		for (t = n; t; t = rb_prev(t)) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset < min)
				break;
			list_add(&u->pending_list, head);
			get_uprobe(u);
		}
		for (t = n; (t = rb_next(t)); ) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset > max)
				break;
			list_add(&u->pending_list, head);
			get_uprobe(u);
		}
	}
	spin_unlock(&uprobes_treelock);
}

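/*
 * find_node_in_range() returns an arbitrary node inside [min, max], so
 * build_probe_list() walks from it in both directions (rb_prev() and
 * rb_next()) to collect every probe falling inside the range.
 */
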
/*
 * Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
 *
 * Currently we ignore all errors and always return 0; the callers
 * can't handle the failure anyway.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;

	if (no_uprobe_events() || !valid_vma(vma, true))
		return 0;

	inode = file_inode(vma->vm_file);
	if (!inode)
		return 0;

	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
	/*
	 * We can race with uprobe_unregister(), this uprobe can be already
	 * removed. But in this case filter_chain() must return false, all
	 * consumers have gone away.
	 */
	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		if (!fatal_signal_pending(current) &&
		    filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
		}
		put_uprobe(uprobe);
	}
	mutex_unlock(uprobes_mmap_hash(inode));

	return 0;
}

static bool
vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	loff_t min, max;
	struct inode *inode;
	struct rb_node *n;

	inode = file_inode(vma->vm_file);

	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	spin_unlock(&uprobes_treelock);

	return !!n;
}

/*
 * Called in context of a munmap of a vma.
 */
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (no_uprobe_events() || !valid_vma(vma, false))
		return;

	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
		return;

	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
		return;

	if (vma_has_uprobes(vma, start, end))
		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
}

/* Slot allocation for XOL */
static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
{
	struct vm_area_struct *vma;
	int ret;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	if (mm->uprobes_state.xol_area) {
		ret = -EALREADY;
		goto fail;
	}

	if (!area->vaddr) {
		/* Try to map as high as possible, this is only a hint. */
		area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
						PAGE_SIZE, 0, 0);
		if (area->vaddr & ~PAGE_MASK) {
			ret = area->vaddr;
			goto fail;
		}
	}

	vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
				&area->xol_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto fail;
	}

	ret = 0;
	/* pairs with get_xol_area() */
	smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
 fail:
	up_write(&mm->mmap_sem);

	return ret;
}

static struct xol_area *__create_xol_area(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	uprobe_opcode_t insn = UPROBE_SWBP_INSN;
	struct xol_area *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		goto out;

	area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
	if (!area->bitmap)
		goto free_area;

	area->xol_mapping.name = "[uprobes]";
	area->xol_mapping.fault = NULL;
	area->xol_mapping.pages = area->pages;
	area->pages[0] = alloc_page(GFP_HIGHUSER);
	if (!area->pages[0])
		goto free_bitmap;
	area->pages[1] = NULL;

	area->vaddr = vaddr;
	init_waitqueue_head(&area->wq);
	/* Reserve the 1st slot for get_trampoline_vaddr() */
	set_bit(0, area->bitmap);
	atomic_set(&area->slot_count, 1);
	arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);

	if (!xol_add_vma(mm, area))
		return area;

	__free_page(area->pages[0]);
 free_bitmap:
	kfree(area->bitmap);
 free_area:
	kfree(area);
 out:
	return NULL;
}

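/*
 * Note the reservation above: slot 0 of every xol_area is pre-filled
 * with a breakpoint instruction and serves as the uretprobe return
 * trampoline (see get_trampoline_vaddr() and prepare_uretprobe()).
 */
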
/*
 * get_xol_area - Allocate process's xol_area if necessary.
 * This area will be used for storing instructions for execution out of line.
 *
 * Returns the allocated area or NULL.
 */
static struct xol_area *get_xol_area(void)
{
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	if (!mm->uprobes_state.xol_area)
		__create_xol_area(0);

	/* Pairs with xol_add_vma() smp_store_release() */
	area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */
	return area;
}

/*
 * uprobe_clear_state - Free the area allocated for slots.
 */
void uprobe_clear_state(struct mm_struct *mm)
{
	struct xol_area *area = mm->uprobes_state.xol_area;

	if (!area)
		return;

	put_page(area->pages[0]);
	kfree(area->bitmap);
	kfree(area);
}

void uprobe_start_dup_mmap(void)
{
	percpu_down_read(&dup_mmap_sem);
}

void uprobe_end_dup_mmap(void)
{
	percpu_up_read(&dup_mmap_sem);
}

void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
		set_bit(MMF_HAS_UPROBES, &newmm->flags);
		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
	}
}

/*
 * - search for a free slot.
 */
static unsigned long xol_take_insn_slot(struct xol_area *area)
{
	unsigned long slot_addr;
	int slot_nr;

	do {
		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
		if (slot_nr < UINSNS_PER_PAGE) {
			if (!test_and_set_bit(slot_nr, area->bitmap))
				break;

			slot_nr = UINSNS_PER_PAGE;
			continue;
		}
		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
	} while (slot_nr >= UINSNS_PER_PAGE);

	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
	atomic_inc(&area->slot_count);

	return slot_addr;
}

/*
 * xol_get_insn_slot - allocate a slot for xol.
 * Returns the allocated slot address or 0.
 */
static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
{
	struct xol_area *area;
	unsigned long xol_vaddr;

	area = get_xol_area();
	if (!area)
		return 0;

	xol_vaddr = xol_take_insn_slot(area);
	if (unlikely(!xol_vaddr))
		return 0;

	arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));

	return xol_vaddr;
}

/*
 * xol_free_insn_slot - If slot was earlier allocated by
 * @xol_get_insn_slot(), make the slot available for
 * subsequent requests.
 */
static void xol_free_insn_slot(struct task_struct *tsk)
{
	struct xol_area *area;
	unsigned long vma_end;
	unsigned long slot_addr;

	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
		return;

	slot_addr = tsk->utask->xol_vaddr;
	if (unlikely(!slot_addr))
		return;

	area = tsk->mm->uprobes_state.xol_area;
	vma_end = area->vaddr + PAGE_SIZE;
	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
		unsigned long offset;
		int slot_nr;

		offset = slot_addr - area->vaddr;
		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
		if (slot_nr >= UINSNS_PER_PAGE)
			return;

		clear_bit(slot_nr, area->bitmap);
		atomic_dec(&area->slot_count);
		smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
		if (waitqueue_active(&area->wq))
			wake_up(&area->wq);

		tsk->utask->xol_vaddr = 0;
	}
}

void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
				  void *src, unsigned long len)
{
	/* Initialize the slot */
	copy_to_page(page, vaddr, src, len);

	/*
	 * We probably need flush_icache_user_range() but it needs vma.
	 * This should work on most of architectures by default. If
	 * architecture needs to do something different it can define
	 * its own version of the function.
	 */
	flush_dcache_page(page);
}

/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */
unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
}

unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (unlikely(utask && utask->active_uprobe))
		return utask->vaddr;

	return instruction_pointer(regs);
}

static struct return_instance *free_ret_instance(struct return_instance *ri)
{
	struct return_instance *next = ri->next;
	put_uprobe(ri->uprobe);
	kfree(ri);
	return next;
}

/*
 * Called with no locks held.
 * Called in context of an exiting or an exec-ing thread.
 */
void uprobe_free_utask(struct task_struct *t)
{
	struct uprobe_task *utask = t->utask;
	struct return_instance *ri;

	if (!utask)
		return;

	if (utask->active_uprobe)
		put_uprobe(utask->active_uprobe);

	ri = utask->return_instances;
	while (ri)
		ri = free_ret_instance(ri);

	xol_free_insn_slot(t);
	kfree(utask);
	t->utask = NULL;
}

/*
 * Allocate a uprobe_task object for the task if necessary.
 * Called when the thread hits a breakpoint.
 *
 * Returns:
 * - pointer to new uprobe_task on success
 * - NULL otherwise
 */
static struct uprobe_task *get_utask(void)
{
	if (!current->utask)
		current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
	return current->utask;
}

static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
{
	struct uprobe_task *n_utask;
	struct return_instance **p, *o, *n;

	n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
	if (!n_utask)
		return -ENOMEM;
	t->utask = n_utask;

	p = &n_utask->return_instances;
	for (o = o_utask->return_instances; o; o = o->next) {
		n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
		if (!n)
			return -ENOMEM;

		*n = *o;
		get_uprobe(n->uprobe);
		n->next = NULL;

		*p = n;
		p = &n->next;
		n_utask->depth++;
	}

	return 0;
}

static void uprobe_warn(struct task_struct *t, const char *msg)
{
	pr_warn("uprobe: %s:%d failed to %s\n",
			current->comm, current->pid, msg);
}

static void dup_xol_work(struct callback_head *work)
{
	if (current->flags & PF_EXITING)
		return;

	if (!__create_xol_area(current->utask->dup_xol_addr) &&
			!fatal_signal_pending(current))
		uprobe_warn(current, "dup xol area");
}

/*
 * Called in context of a new clone/fork from copy_process.
 */
void uprobe_copy_process(struct task_struct *t, unsigned long flags)
{
	struct uprobe_task *utask = current->utask;
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	t->utask = NULL;

	if (!utask || !utask->return_instances)
		return;

	if (mm == t->mm && !(flags & CLONE_VFORK))
		return;

	if (dup_utask(t, utask))
		return uprobe_warn(t, "dup ret instances");

	/* The task can fork() after dup_xol_work() fails */
	area = mm->uprobes_state.xol_area;
	if (!area)
		return uprobe_warn(t, "dup xol area");

	if (mm == t->mm)
		return;

	t->utask->dup_xol_addr = area->vaddr;
	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
	task_work_add(t, &t->utask->dup_xol_work, true);
}

/*
 * Current area->vaddr notion assumes the trampoline address is always
 * equal to area->vaddr.
 *
 * Returns -1 in case the xol_area is not allocated.
 */
static unsigned long get_trampoline_vaddr(void)
{
	struct xol_area *area;
	unsigned long trampoline_vaddr = -1;

	/* Pairs with xol_add_vma() smp_store_release() */
	area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */
	if (area)
		trampoline_vaddr = area->vaddr;

	return trampoline_vaddr;
}

static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
					struct pt_regs *regs)
{
	struct return_instance *ri = utask->return_instances;
	enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;

	while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
		ri = free_ret_instance(ri);
		utask->depth--;
	}
	utask->return_instances = ri;
}

static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct return_instance *ri;
	struct uprobe_task *utask;
	unsigned long orig_ret_vaddr, trampoline_vaddr;
	bool chained;

	if (!get_xol_area())
		return;

	utask = get_utask();
	if (!utask)
		return;

	if (utask->depth >= MAX_URETPROBE_DEPTH) {
		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
				" nestedness limit pid/tgid=%d/%d\n",
				current->pid, current->tgid);
		return;
	}

	ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
	if (!ri)
		return;

	trampoline_vaddr = get_trampoline_vaddr();
	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
	if (orig_ret_vaddr == -1)
		goto fail;

	/* drop the entries invalidated by longjmp() */
	chained = (orig_ret_vaddr == trampoline_vaddr);
	cleanup_return_instances(utask, chained, regs);

	/*
	 * We don't want to keep the trampoline address on the stack, rather
	 * keep the original return address of the first caller through all
	 * the subsequent instances. This also makes breakpoint unwrapping
	 * easier.
	 */
	if (chained) {
		if (!utask->return_instances) {
			/*
			 * This situation is not possible. Likely we have an
			 * attack from user-space.
			 */
			uprobe_warn(current, "handle tail call");
			goto fail;
		}
		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
	}

	ri->uprobe = get_uprobe(uprobe);
	ri->func = instruction_pointer(regs);
	ri->stack = user_stack_pointer(regs);
	ri->orig_ret_vaddr = orig_ret_vaddr;
	ri->chained = chained;

	utask->depth++;
	ri->next = utask->return_instances;
	utask->return_instances = ri;

	return;
 fail:
	kfree(ri);
}

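/*
 * The uretprobe scheme in one paragraph: prepare_uretprobe() replaces
 * the return address on the user stack with the trampoline (xol_area
 * slot 0, which holds a breakpoint instruction) and saves the original
 * in a return_instance. When the probed function returns, the task
 * traps on the trampoline and handle_trampoline() below restores the
 * original control flow and invokes each consumer's ->ret_handler.
 */
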
/* Prepare to single-step probed instruction out of line. */
static int
pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
{
	struct uprobe_task *utask;
	unsigned long xol_vaddr;
	int err;

	utask = get_utask();
	if (!utask)
		return -ENOMEM;

	xol_vaddr = xol_get_insn_slot(uprobe);
	if (!xol_vaddr)
		return -ENOMEM;

	utask->xol_vaddr = xol_vaddr;
	utask->vaddr = bp_vaddr;

	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
	if (unlikely(err)) {
		xol_free_insn_slot(current);
		return err;
	}

	utask->active_uprobe = uprobe;
	utask->state = UTASK_SSTEP;
	return 0;
}

/*
 * If we are singlestepping, then ensure this thread is not connected to
 * non-fatal signals until completion of singlestep. When xol insn itself
 * triggers the signal, restart the original insn even if the task is
 * already SIGKILL'ed (since coredump should report the correct ip). This
 * is even more important if the task has a handler for SIGSEGV/etc. The
 * _same_ instruction should be repeated again after return from the signal
 * handler, and SSTEP can never finish in this case.
 */
bool uprobe_deny_signal(void)
{
	struct task_struct *t = current;
	struct uprobe_task *utask = t->utask;

	if (likely(!utask || !utask->active_uprobe))
		return false;

	WARN_ON_ONCE(utask->state != UTASK_SSTEP);

	if (signal_pending(t)) {
		spin_lock_irq(&t->sighand->siglock);
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
		spin_unlock_irq(&t->sighand->siglock);

		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
			utask->state = UTASK_SSTEP_TRAPPED;
			set_tsk_thread_flag(t, TIF_UPROBE);
		}
	}

	return true;
}

static void mmf_recalc_uprobes(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!valid_vma(vma, false))
			continue;
		/*
		 * This is not strictly accurate, we can race with
		 * uprobe_unregister() and see the already removed
		 * uprobe if delete_uprobe() was not yet called.
		 * Or this uprobe can be filtered out.
		 */
		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
			return;
	}

	clear_bit(MMF_HAS_UPROBES, &mm->flags);
}

static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
	struct page *page;
	uprobe_opcode_t opcode;
	int result;

	pagefault_disable();
	result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
	pagefault_enable();

	if (likely(result == 0))
		goto out;

	/*
	 * The NULL 'tsk' here ensures that any faults that occur here
	 * will not be accounted to the task. 'mm' *is* current->mm,
	 * but we treat this as a 'remote' access since it is
	 * essentially a kernel access to the memory.
	 */
	result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page,
			NULL, NULL);
	if (result < 0)
		return result;

	copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
	put_page(page);
 out:
	/* This needs to return true for any variant of the trap insn */
	return is_trap_insn(&opcode);
}

d790d346 1731static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
0326f5a9 1732{
3a9ea052
ON
1733 struct mm_struct *mm = current->mm;
1734 struct uprobe *uprobe = NULL;
0326f5a9 1735 struct vm_area_struct *vma;
0326f5a9 1736
0326f5a9
SD
1737 down_read(&mm->mmap_sem);
1738 vma = find_vma(mm, bp_vaddr);
3a9ea052
ON
1739 if (vma && vma->vm_start <= bp_vaddr) {
1740 if (valid_vma(vma, false)) {
f281769e 1741 struct inode *inode = file_inode(vma->vm_file);
cb113b47 1742 loff_t offset = vaddr_to_offset(vma, bp_vaddr);
0326f5a9 1743
3a9ea052
ON
1744 uprobe = find_uprobe(inode, offset);
1745 }
d790d346
ON
1746
1747 if (!uprobe)
0908ad6e 1748 *is_swbp = is_trap_at_addr(mm, bp_vaddr);
d790d346
ON
1749 } else {
1750 *is_swbp = -EFAULT;
0326f5a9 1751 }
499a4f3e
ON
1752
1753 if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
1754 mmf_recalc_uprobes(mm);
0326f5a9
SD
1755 up_read(&mm->mmap_sem);
1756
3a9ea052
ON
1757 return uprobe;
1758}
1759
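/*
 * For reference, vaddr_to_offset() used above (defined earlier in this
 * file) is the file-offset inverse of the vma mapping; in essence:
 *
 *	offset = ((loff_t)vma->vm_pgoff << PAGE_SHIFT)
 *			+ (vaddr - vma->vm_start);
 */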
da1816b1
ON
1760static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
1761{
1762 struct uprobe_consumer *uc;
1763 int remove = UPROBE_HANDLER_REMOVE;
0dfd0eb8 1764 bool need_prep = false; /* prepare return uprobe, when needed */
da1816b1
ON
1765
1766 down_read(&uprobe->register_rwsem);
1767 for (uc = uprobe->consumers; uc; uc = uc->next) {
ea024870 1768 int rc = 0;
da1816b1 1769
ea024870
AA
1770 if (uc->handler) {
1771 rc = uc->handler(uc, regs);
1772 WARN(rc & ~UPROBE_HANDLER_MASK,
1773 "bad rc=0x%x from %pf()\n", rc, uc->handler);
1774 }
0dfd0eb8
AA
1775
1776 if (uc->ret_handler)
1777 need_prep = true;
1778
da1816b1
ON
1779 remove &= rc;
1780 }
1781
0dfd0eb8
AA
1782 if (need_prep && !remove)
1783 prepare_uretprobe(uprobe, regs); /* put bp at return */
1784
da1816b1
ON
1785 if (remove && uprobe->consumers) {
1786 WARN_ON(!uprobe_is_active(uprobe));
1787 unapply_uprobe(uprobe, current->mm);
1788 }
1789 up_read(&uprobe->register_rwsem);
1790}
1791
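/*
 * Illustrative sketch, not part of this file: a minimal consumer as
 * handler_chain() would walk it. The names my_handler and my_consumer
 * are made up; uprobe_register() is the real registration entry point
 * declared in <linux/uprobes.h>. Returning UPROBE_HANDLER_REMOVE from
 * ->handler instead of 0 would make the loop above unapply the
 * breakpoint from current->mm.
 */
#include <linux/printk.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>

static int my_handler(struct uprobe_consumer *self, struct pt_regs *regs)
{
	pr_info("uprobe hit, ip=%lx\n", instruction_pointer(regs));
	return 0;				/* keep the breakpoint */
}

static struct uprobe_consumer my_consumer = {
	.handler	= my_handler,
	/* no ->ret_handler, so need_prep above stays false */
};

/* uprobe_register(inode, offset, &my_consumer) links it into ->consumers. */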
fec8898d
AA
1792static void
1793handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
1794{
1795 struct uprobe *uprobe = ri->uprobe;
1796 struct uprobe_consumer *uc;
1797
1798 down_read(&uprobe->register_rwsem);
1799 for (uc = uprobe->consumers; uc; uc = uc->next) {
1800 if (uc->ret_handler)
1801 uc->ret_handler(uc, ri->func, regs);
1802 }
1803 up_read(&uprobe->register_rwsem);
1804}
1805
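/*
 * Companion to the sketch after handler_chain(): an illustrative
 * ->ret_handler matching the call above. 'func' receives ri->func, the
 * entry address of the probed function; regs_return_value() is the
 * usual accessor for the return value at this point. my_ret_handler is
 * a made-up name.
 */
static int my_ret_handler(struct uprobe_consumer *self, unsigned long func,
			  struct pt_regs *regs)
{
	pr_info("return from %lx, retval = %lx\n",
		func, regs_return_value(regs));
	return 0;
}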
a83cfeb9
ON
1806static struct return_instance *find_next_ret_chain(struct return_instance *ri)
1807{
1808 bool chained;
1809
1810 do {
1811 chained = ri->chained;
1812 ri = ri->next; /* can't be NULL if chained */
1813 } while (chained);
1814
1815 return ri;
1816}
1817
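/*
 * Worked example (illustrative): if uretprobed f() tail-calls
 * uretprobed g(), g() is entered with the return address already
 * pointing at the trampoline, so prepare_uretprobe() marks g's
 * instance as chained and both frames share one real return address:
 *
 *	return_instances: g (chained) -> f (!chained) -> <next chain>
 *
 * find_next_ret_chain(g) skips the whole run and returns the head of
 * the next chain (or NULL), so handle_trampoline() below can pop one
 * full chain per trampoline hit.
 */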
0b5256c7 1818static void handle_trampoline(struct pt_regs *regs)
fec8898d
AA
1819{
1820 struct uprobe_task *utask;
a83cfeb9 1821 struct return_instance *ri, *next;
5eeb50de 1822 bool valid;
fec8898d
AA
1823
1824 utask = current->utask;
1825 if (!utask)
0b5256c7 1826 goto sigill;
fec8898d
AA
1827
1828 ri = utask->return_instances;
1829 if (!ri)
0b5256c7 1830 goto sigill;
fec8898d 1831
a83cfeb9 1832 do {
5eeb50de
ON
1833 /*
1834 * We should throw out the frames invalidated by longjmp().
1835 * If this chain is valid, then the next one should be alive
1836 * or NULL; the latter case means that nobody but ri->func
1837 * could hit this trampoline on return. TODO: sigaltstack().
1838 */
1839 next = find_next_ret_chain(ri);
86dcb702 1840 valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);
5eeb50de
ON
1841
1842 instruction_pointer_set(regs, ri->orig_ret_vaddr);
1843 do {
1844 if (valid)
1845 handle_uretprobe_chain(ri, regs);
1846 ri = free_ret_instance(ri);
1847 utask->depth--;
1848 } while (ri != next);
1849 } while (!valid);
fec8898d
AA
1850
1851 utask->return_instances = ri;
0b5256c7
ON
1852 return;
1853
1854 sigill:
1855 uprobe_warn(current, "handle uretprobe, sending SIGILL.");
1856 force_sig_info(SIGILL, SEND_SIG_FORCED, current);
fec8898d 1857
fec8898d
AA
1858}
1859
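/*
 * Worked example of the validity check above (illustrative):
 * uretprobed a() calls uretprobed b(), and b() longjmp()s back into
 * a(), so b()'s frame is gone by the time a() returns through the
 * trampoline. return_instances is then b -> a:
 *
 *  - for b's chain: next is a, but the stack recorded in a's instance
 *    has already been popped past, so arch_uretprobe_is_alive() fails;
 *    b's frame is freed with valid == false and no ret_handler runs.
 *  - for a's chain: next is NULL, so valid is true; a's ret_handlers
 *    run and the ip is restored to a's original return address.
 */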
6fe50a28
DL
1860bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
1861{
1862 return false;
1863}
1864
86dcb702
ON
1865bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
1866 struct pt_regs *regs)
97da8976
ON
1867{
1868 return true;
1869}
1870
3a9ea052
ON
1871/*
1872 * Run the handlers and ask the thread to singlestep.
1873 * Ensure that non-fatal signals cannot interrupt the thread while it singlesteps.
1874 */
1875static void handle_swbp(struct pt_regs *regs)
1876{
3a9ea052
ON
1877 struct uprobe *uprobe;
1878 unsigned long bp_vaddr;
56bb4cf6 1879 int uninitialized_var(is_swbp);
3a9ea052
ON
1880
1881 bp_vaddr = uprobe_get_swbp_addr(regs);
0b5256c7
ON
1882 if (bp_vaddr == get_trampoline_vaddr())
1883 return handle_trampoline(regs);
fec8898d
AA
1884
1885 uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
0326f5a9 1886 if (!uprobe) {
56bb4cf6
ON
1887 if (is_swbp > 0) {
1888 /* No matching uprobe; signal SIGTRAP. */
1889 send_sig(SIGTRAP, current, 0);
1890 } else {
1891 /*
1892 * Either we raced with uprobe_unregister() or we can't
1893 * access this memory. The latter is only possible if
1894 * another thread plays with our ->mm. In both cases
1895 * we can simply restart. If this vma was unmapped we
1896 * can pretend this insn was not executed yet and get
1897 * the (correct) SIGSEGV after restart.
1898 */
1899 instruction_pointer_set(regs, bp_vaddr);
1900 }
0326f5a9
SD
1901 return;
1902 }
74e59dfc
ON
1903
1904 /* change it in advance for ->handler() and restart */
1905 instruction_pointer_set(regs, bp_vaddr);
1906
142b18dd
ON
1907 /*
1908 * TODO: move copy_insn/etc into _register and remove this hack.
1909 * After we hit the bp, _unregister + _register can install the
1910 * new and not-yet-analyzed uprobe at the same address; restart.
1911 */
1912 smp_rmb(); /* pairs with wmb() in install_breakpoint() */
71434f2f 1913 if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
74e59dfc 1914 goto out;
0326f5a9 1915
72fd293a
ON
1916 /* Tracing handlers use ->utask to communicate with fetch methods */
1917 if (!get_utask())
1918 goto out;
1919
6fe50a28
DL
1920 if (arch_uprobe_ignore(&uprobe->arch, regs))
1921 goto out;
1922
0326f5a9 1923 handler_chain(uprobe, regs);
6fe50a28 1924
8a6b1732 1925 if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
0578a970 1926 goto out;
0326f5a9 1927
608e7427 1928 if (!pre_ssout(uprobe, regs, bp_vaddr))
0326f5a9 1929 return;
0326f5a9 1930
8a6b1732 1931 /* arch_uprobe_skip_sstep() succeeded, or restart if we can't singlestep */
0578a970 1932out:
8bd87445 1933 put_uprobe(uprobe);
0326f5a9
SD
1934}
1935
1936/*
1937 * Perform required fix-ups and disable singlestep.
1938 * Allow pending signals to take effect.
1939 */
1940static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
1941{
1942 struct uprobe *uprobe;
014940ba 1943 int err = 0;
0326f5a9
SD
1944
1945 uprobe = utask->active_uprobe;
1946 if (utask->state == UTASK_SSTEP_ACK)
014940ba 1947 err = arch_uprobe_post_xol(&uprobe->arch, regs);
0326f5a9
SD
1948 else if (utask->state == UTASK_SSTEP_TRAPPED)
1949 arch_uprobe_abort_xol(&uprobe->arch, regs);
1950 else
1951 WARN_ON_ONCE(1);
1952
1953 put_uprobe(uprobe);
1954 utask->active_uprobe = NULL;
1955 utask->state = UTASK_RUNNING;
d4b3b638 1956 xol_free_insn_slot(current);
0326f5a9
SD
1957
1958 spin_lock_irq(&current->sighand->siglock);
1959 recalc_sigpending(); /* see uprobe_deny_signal() */
1960 spin_unlock_irq(&current->sighand->siglock);
014940ba
ON
1961
1962 if (unlikely(err)) {
1963 uprobe_warn(current, "execute the probed insn, sending SIGILL.");
1964 force_sig_info(SIGILL, SEND_SIG_FORCED, current);
1965 }
0326f5a9
SD
1966}
1967
1968/*
1b08e907
ON
1969 * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
1970 * allows the thread to return from interrupt. After that handle_swbp()
1971 * sets utask->active_uprobe.
0326f5a9 1972 *
1b08e907
ON
1973 * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
1974 * and allows the thread to return from interrupt.
0326f5a9
SD
1975 *
1976 * While returning to userspace, thread notices the TIF_UPROBE flag and calls
1977 * uprobe_notify_resume().
1978 */
1979void uprobe_notify_resume(struct pt_regs *regs)
1980{
1981 struct uprobe_task *utask;
1982
db023ea5
ON
1983 clear_thread_flag(TIF_UPROBE);
1984
0326f5a9 1985 utask = current->utask;
1b08e907 1986 if (utask && utask->active_uprobe)
0326f5a9 1987 handle_singlestep(utask, regs);
1b08e907
ON
1988 else
1989 handle_swbp(regs);
0326f5a9
SD
1990}
1991
1992/*
1993 * uprobe_pre_sstep_notifier gets called from interrupt context as part of the
1994 * notifier mechanism. Set the TIF_UPROBE flag and indicate a breakpoint hit.
1995 */
1996int uprobe_pre_sstep_notifier(struct pt_regs *regs)
1997{
0dfd0eb8
AA
1998 if (!current->mm)
1999 return 0;
2000
2001 if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
2002 (!current->utask || !current->utask->return_instances))
0326f5a9
SD
2003 return 0;
2004
0326f5a9 2005 set_thread_flag(TIF_UPROBE);
0326f5a9
SD
2006 return 1;
2007}
2008
2009/*
2010 * uprobe_post_sstep_notifier gets called in interrupt context as part of the
2011 * notifier mechanism. Set the TIF_UPROBE flag and indicate singlestep completion.
2012 */
2013int uprobe_post_sstep_notifier(struct pt_regs *regs)
2014{
2015 struct uprobe_task *utask = current->utask;
2016
2017 if (!current->mm || !utask || !utask->active_uprobe)
2018 /* task is currently not uprobed */
2019 return 0;
2020
2021 utask->state = UTASK_SSTEP_ACK;
2022 set_thread_flag(TIF_UPROBE);
2023 return 1;
2024}
2025
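/*
 * Putting the pieces together, an editorial summary of the functions
 * above (one breakpoint hit end to end):
 *
 *	swbp trap  -> uprobe_pre_sstep_notifier(): sets TIF_UPROBE
 *	exit to user -> uprobe_notify_resume() -> handle_swbp():
 *		runs handler_chain(), then pre_ssout() arms an XOL slot
 *	sstep trap -> uprobe_post_sstep_notifier(): UTASK_SSTEP_ACK
 *	exit to user -> uprobe_notify_resume() -> handle_singlestep():
 *		arch_uprobe_post_xol() fixups, task resumes after the insn
 */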
2026static struct notifier_block uprobe_exception_nb = {
2027 .notifier_call = arch_uprobe_exception_notify,
2028 .priority = INT_MAX-1, /* notified after kprobes, kgdb */
2029};
2030
2b144498
SD
2031static int __init init_uprobes(void)
2032{
2033 int i;
2034
66d06dff 2035 for (i = 0; i < UPROBES_HASH_SZ; i++)
2b144498 2036 mutex_init(&uprobes_mmap_mutex[i]);
0326f5a9 2037
32cdba1e
ON
2038 if (percpu_init_rwsem(&dup_mmap_sem))
2039 return -ENOMEM;
2040
0326f5a9 2041 return register_die_notifier(&uprobe_exception_nb);
2b144498 2042}
736e89d9 2043__initcall(init_uprobes);