| 1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
| 2 | #ifndef _LINUX_UPROBES_H |
| 3 | #define _LINUX_UPROBES_H |
| 4 | /* |
| 5 | * User-space Probes (UProbes) |
| 6 | * |
| 7 | * Copyright (C) IBM Corporation, 2008-2012 |
| 8 | * Authors: |
| 9 | * Srikar Dronamraju |
| 10 | * Jim Keniston |
| 11 | * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra |
| 12 | */ |
| 13 | |
| 14 | #include <linux/errno.h> |
| 15 | #include <linux/rbtree.h> |
| 16 | #include <linux/types.h> |
| 17 | #include <linux/wait.h> |
| 18 | #include <linux/timer.h> |
| 19 | #include <linux/seqlock.h> |
| 20 | |
| 21 | struct uprobe; |
| 22 | struct vm_area_struct; |
| 23 | struct mm_struct; |
| 24 | struct inode; |
| 25 | struct notifier_block; |
| 26 | struct page; |
| 27 | |
| 28 | /* |
| 29 | * Allowed return values from uprobe consumer's handler callback |
| 30 | * with following meaning: |
| 31 | * |
| 32 | * UPROBE_HANDLER_REMOVE |
| 33 | * - Remove the uprobe breakpoint from current->mm. |
| 34 | * UPROBE_HANDLER_IGNORE |
| 35 | * - Ignore ret_handler callback for this consumer. |
| 36 | */ |
| 37 | #define UPROBE_HANDLER_REMOVE 1 |
| 38 | #define UPROBE_HANDLER_IGNORE 2 |
| 39 | |
| 40 | #define MAX_URETPROBE_DEPTH 64 |
| 41 | |
| 42 | #define UPROBE_NO_TRAMPOLINE_VADDR (~0UL) |
| 43 | |
struct uprobe_consumer {
	/*
	 * handler() can return UPROBE_HANDLER_REMOVE to signal the need to
	 * unregister uprobe for current process. If UPROBE_HANDLER_REMOVE is
	 * returned, filter() callback has to be implemented as well and it
	 * should return false to "confirm" the decision to uninstall uprobe
	 * for the current process. If filter() is omitted or returns true,
	 * UPROBE_HANDLER_REMOVE is effectively ignored.
	 */
	int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs, __u64 *data);
	/* optional; called on return from the probed function (uretprobe) */
	int (*ret_handler)(struct uprobe_consumer *self,
				unsigned long func,
				struct pt_regs *regs, __u64 *data);
	/* optional; whether this consumer is interested in the given mm */
	bool (*filter)(struct uprobe_consumer *self, struct mm_struct *mm);

	/* membership in the owning uprobe's consumer list; managed by the core */
	struct list_head cons_node;

	__u64 id;	/* set when uprobe_consumer is registered */
};
| 63 | |
| 64 | #ifdef CONFIG_UPROBES |
| 65 | #include <asm/uprobes.h> |
| 66 | |
/*
 * Phases a task goes through while handling a uprobe hit
 * (see struct uprobe_task below).
 */
enum uprobe_task_state {
	UTASK_RUNNING,		/* no uprobe handling in progress */
	UTASK_SSTEP,		/* single-stepping the out-of-line instruction copy */
	UTASK_SSTEP_ACK,	/* single-step done; post-step processing pending */
	UTASK_SSTEP_TRAPPED,	/* a trap occurred during the single-step (see arch_uprobe_xol_was_trapped()) */
};
| 73 | |
/*
 * The state of hybrid-lifetime uprobe inside struct return_instance.
 * See the struct hprobe comment below for how these states transition.
 */
enum hprobe_state {
	HPROBE_LEASED,		/* uretprobes_srcu-protected uprobe */
	HPROBE_STABLE,		/* refcounted uprobe */
	HPROBE_GONE,		/* NULL uprobe, SRCU expired, refcount failed */
	HPROBE_CONSUMED,	/* uprobe "consumed" by uretprobe handler */
};
| 81 | |
| 82 | /* |
| 83 | * Hybrid lifetime uprobe. Represents a uprobe instance that could be either |
| 84 | * SRCU protected (with SRCU protection eventually potentially timing out), |
| 85 | * refcounted using uprobe->ref, or there could be no valid uprobe (NULL). |
| 86 | * |
| 87 | * hprobe's internal state is setup such that background timer thread can |
| 88 | * atomically "downgrade" temporarily RCU-protected uprobe into refcounted one |
| 89 | * (or no uprobe, if refcounting failed). |
| 90 | * |
| 91 | * *stable* pointer always point to the uprobe (or could be NULL if there is |
| 92 | * was no valid underlying uprobe to begin with). |
| 93 | * |
| 94 | * *leased* pointer is the key to achieving race-free atomic lifetime state |
| 95 | * transition and can have three possible states: |
| 96 | * - either the same non-NULL value as *stable*, in which case uprobe is |
| 97 | * SRCU-protected; |
| 98 | * - NULL, in which case uprobe (if there is any) is refcounted; |
| 99 | * - special __UPROBE_DEAD value, which represents an uprobe that was SRCU |
| 100 | * protected initially, but SRCU period timed out and we attempted to |
| 101 | * convert it to refcounted, but refcount_inc_not_zero() failed, because |
| 102 | * uprobe effectively went away (the last consumer unsubscribed). In this |
| 103 | * case it's important to know that *stable* pointer (which still has |
| 104 | * non-NULL uprobe pointer) shouldn't be used, because lifetime of |
| 105 | * underlying uprobe is not guaranteed anymore. __UPROBE_DEAD is just an |
| 106 | * internal marker and is handled transparently by hprobe_fetch() helper. |
| 107 | * |
| 108 | * When uprobe is SRCU-protected, we also record srcu_idx value, necessary for |
| 109 | * SRCU unlocking. |
| 110 | * |
| 111 | * See hprobe_expire() and hprobe_fetch() for details of race-free uprobe |
| 112 | * state transitioning details. It all hinges on atomic xchg() over *leaded* |
| 113 | * pointer. *stable* pointer, once initially set, is not modified concurrently. |
| 114 | */ |
| 115 | struct hprobe { |
| 116 | enum hprobe_state state; |
| 117 | int srcu_idx; |
| 118 | struct uprobe *uprobe; |
| 119 | }; |
| 120 | |
| 121 | /* |
| 122 | * uprobe_task: Metadata of a task while it singlesteps. |
| 123 | */ |
| 124 | struct uprobe_task { |
| 125 | enum uprobe_task_state state; |
| 126 | |
| 127 | unsigned int depth; |
| 128 | struct return_instance *return_instances; |
| 129 | |
| 130 | struct return_instance *ri_pool; |
| 131 | struct timer_list ri_timer; |
| 132 | seqcount_t ri_seqcount; |
| 133 | |
| 134 | union { |
| 135 | struct { |
| 136 | struct arch_uprobe_task autask; |
| 137 | unsigned long vaddr; |
| 138 | }; |
| 139 | |
| 140 | struct { |
| 141 | struct callback_head dup_xol_work; |
| 142 | unsigned long dup_xol_addr; |
| 143 | }; |
| 144 | }; |
| 145 | |
| 146 | struct uprobe *active_uprobe; |
| 147 | unsigned long xol_vaddr; |
| 148 | bool signal_denied; |
| 149 | |
| 150 | struct arch_uprobe *auprobe; |
| 151 | }; |
| 152 | |
/*
 * Per-consumer record attached to a return_instance: @cookie is the
 * consumer-provided session value, @id matches uprobe_consumer->id.
 */
struct return_consumer {
	__u64 cookie;
	__u64 id;
};
| 157 | |
/* Bookkeeping for one hijacked return address of a uretprobe'd function. */
struct return_instance {
	struct hprobe hprobe;	/* hybrid-lifetime reference to the owning uprobe */
	unsigned long func;	/* probed function's address, as passed to ret_handler() */
	unsigned long stack;	/* stack pointer */
	unsigned long orig_ret_vaddr; /* original return address */
	bool chained;	/* true, if instance is nested */
	int cons_cnt;	/* total number of session consumers */

	struct return_instance *next;	/* keep as stack */
	struct rcu_head rcu;	/* for RCU-deferred freeing */

	/* singular pre-allocated return_consumer instance for common case */
	struct return_consumer consumer;
	/*
	 * extra return_consumer instances for rare cases of multiple session consumers,
	 * contains (cons_cnt - 1) elements
	 */
	struct return_consumer *extra_consumers;
} ____cacheline_aligned;
| 177 | |
/*
 * Context passed to arch_uretprobe_is_alive() when deciding whether a
 * pending return_instance is still valid for the current stack.
 */
enum rp_check {
	RP_CHECK_CALL,		/* checking at a function call */
	RP_CHECK_CHAIN_CALL,	/* checking at a chained (nested) call */
	RP_CHECK_RET,		/* checking at function return */
};
| 183 | |
struct xol_area;	/* opaque "execute out of line" slot area; defined by the uprobes core */

/* Per-mm uprobes state; embedded in mm_struct. */
struct uprobes_state {
	struct xol_area *xol_area;
};
| 189 | |
extern void __init uprobes_init(void);

/* Breakpoint installation/removal and instruction inspection. */
extern int set_swbp(struct arch_uprobe *aup, struct vm_area_struct *vma, unsigned long vaddr);
extern int set_orig_insn(struct arch_uprobe *aup, struct vm_area_struct *vma, unsigned long vaddr);
extern bool is_swbp_insn(uprobe_opcode_t *insn);
extern bool is_trap_insn(uprobe_opcode_t *insn);
extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs);
extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct vm_area_struct *vma, unsigned long vaddr, uprobe_opcode_t);

/* Consumer (un)registration API. */
extern struct uprobe *uprobe_register(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc);
extern int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool);
extern void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc);
extern void uprobe_unregister_sync(void);

/* mm and task lifecycle hooks called from core kernel code. */
extern int uprobe_mmap(struct vm_area_struct *vma);
extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void uprobe_start_dup_mmap(void);
extern void uprobe_end_dup_mmap(void);
extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm);
extern void uprobe_free_utask(struct task_struct *t);
extern void uprobe_copy_process(struct task_struct *t, unsigned long flags);

/* Trap/signal notification entry points. */
extern int uprobe_post_sstep_notifier(struct pt_regs *regs);
extern int uprobe_pre_sstep_notifier(struct pt_regs *regs);
extern void uprobe_notify_resume(struct pt_regs *regs);
extern bool uprobe_deny_signal(void);

/* Per-architecture hooks (implemented under asm/uprobes.h) and remaining core helpers. */
extern bool arch_uprobe_skip_sstep(struct arch_uprobe *aup, struct pt_regs *regs);
extern void uprobe_clear_state(struct mm_struct *mm);
extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
extern bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, struct pt_regs *regs);
extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
					  void *src, unsigned long len);
extern void uprobe_handle_trampoline(struct pt_regs *regs);
extern void *arch_uprobe_trampoline(unsigned long *psize);
extern unsigned long uprobe_get_trampoline_vaddr(void);
| 229 | #else /* !CONFIG_UPROBES */ |
/* Empty stub so code embedding uprobes_state still compiles with uprobes off. */
struct uprobes_state {
};
| 232 | |
/*
 * CONFIG_UPROBES=n stubs: registration fails with -ENOSYS, everything
 * else compiles away to a no-op.
 */
static inline void uprobes_init(void)
{
}

/* Without uprobes, the trap address is just the faulting instruction pointer. */
#define uprobe_get_trap_addr(regs)	instruction_pointer(regs)

static inline struct uprobe *
uprobe_register(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc)
{
	return ERR_PTR(-ENOSYS);
}
/* Uprobes compiled out: toggling a consumer is unsupported. */
static inline int
uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool add)
{
	return -ENOSYS;
}
/* The remaining hooks are no-ops when uprobes are disabled. */
static inline void
uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
}
static inline void uprobe_unregister_sync(void)
{
}
static inline int uprobe_mmap(struct vm_area_struct *vma)
{
	return 0;
}
static inline void
uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
}
static inline void uprobe_start_dup_mmap(void)
{
}
static inline void uprobe_end_dup_mmap(void)
{
}
static inline void
uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
}
static inline void uprobe_notify_resume(struct pt_regs *regs)
{
}
static inline bool uprobe_deny_signal(void)
{
	return false;
}
static inline void uprobe_free_utask(struct task_struct *t)
{
}
static inline void uprobe_copy_process(struct task_struct *t, unsigned long flags)
{
}
static inline void uprobe_clear_state(struct mm_struct *mm)
{
}
| 290 | #endif /* !CONFIG_UPROBES */ |
| 291 | #endif /* _LINUX_UPROBES_H */ |