Commit | Line | Data |
---|---|---|
8858ac8e SS |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | |
3 | * arch/parisc/kernel/kprobes.c | |
4 | * | |
5 | * PA-RISC kprobes implementation | |
6 | * | |
7 | * Copyright (c) 2019 Sven Schnelle <svens@stackframe.org> | |
8 | */ | |
9 | ||
10 | #include <linux/types.h> | |
11 | #include <linux/kprobes.h> | |
12 | #include <linux/slab.h> | |
13 | #include <asm/cacheflush.h> | |
14 | #include <asm/patch.h> | |
15 | ||
16 | DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; | |
17 | DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); | |
18 | ||
/*
 * Validate a probe address and stage a copy of the probed instruction.
 *
 * Returns -EINVAL if the address is not 4-byte aligned (PA-RISC
 * instructions are word-aligned), -ENOMEM if no instruction slot is
 * available, 0 on success.  On success p->ainsn.insn points at an
 * executable slot holding a copy of the original instruction and
 * p->opcode holds the original word so it can be restored on disarm.
 */
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	if ((unsigned long)p->addr & 3UL)
		return -EINVAL;

	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	/* Copy the instruction into the slot it will be stepped from. */
	memcpy(p->ainsn.insn, p->addr,
	       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	p->opcode = *p->addr;
	/* Make the copied instruction visible to the I-cache. */
	flush_insn_slot(p);
	return 0;
}
34 | ||
35 | void __kprobes arch_remove_kprobe(struct kprobe *p) | |
36 | { | |
37 | if (!p->ainsn.insn) | |
38 | return; | |
39 | ||
40 | free_insn_slot(p->ainsn.insn, 0); | |
41 | p->ainsn.insn = NULL; | |
42 | } | |
43 | ||
/*
 * Arm the probe: overwrite the probed instruction with the kprobes
 * break instruction so execution traps into the break handler.
 */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, PARISC_KPROBES_BREAK_INSN);
}
48 | ||
/*
 * Disarm the probe: write back the original instruction word saved
 * in arch_prepare_kprobe().
 */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, p->opcode);
}
53 | ||
/*
 * Stash the currently active kprobe and its status in the per-CPU
 * control block so a reentrant probe hit can be handled and the
 * outer probe restored afterwards (see restore_previous_kprobe()).
 */
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}
59 | ||
/*
 * Undo save_previous_kprobe(): make the saved outer probe the
 * current one again after a reentrant probe has been single-stepped.
 */
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}
65 | ||
/* Record @p as the kprobe currently being handled on this CPU. */
static inline void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}
70 | ||
/*
 * Redirect execution to the copied instruction in the slot and arm
 * single-stepping.  The original instruction address queue (front and
 * back) is saved in the control block so the step handler can compute
 * the real continuation address afterwards.
 *
 * mtctl(0, 0) clears control register 0 (the recovery counter) and
 * setting PSW_R in the saved PSW enables it, so a recovery-counter
 * trap fires after one instruction executes -- PA-RISC's
 * single-step mechanism.  (NOTE(review): grounded in the PSW_R flag
 * name; confirm against the PA-RISC architecture manual.)
 */
static void __kprobes setup_singlestep(struct kprobe *p,
	struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	kcb->iaoq[0] = regs->iaoq[0];
	kcb->iaoq[1] = regs->iaoq[1];
	regs->iaoq[0] = (unsigned long)p->ainsn.insn;
	mtctl(0, 0);
	regs->gr[0] |= PSW_R;
}
80 | ||
/*
 * Entry point from the break-instruction trap for an armed kprobe.
 *
 * Returns 0 if the break was not ours (no probe registered at the
 * faulting address), 1 if it was handled.  On the handled path that
 * leads to single-stepping, preemption stays disabled until the step
 * completes in parisc_kprobe_ss_handler(); it is re-enabled here only
 * on the not-ours path and when a pre_handler diverts execution.
 */
int __kprobes parisc_kprobe_break_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	preempt_disable();

	kcb = get_kprobe_ctlblk();
	p = get_kprobe((unsigned long *)regs->iaoq[0]);

	if (!p) {
		/* Not our break instruction; let other handlers see it. */
		preempt_enable_no_resched();
		return 0;
	}

	if (kprobe_running()) {
		/*
		 * We have reentered the kprobe_handler, since another kprobe
		 * was hit while within the handler, we save the original
		 * kprobes and single step on the instruction of the new probe
		 * without calling any user handlers to avoid recursive
		 * kprobes.
		 */
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, kcb, regs);
		kcb->kprobe_status = KPROBE_REENTER;
		return 1;
	}

	set_current_kprobe(p);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	/* If we have no pre-handler or it returned 0, we continue with
	 * normal processing. If we have a pre-handler and it returned
	 * non-zero - which means user handler setup registers to exit
	 * to another instruction, we must skip the single stepping.
	 */

	if (!p->pre_handler || !p->pre_handler(p, regs)) {
		setup_singlestep(p, kcb, regs);
		kcb->kprobe_status = KPROBE_HIT_SS;
	} else {
		/* pre_handler redirected execution; probe handling is done. */
		reset_current_kprobe();
		preempt_enable_no_resched();
	}
	return 1;
}
130 | ||
/*
 * Completion of a single step started in setup_singlestep().
 *
 * Returns 0 if the trap did not come from our instruction slot
 * (no running probe, or the address does not match slot + 4),
 * 1 when the step has been handled and the original instruction
 * stream resumed.
 */
int __kprobes parisc_kprobe_ss_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();

	if (!p)
		return 0;

	/* The step lands one word past the copied instruction. */
	if (regs->iaoq[0] != (unsigned long)p->ainsn.insn+4)
		return 0;

	/* restore back original saved kprobe variables and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		return 1;
	}

	/* for absolute branch instructions we can copy iaoq_b. for relative
	 * branch instructions we need to calculate the new address based on the
	 * difference between iaoq_f and iaoq_b. We cannot use iaoq_b without
	 * modifications because it's based on our ainsn.insn address.
	 */

	if (p->post_handler)
		p->post_handler(p, regs, 0);

	/* Decode the major opcode of the stepped instruction (bits 0..5). */
	switch (regs->iir >> 26) {
	case 0x38: /* BE */
	case 0x39: /* BE,L */
	case 0x3a: /* BV */
	case 0x3b: /* BVE */
		/* for absolute branches, regs->iaoq[1] has already the right
		 * address
		 */
		regs->iaoq[0] = kcb->iaoq[1];
		break;
	default:
		/* Rebase the slot-relative back-queue address onto the
		 * original instruction's address.
		 */
		regs->iaoq[1] = kcb->iaoq[0];
		regs->iaoq[1] += (regs->iaoq[1] - regs->iaoq[0]) + 4;
		regs->iaoq[0] = kcb->iaoq[1];
		break;
	}
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	reset_current_kprobe();
	return 1;
}
177 | ||
e0b59b7b SS |
/*
 * Kretprobe return trampoline.  Return addresses of probed functions
 * are rewritten to point here (see arch_prepare_kretprobe()); a kprobe
 * registered on this address in arch_init_kprobes() then intercepts
 * the return.  The body is deliberately just nops - it only needs a
 * unique, patchable code address.
 */
static inline void kretprobe_trampoline(void)
{
	asm volatile("nop");
	asm volatile("nop");
}
183 | ||
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs);

/*
 * Kprobe installed on kretprobe_trampoline() at init time; its
 * pre_handler runs every kretprobe return.  .addr is filled in by
 * arch_init_kprobes().
 */
static struct kprobe trampoline_p = {
	.pre_handler = trampoline_probe_handler
};
190 | ||
/*
 * Called when execution returns into the kretprobe trampoline.
 * Walks this task's kretprobe instances (newest first), fires each
 * registered return handler, recycles the instances, and finally
 * redirects execution to the real saved return address.  Always
 * returns 1: the trampoline "instruction" itself is never executed.
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)trampoline_p.addr;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	/* First pass: find the real return address for this frame. */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	/* Second pass: run handlers and recycle the consumed instances. */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			/* Present the handler with a consistent kprobe
			 * context and the corrected return address.
			 */
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, NULL);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_hash_unlock(current, &flags);

	/* Free instances recycled onto the local list, outside the lock. */
	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/* Resume at the real caller instead of the trampoline. */
	instruction_pointer_set(regs, orig_ret_address);
	return 1;
}
270 | ||
/*
 * Hijack the return path of a probed function: save the real return
 * address (PA-RISC keeps it in gr[2], the "rp" register) in the
 * kretprobe instance, then point gr[2] at the trampoline so the
 * return traps into trampoline_probe_handler().
 */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->gr[2];

	/* Replace the return addr with trampoline addr. */
	regs->gr[2] = (unsigned long)trampoline_p.addr;
}
279 | ||
280 | int __kprobes arch_trampoline_kprobe(struct kprobe *p) | |
281 | { | |
282 | return p->addr == trampoline_p.addr; | |
283 | } | |
8858ac8e SS |
284 | |
/*
 * Arch-specific kprobes initialization: register the probe on the
 * kretprobe trampoline.  On parisc a function symbol refers to a
 * function descriptor, so dereference it to obtain the actual code
 * address before registering.
 */
int __init arch_init_kprobes(void)
{
	trampoline_p.addr = (kprobe_opcode_t *)
		dereference_function_descriptor(kretprobe_trampoline);
	return register_kprobe(&trampoline_p);
}