Commit | Line | Data |
---|---|---|
b2441318 | 1 | // SPDX-License-Identifier: GPL-2.0 |
2a0a5b22 JW |
2 | /* |
3 | * User-space Probes (UProbes) for s390 | |
4 | * | |
5 | * Copyright IBM Corp. 2014 | |
6 | * Author(s): Jan Willeke, | |
7 | */ | |
8 | ||
2a0a5b22 JW |
9 | #include <linux/uaccess.h> |
10 | #include <linux/uprobes.h> | |
11 | #include <linux/compat.h> | |
12 | #include <linux/kdebug.h> | |
68db0cf1 IM |
13 | #include <linux/sched/task_stack.h> |
14 | ||
2a0a5b22 JW |
15 | #include <asm/switch_to.h> |
16 | #include <asm/facility.h> | |
d6fe5be3 | 17 | #include <asm/kprobes.h> |
2a0a5b22 JW |
18 | #include <asm/dis.h> |
19 | #include "entry.h" | |
20 | ||
/*
 * Sentinel stored in regs->int_code while single stepping out of line;
 * any trap during the step overwrites it (see arch_uprobe_xol_was_trapped()).
 */
#define UPROBE_TRAP_NR	UINT_MAX
22 | ||
23 | int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, | |
24 | unsigned long addr) | |
25 | { | |
26 | return probe_is_prohibited_opcode(auprobe->insn); | |
27 | } | |
28 | ||
/*
 * Prepare single stepping the probed instruction out of line (xol):
 * save the state the step clobbers and point the psw at the xol slot.
 * Returns -EINVAL for addressing modes the xol area cannot support.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	/* no support for 24-bit addressing mode */
	if (psw_bits(regs->psw).eaba == PSW_BITS_AMODE_24BIT)
		return -EINVAL;
	/* 31-bit addressing mode is only accepted for compat tasks */
	if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT)
		return -EINVAL;
	clear_pt_regs_flag(regs, PIF_PER_TRAP);
	/* saved_per / saved_int_code are restored in arch_uprobe_post_xol() */
	auprobe->saved_per = psw_bits(regs->psw).per;
	auprobe->saved_int_code = regs->int_code;
	/* sentinel checked by arch_uprobe_xol_was_trapped() */
	regs->int_code = UPROBE_TRAP_NR;
	regs->psw.addr = current->utask->xol_vaddr;
	set_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
	update_cr_regs(current);
	return 0;
}
44 | ||
45 | bool arch_uprobe_xol_was_trapped(struct task_struct *tsk) | |
46 | { | |
47 | struct pt_regs *regs = task_pt_regs(tsk); | |
48 | ||
49 | if (regs->int_code != UPROBE_TRAP_NR) | |
50 | return true; | |
51 | return false; | |
52 | } | |
53 | ||
8d1a2427 JW |
54 | static int check_per_event(unsigned short cause, unsigned long control, |
55 | struct pt_regs *regs) | |
56 | { | |
57 | if (!(regs->psw.mask & PSW_MASK_PER)) | |
58 | return 0; | |
59 | /* user space single step */ | |
60 | if (control == 0) | |
61 | return 1; | |
62 | /* over indication for storage alteration */ | |
63 | if ((control & 0x20200000) && (cause & 0x2000)) | |
64 | return 1; | |
65 | if (cause & 0x8000) { | |
66 | /* all branches */ | |
67 | if ((control & 0x80800000) == 0x80000000) | |
68 | return 1; | |
69 | /* branch into selected range */ | |
70 | if (((control & 0x80800000) == 0x80800000) && | |
71 | regs->psw.addr >= current->thread.per_user.start && | |
72 | regs->psw.addr <= current->thread.per_user.end) | |
73 | return 1; | |
74 | } | |
75 | return 0; | |
76 | } | |
77 | ||
/*
 * Called after the probed instruction was single-stepped out of line.
 * Undo the state set up in arch_uprobe_pre_xol() and fix up registers
 * so it looks as if the instruction executed at its original address.
 */
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	int fixup = probe_get_fixup_type(auprobe->insn);
	struct uprobe_task *utask = current->utask;

	clear_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
	update_cr_regs(current);
	/* restore the state saved in arch_uprobe_pre_xol() */
	psw_bits(regs->psw).per = auprobe->saved_per;
	regs->int_code = auprobe->saved_int_code;

	if (fixup & FIXUP_PSW_NORMAL)
		/* relocate the psw address from the xol slot back to the probe site */
		regs->psw.addr += utask->vaddr - utask->xol_vaddr;
	if (fixup & FIXUP_RETURN_REGISTER) {
		/* register number encoded in bits 8-11 of the instruction */
		int reg = (auprobe->insn[0] & 0xf0) >> 4;

		/* the stored return address points into the xol area; fix it */
		regs->gprs[reg] += utask->vaddr - utask->xol_vaddr;
	}
	if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
		int ilen = insn_length(auprobe->insn[0] >> 8);

		/* branch not taken iff the psw advanced by exactly one insn */
		if (regs->psw.addr - utask->xol_vaddr == ilen)
			regs->psw.addr = utask->vaddr + ilen;
	}
	/* re-raise a PER trap for the original address if the user asked for it */
	if (check_per_event(current->thread.per_event.cause,
			    current->thread.per_user.control, regs)) {
		/* fix per address */
		current->thread.per_event.address = utask->vaddr;
		/* trigger per event */
		set_pt_regs_flag(regs, PIF_PER_TRAP);
	}
	return 0;
}
110 | ||
111 | int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, | |
112 | void *data) | |
113 | { | |
114 | struct die_args *args = data; | |
115 | struct pt_regs *regs = args->regs; | |
116 | ||
117 | if (!user_mode(regs)) | |
118 | return NOTIFY_DONE; | |
119 | if (regs->int_code & 0x200) /* Trap during transaction */ | |
120 | return NOTIFY_DONE; | |
121 | switch (val) { | |
122 | case DIE_BPT: | |
123 | if (uprobe_pre_sstep_notifier(regs)) | |
124 | return NOTIFY_STOP; | |
125 | break; | |
126 | case DIE_SSTEP: | |
127 | if (uprobe_post_sstep_notifier(regs)) | |
128 | return NOTIFY_STOP; | |
129 | default: | |
130 | break; | |
131 | } | |
132 | return NOTIFY_DONE; | |
133 | } | |
134 | ||
/*
 * Abort an in-progress out-of-line single step: restore the saved
 * interruption code and rewind the psw to the original probe address.
 */
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	clear_thread_flag(TIF_UPROBE_SINGLESTEP);
	regs->int_code = auprobe->saved_int_code;
	regs->psw.addr = current->utask->vaddr;
	/* report the original probe address in any pending PER event */
	current->thread.per_event.address = current->utask->vaddr;
}
142 | ||
143 | unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline, | |
144 | struct pt_regs *regs) | |
145 | { | |
146 | unsigned long orig; | |
147 | ||
148 | orig = regs->gprs[14]; | |
149 | regs->gprs[14] = trampoline; | |
150 | return orig; | |
151 | } | |
152 | ||
153 | /* Instruction Emulation */ | |
154 | ||
/* Advance the psw address by @len (rewinding by -len moves forward). */
static void adjust_psw_addr(psw_t *psw, unsigned long len)
{
	psw->addr = __rewind_psw(*psw, -len);
}
159 | ||
/* Error codes returned by the emu_*_ril() helpers below. */
#define EMU_ILLEGAL_OP		1
#define EMU_SPECIFICATION	2
#define EMU_ADDRESSING		3
163 | ||
/*
 * Emulate a RIL-format load: fetch a value from user memory at @ptr
 * into *(output). Returns 0 on success, EMU_ILLEGAL_OP when facility 34
 * is absent, EMU_SPECIFICATION when @ptr is not naturally aligned, or
 * EMU_ADDRESSING when the user-space access faults.
 */
#define emu_load_ril(ptr, output)			\
({							\
	unsigned int mask = sizeof(*(ptr)) - 1;		\
	__typeof__(*(ptr)) input;			\
	int __rc = 0;					\
							\
	if (!test_facility(34))				\
		__rc = EMU_ILLEGAL_OP;			\
	else if ((u64 __force)ptr & mask)		\
		__rc = EMU_SPECIFICATION;		\
	else if (get_user(input, ptr))			\
		__rc = EMU_ADDRESSING;			\
	else						\
		*(output) = input;			\
	__rc;						\
})
180 | ||
/*
 * Emulate a RIL-format store: write *(input) to user memory at @ptr.
 * On success also simulates a PER storage-alteration event via
 * sim_stor_event(). Returns 0 or an EMU_* code as in emu_load_ril().
 */
#define emu_store_ril(regs, ptr, input)			\
({							\
	unsigned int mask = sizeof(*(ptr)) - 1;		\
	__typeof__(ptr) __ptr = (ptr);			\
	int __rc = 0;					\
							\
	if (!test_facility(34))				\
		__rc = EMU_ILLEGAL_OP;			\
	else if ((u64 __force)__ptr & mask)		\
		__rc = EMU_SPECIFICATION;		\
	else if (put_user(*(input), __ptr))		\
		__rc = EMU_ADDRESSING;			\
	if (__rc == 0)					\
		sim_stor_event(regs,			\
			       (void __force *)__ptr,	\
			       mask + 1);		\
	__rc;						\
})
199 | ||
/*
 * Emulate a RIL-format compare of *(cmp) (register operand) against the
 * user memory operand at @ptr, setting the psw condition code:
 * cc 0 = equal, cc 1 = register operand low, cc 2 = register operand high.
 * Returns 0 or an EMU_* code as in emu_load_ril().
 */
#define emu_cmp_ril(regs, ptr, cmp)			\
({							\
	unsigned int mask = sizeof(*(ptr)) - 1;		\
	__typeof__(*(ptr)) input;			\
	int __rc = 0;					\
							\
	if (!test_facility(34))				\
		__rc = EMU_ILLEGAL_OP;			\
	else if ((u64 __force)ptr & mask)		\
		__rc = EMU_SPECIFICATION;		\
	else if (get_user(input, ptr))			\
		__rc = EMU_ADDRESSING;			\
	else if (input > *(cmp))			\
		psw_bits((regs)->psw).cc = 1;		\
	else if (input < *(cmp))			\
		psw_bits((regs)->psw).cc = 2;		\
	else						\
		psw_bits((regs)->psw).cc = 0;		\
	__rc;						\
})
220 | ||
/*
 * Layout of a RIL-format instruction: 8-bit opcode, 4-bit register
 * field, 4-bit opcode extension, 32-bit signed relative displacement
 * (in halfwords — see the "* 2" in handle_insn_ril()).
 */
struct insn_ril {
	u8 opc0;
	u8 reg	: 4;
	u8 opc1	: 4;
	s32 disp;
} __packed;
227 | ||
/*
 * View of a 64-bit general purpose register as the whole value or as
 * its 32/16-bit parts, signed or unsigned — used to apply the emulated
 * load/store/compare at the correct operand width.
 */
union split_register {
	u64 u64;
	u32 u32[2];
	u16 u16[4];
	s64 s64;
	s32 s32[2];
	s16 s16[4];
};
236 | ||
8d1a2427 JW |
237 | /* |
238 | * If user per registers are setup to trace storage alterations and an | |
239 | * emulated store took place on a fitting address a user trap is generated. | |
240 | */ | |
241 | static void sim_stor_event(struct pt_regs *regs, void *addr, int len) | |
242 | { | |
243 | if (!(regs->psw.mask & PSW_MASK_PER)) | |
244 | return; | |
245 | if (!(current->thread.per_user.control & PER_EVENT_STORE)) | |
246 | return; | |
247 | if ((void *)current->thread.per_user.start > (addr + len)) | |
248 | return; | |
249 | if ((void *)current->thread.per_user.end < addr) | |
250 | return; | |
251 | current->thread.per_event.address = regs->psw.addr; | |
252 | current->thread.per_event.cause = PER_EVENT_STORE >> 16; | |
253 | set_pt_regs_flag(regs, PIF_PER_TRAP); | |
254 | } | |
255 | ||
/*
 * pc relative instructions are emulated, since parameters may not be
 * accessible from the xol area due to range limitations.
 */
static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	union split_register *rx;
	struct insn_ril *insn;
	unsigned int ilen;
	void *uptr;
	int rc = 0;

	insn = (struct insn_ril *) &auprobe->insn;
	rx = (union split_register *) &regs->gprs[insn->reg];
	/* operand address: original psw address + signed halfword displacement */
	uptr = (void *)(regs->psw.addr + (insn->disp * 2));
	ilen = insn_length(insn->opc0);

	switch (insn->opc0) {
	case 0xc0:
		switch (insn->opc1) {
		case 0x00: /* larl: just materialize the relative address */
			rx->u64 = (unsigned long)uptr;
			break;
		}
		break;
	case 0xc4: /* load/store relative long */
		switch (insn->opc1) {
		case 0x02: /* llhrl */
			rc = emu_load_ril((u16 __user *)uptr, &rx->u32[1]);
			break;
		case 0x04: /* lghrl */
			rc = emu_load_ril((s16 __user *)uptr, &rx->u64);
			break;
		case 0x05: /* lhrl */
			rc = emu_load_ril((s16 __user *)uptr, &rx->u32[1]);
			break;
		case 0x06: /* llghrl */
			rc = emu_load_ril((u16 __user *)uptr, &rx->u64);
			break;
		case 0x08: /* lgrl */
			rc = emu_load_ril((u64 __user *)uptr, &rx->u64);
			break;
		case 0x0c: /* lgfrl */
			rc = emu_load_ril((s32 __user *)uptr, &rx->u64);
			break;
		case 0x0d: /* lrl */
			rc = emu_load_ril((u32 __user *)uptr, &rx->u32[1]);
			break;
		case 0x0e: /* llgfrl */
			rc = emu_load_ril((u32 __user *)uptr, &rx->u64);
			break;
		case 0x07: /* sthrl */
			rc = emu_store_ril(regs, (u16 __user *)uptr, &rx->u16[3]);
			break;
		case 0x0b: /* stgrl */
			rc = emu_store_ril(regs, (u64 __user *)uptr, &rx->u64);
			break;
		case 0x0f: /* strl */
			rc = emu_store_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
			break;
		}
		break;
	case 0xc6: /* compare relative long (and pfdrl) */
		switch (insn->opc1) {
		case 0x02: /* pfdrl: prefetch hint — only the facility check matters */
			if (!test_facility(34))
				rc = EMU_ILLEGAL_OP;
			break;
		case 0x04: /* cghrl */
			rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s64);
			break;
		case 0x05: /* chrl */
			rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s32[1]);
			break;
		case 0x06: /* clghrl */
			rc = emu_cmp_ril(regs, (u16 __user *)uptr, &rx->u64);
			break;
		case 0x07: /* clhrl */
			rc = emu_cmp_ril(regs, (u16 __user *)uptr, &rx->u32[1]);
			break;
		case 0x08: /* cgrl */
			rc = emu_cmp_ril(regs, (s64 __user *)uptr, &rx->s64);
			break;
		case 0x0a: /* clgrl */
			rc = emu_cmp_ril(regs, (u64 __user *)uptr, &rx->u64);
			break;
		case 0x0c: /* cgfrl */
			rc = emu_cmp_ril(regs, (s32 __user *)uptr, &rx->s64);
			break;
		case 0x0d: /* crl */
			rc = emu_cmp_ril(regs, (s32 __user *)uptr, &rx->s32[1]);
			break;
		case 0x0e: /* clgfrl */
			rc = emu_cmp_ril(regs, (u32 __user *)uptr, &rx->u64);
			break;
		case 0x0f: /* clrl */
			rc = emu_cmp_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
			break;
		}
		break;
	}
	/* the instruction was consumed by emulation: advance past it */
	adjust_psw_addr(&regs->psw, ilen);
	switch (rc) {
	case EMU_ILLEGAL_OP:
		/* 0x0001 matches an operation exception interruption code */
		regs->int_code = ilen << 16 | 0x0001;
		do_report_trap(regs, SIGILL, ILL_ILLOPC, NULL);
		break;
	case EMU_SPECIFICATION:
		/* 0x0006 matches a specification exception interruption code */
		regs->int_code = ilen << 16 | 0x0006;
		do_report_trap(regs, SIGILL, ILL_ILLOPC , NULL);
		break;
	case EMU_ADDRESSING:
		/* 0x0005 matches an addressing exception interruption code */
		regs->int_code = ilen << 16 | 0x0005;
		do_report_trap(regs, SIGSEGV, SEGV_MAPERR, NULL);
		break;
	}
}
373 | ||
374 | bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) | |
375 | { | |
8bb3fdd6 HC |
376 | if ((psw_bits(regs->psw).eaba == PSW_BITS_AMODE_24BIT) || |
377 | ((psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT) && | |
2a0a5b22 JW |
378 | !is_compat_task())) { |
379 | regs->psw.addr = __rewind_psw(regs->psw, UPROBE_SWBP_INSN_SIZE); | |
380 | do_report_trap(regs, SIGILL, ILL_ILLADR, NULL); | |
381 | return true; | |
382 | } | |
383 | if (probe_is_insn_relative_long(auprobe->insn)) { | |
384 | handle_insn_ril(auprobe, regs); | |
385 | return true; | |
386 | } | |
387 | return false; | |
388 | } |