[S390] kprobes: insn slots
[linux-block.git] arch/s390/kernel/kprobes.c
/*
 * Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2006
 *
 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
{
        switch (insn[0] >> 8) {
        case 0x0c:      /* bassm */
        case 0x0b:      /* bsm */
        case 0x83:      /* diag */
        case 0x44:      /* ex */
        case 0xac:      /* stnsm */
        case 0xad:      /* stosm */
                return -EINVAL;
        }
        switch (insn[0]) {
        case 0x0101:    /* pr */
        case 0xb25a:    /* bsa */
        case 0xb240:    /* bakr */
        case 0xb258:    /* bsg */
        case 0xb218:    /* pc */
        case 0xb228:    /* pt */
        case 0xb98d:    /* epsw */
                return -EINVAL;
        }
        return 0;
}

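/*
 * Note on the list above: these instructions cannot be single-stepped from
 * the out-of-line copy at p->ainsn.insn. Roughly speaking, they either
 * execute or depend on another instruction/address (ex), switch the
 * addressing mode (bsm, bassm), manipulate the system mask or privileged
 * state (diag, stnsm, stosm, pc, pt, pr, bakr, bsa, bsg), or inspect the
 * PSW (epsw), so executing a relocated copy would not be transparent.
 * Probes on them are therefore refused.
 */
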
static int __kprobes get_fixup_type(kprobe_opcode_t *insn)
{
        /* default fixup method */
        int fixup = FIXUP_PSW_NORMAL;

        switch (insn[0] >> 8) {
        case 0x05:      /* balr */
        case 0x0d:      /* basr */
                fixup = FIXUP_RETURN_REGISTER;
                /* if r2 = 0, no branch will be taken */
                if ((insn[0] & 0x0f) == 0)
                        fixup |= FIXUP_BRANCH_NOT_TAKEN;
                break;
        case 0x06:      /* bctr */
        case 0x07:      /* bcr */
                fixup = FIXUP_BRANCH_NOT_TAKEN;
                break;
        case 0x45:      /* bal */
        case 0x4d:      /* bas */
                fixup = FIXUP_RETURN_REGISTER;
                break;
        case 0x47:      /* bc */
        case 0x46:      /* bct */
        case 0x86:      /* bxh */
        case 0x87:      /* bxle */
                fixup = FIXUP_BRANCH_NOT_TAKEN;
                break;
        case 0x82:      /* lpsw */
                fixup = FIXUP_NOT_REQUIRED;
                break;
        case 0xb2:      /* lpswe */
                if ((insn[0] & 0xff) == 0xb2)
                        fixup = FIXUP_NOT_REQUIRED;
                break;
        case 0xa7:      /* bras */
                if ((insn[0] & 0x0f) == 0x05)
                        fixup |= FIXUP_RETURN_REGISTER;
                break;
        case 0xc0:
                if ((insn[0] & 0x0f) == 0x00 || /* larl */
                    (insn[0] & 0x0f) == 0x05)   /* brasl */
                        fixup |= FIXUP_RETURN_REGISTER;
                break;
        case 0xeb:
                if ((insn[2] & 0xff) == 0x44 || /* bxhg */
                    (insn[2] & 0xff) == 0x45)   /* bxleg */
                        fixup = FIXUP_BRANCH_NOT_TAKEN;
                break;
        case 0xe3:      /* bctg */
                if ((insn[2] & 0xff) == 0x46)
                        fixup = FIXUP_BRANCH_NOT_TAKEN;
                break;
        }
        return fixup;
}

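/*
 * How the fixup flags are used by resume_execution() further down:
 *
 *  FIXUP_PSW_NORMAL: the psw address still points into the copied
 *      instruction slot; add (p->addr - p->ainsn.insn) to retarget it to
 *      the original code.
 *  FIXUP_BRANCH_NOT_TAKEN: if the branch fell through (the psw ended up
 *      directly behind the copy), continue directly behind the original
 *      instruction instead.
 *  FIXUP_RETURN_REGISTER: branch-and-link forms (basr, bras, brasl, ...)
 *      stored a link address relative to the copy; the same offset is
 *      added to the link register. For example, after stepping a copied
 *      "basr %r14,%r1", r14 is corrected from p->ainsn.insn + ilen to
 *      p->addr + ilen.
 */
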
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
        if ((unsigned long) p->addr & 0x01)
                return -EINVAL;

        /* Make sure the probe isn't going on a difficult instruction */
        if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
                return -EINVAL;

        p->opcode = *p->addr;
        memcpy(p->ainsn.insn, p->addr, ((p->opcode >> 14) + 3) & -2);

        return 0;
}

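/*
 * The memcpy length above is the instruction length taken from the two
 * leftmost bits of the first halfword: 00 -> 2 bytes, 01/10 -> 4 bytes,
 * 11 -> 6 bytes. ((opcode >> 14) + 3) & -2 maps 0,1,2,3 to 2,4,4,6, so
 * exactly the probed instruction is copied into p->ainsn.insn.
 */
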
struct ins_replace_args {
        kprobe_opcode_t *ptr;
        kprobe_opcode_t opcode;
};

static int __kprobes swap_instruction(void *aref)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        unsigned long status = kcb->kprobe_status;
        struct ins_replace_args *args = aref;

        kcb->kprobe_status = KPROBE_SWAP_INST;
        probe_kernel_write(args->ptr, &args->opcode, sizeof(args->opcode));
        kcb->kprobe_status = status;
        return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
        struct ins_replace_args args;

        args.ptr = p->addr;
        args.opcode = BREAKPOINT_INSTRUCTION;
        stop_machine(swap_instruction, &args, NULL);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
        struct ins_replace_args args;

        args.ptr = p->addr;
        args.opcode = p->opcode;
        stop_machine(swap_instruction, &args, NULL);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
}

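/*
 * Arming and disarming run swap_instruction() under stop_machine() so no
 * other CPU can execute the probed location while the breakpoint (or the
 * saved original opcode) is being written. KPROBE_SWAP_INST is set around
 * probe_kernel_write() so that kprobe_trap_handler() can recognize a fault
 * that happens during the swap itself.
 */
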
static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
                                        struct pt_regs *regs,
                                        unsigned long ip)
{
        per_cr_bits kprobe_per_regs[1];

        /* Set up the per control reg info, will pass to lctl */
        memset(kprobe_per_regs, 0, sizeof(per_cr_bits));
        kprobe_per_regs[0].em_instruction_fetch = 1;
        kprobe_per_regs[0].starting_addr = ip;
        kprobe_per_regs[0].ending_addr = ip;

        /* Save control regs and psw mask */
        __ctl_store(kcb->kprobe_saved_ctl, 9, 11);
        kcb->kprobe_saved_imask = regs->psw.mask &
                (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);

        /* Set PER control regs, turns on single step for the given address */
        __ctl_load(kprobe_per_regs, 9, 11);
        regs->psw.mask |= PSW_MASK_PER;
        regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
        regs->psw.addr = ip | PSW_ADDR_AMODE;
}

static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb,
                                         struct pt_regs *regs,
                                         unsigned long ip)
{
        /* Restore control regs and psw mask, set new psw address */
        __ctl_load(kcb->kprobe_saved_ctl, 9, 11);
        regs->psw.mask &= ~PSW_MASK_PER;
        regs->psw.mask |= kcb->kprobe_saved_imask;
        regs->psw.addr = ip | PSW_ADDR_AMODE;
}

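/*
 * Single-stepping uses the PER (program event recording) hardware:
 * control registers 9-11 are loaded with an instruction-fetch event range
 * covering exactly the copied instruction, PER is enabled in the psw mask
 * and I/O/external interrupts are masked. The PER event raised after that
 * one instruction has executed reaches post_kprobe_handler() via DIE_SSTEP;
 * disable_singlestep() then restores the saved control registers and
 * interrupt mask.
 */
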
/*
 * Activate a kprobe by storing its pointer to current_kprobe. The
 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
 * two kprobes can be active, see KPROBE_REENTER.
 */
static void __kprobes push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
        kcb->prev_kprobe.kp = __get_cpu_var(current_kprobe);
        kcb->prev_kprobe.status = kcb->kprobe_status;
        __get_cpu_var(current_kprobe) = p;
}

/*
 * Deactivate a kprobe by backing up to the previous state. If the
 * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
 * for any other state prev_kprobe.kp will be NULL.
 */
static void __kprobes pop_kprobe(struct kprobe_ctlblk *kcb)
{
        __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
        kcb->kprobe_status = kcb->prev_kprobe.status;
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
                                      struct pt_regs *regs)
{
        ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];

        /* Replace the return addr with trampoline addr */
        regs->gprs[14] = (unsigned long)&kretprobe_trampoline;
}

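/*
 * On s390 the caller's return address is in r14, so at function entry the
 * real return address is saved in the kretprobe instance and r14 is
 * redirected to kretprobe_trampoline. When the probed function returns it
 * "returns" into the trampoline, and trampoline_probe_handler() below runs
 * the user handler and restores the real return address into the psw.
 */
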
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p;
        int ret = 0;
        unsigned long *addr = (unsigned long *)
                ((regs->psw.addr & PSW_ADDR_INSN) - 2);
        struct kprobe_ctlblk *kcb;

        /*
         * We don't want to be preempted for the entire
         * duration of kprobe processing
         */
        preempt_disable();
        kcb = get_kprobe_ctlblk();

        /* Check we're not actually recursing */
        if (kprobe_running()) {
                p = get_kprobe(addr);
                if (p) {
                        /*
                         * We have hit a kprobe while another is still
                         * active. This can happen in the pre and post
                         * handler. Single step the instruction of the
                         * new probe but do not call any handler function
                         * of this secondary kprobe.
                         * push_kprobe and pop_kprobe save and restore
                         * the currently active kprobe.
                         */
                        push_kprobe(kcb, p);
                        kprobes_inc_nmissed_count(p);
                        enable_singlestep(kcb, regs,
                                          (unsigned long) p->ainsn.insn);
                        kcb->kprobe_status = KPROBE_REENTER;
                        return 1;
                } else {
                        p = __get_cpu_var(current_kprobe);
                        if (p->break_handler && p->break_handler(p, regs)) {
                                goto ss_probe;
                        }
                }
                goto no_kprobe;
        }

        p = get_kprobe(addr);
        if (!p)
                /*
                 * No kprobe at this address. The fault has not been
                 * caused by a kprobe breakpoint. The race of breakpoint
                 * vs. kprobe remove does not exist because on s390 we
                 * use stop_machine to arm/disarm the breakpoints.
                 */
                goto no_kprobe;

        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
        push_kprobe(kcb, p);
        if (p->pre_handler && p->pre_handler(p, regs))
                /* handler has already set things up, so skip ss setup */
                return 1;

ss_probe:
        enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
        kcb->kprobe_status = KPROBE_HIT_SS;
        return 1;

no_kprobe:
        preempt_enable_no_resched();
        return ret;
}

/*
 * Function return probe trampoline:
 *      - init_kprobes() establishes a probepoint here
 *      - When the probed function returns, this probe
 *        causes the handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
        asm volatile(".global kretprobe_trampoline\n"
                     "kretprobe_trampoline: bcr 0,0\n");
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
                                              struct pt_regs *regs)
{
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
        struct hlist_node *node, *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
        kprobe_opcode_t *correct_ret_addr = NULL;

        INIT_HLIST_HEAD(&empty_rp);
        kretprobe_hash_lock(current, &head, &flags);

        /*
         * It is possible to have multiple instances associated with a given
         * task either because multiple functions in the call path have
         * a return probe installed on them, and/or more than one return
         * probe was registered for a target function.
         *
         * We can handle this because:
         *      - instances are always inserted at the head of the list
         *      - when multiple return probes are registered for the same
         *        function, the first instance's ret_addr will point to the
         *        real return address, and all the rest will point to
         *        kretprobe_trampoline
         */
        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                orig_ret_address = (unsigned long)ri->ret_addr;

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
        }

        kretprobe_assert(ri, orig_ret_address, trampoline_address);

        correct_ret_addr = ri->ret_addr;
        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                orig_ret_address = (unsigned long)ri->ret_addr;

                if (ri->rp && ri->rp->handler) {
                        ri->ret_addr = correct_ret_addr;
                        ri->rp->handler(ri, regs);
                }

                recycle_rp_inst(ri, &empty_rp);

                if (orig_ret_address != trampoline_address) {
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
                }
        }

        regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;

        pop_kprobe(get_kprobe_ctlblk());
        kretprobe_hash_unlock(current, &flags);
        preempt_enable_no_resched();

        hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
        /*
         * By returning a non-zero value, we are telling
         * kprobe_handler() that we don't want the post_handler
         * to run (and have re-enabled preemption)
         */
        return 1;
}

/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
        int fixup = get_fixup_type(p->ainsn.insn);

        if (fixup & FIXUP_PSW_NORMAL)
                ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;

        if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
                int ilen = ((p->ainsn.insn[0] >> 14) + 3) & -2;
                if (ip - (unsigned long) p->ainsn.insn == ilen)
                        ip = (unsigned long) p->addr + ilen;
        }

        if (fixup & FIXUP_RETURN_REGISTER) {
                int reg = (p->ainsn.insn[0] & 0xf0) >> 4;
                regs->gprs[reg] += (unsigned long) p->addr -
                                   (unsigned long) p->ainsn.insn;
        }

        disable_singlestep(kcb, regs, ip);
}

static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (!cur)
                return 0;

        if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                cur->post_handler(cur, regs, 0);
        }

        resume_execution(cur, regs);
        pop_kprobe(kcb);
        preempt_enable_no_resched();

        /*
         * If somebody else is single-stepping across a probe point, the
         * psw mask will have PER set. In that case continue the remaining
         * processing of do_single_step, as if this were not a probe hit.
         */
        if (regs->psw.mask & PSW_MASK_PER)
                return 0;

        return 1;
}

static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        const struct exception_table_entry *entry;

        switch (kcb->kprobe_status) {
        case KPROBE_SWAP_INST:
                /* We are here because the instruction replacement failed */
                return 0;
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                /*
                 * We are here because the instruction being single
                 * stepped caused a page fault. We reset the current
                 * kprobe and the psw address points back to the probe
                 * address, so we allow the page fault handler to
                 * continue as a normal page fault.
                 */
                disable_singlestep(kcb, regs, (unsigned long) cur->addr);
                pop_kprobe(kcb);
                preempt_enable_no_resched();
                break;
        case KPROBE_HIT_ACTIVE:
        case KPROBE_HIT_SSDONE:
                /*
                 * We increment the nmissed count for accounting;
                 * we can also use the npre/npostfault counts for
                 * accounting these specific fault cases.
                 */
                kprobes_inc_nmissed_count(cur);

                /*
                 * We come here because instructions in the pre/post
                 * handler caused the page fault. This could happen
                 * if the handler tries to access user space by
                 * copy_from_user(), get_user() etc. Let the
                 * user-specified handler try to fix it first.
                 */
                if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
                        return 1;

                /*
                 * In case the user-specified fault handler returned
                 * zero, try to fix up.
                 */
                entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
                if (entry) {
                        regs->psw.addr = entry->fixup | PSW_ADDR_AMODE;
                        return 1;
                }

                /*
                 * fixup_exception() could not handle it,
                 * let do_page_fault() fix it.
                 */
                break;
        default:
                break;
        }
        return 0;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
        int ret;

        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
                local_irq_disable();
        ret = kprobe_trap_handler(regs, trapnr);
        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
                local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
        return ret;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                                       unsigned long val, void *data)
{
        struct die_args *args = (struct die_args *)data;
        struct pt_regs *regs = args->regs;
        int ret = NOTIFY_DONE;

        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
                local_irq_disable();

        switch (val) {
        case DIE_BPT:
                if (kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_SSTEP:
                if (post_kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_TRAP:
                if (!preemptible() && kprobe_running() &&
                    kprobe_trap_handler(args->regs, args->trapnr))
                        ret = NOTIFY_STOP;
                break;
        default:
                break;
        }

        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
                local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);

        return ret;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct jprobe *jp = container_of(p, struct jprobe, kp);
        unsigned long addr;
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

        /* setup return addr to the jprobe handler routine */
        regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE;
        regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);

        /* r14 is the function return address */
        kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
        /* r15 is the stack pointer */
        kcb->jprobe_saved_r15 = (unsigned long)regs->gprs[15];
        addr = (unsigned long)kcb->jprobe_saved_r15;

        memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
               MIN_STACK_SIZE(addr));
        return 1;
}

void __kprobes jprobe_return(void)
{
        asm volatile(".word 0x0002");
}

void __kprobes jprobe_return_end(void)
{
        asm volatile("bcr 0,0");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_r15);

        /* Put the regs back */
        memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
        /* put the stack back */
        memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
               MIN_STACK_SIZE(stack_addr));
        preempt_enable_no_resched();
        return 1;
}

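/*
 * The jprobe entry/exit pair above works like setjmp/longjmp:
 * setjmp_pre_handler() saves the registers and the top of the stack and
 * points the psw at the jprobe handler. The handler must end in
 * jprobe_return(), whose ".word 0x0002" is the kprobes breakpoint; the
 * resulting trap takes the break_handler path in kprobe_handler(), and
 * longjmp_break_handler() restores the saved registers and stack so the
 * probed function continues as if nothing had happened.
 */
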
static struct kprobe trampoline_p = {
        .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
        .pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
        return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
        if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
                return 1;
        return 0;
}
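
/*
 * Illustrative usage sketch (not taken from this file; the probed symbol
 * "do_fork" and the module boilerplate are assumptions chosen only to show
 * the flow). Registering a kprobe ends up in the arch hooks above:
 * arch_prepare_kprobe() copies the instruction, arch_arm_kprobe() plants
 * the breakpoint, and the handlers run from kprobe_handler() and
 * post_kprobe_handler().
 *
 *      #include <linux/module.h>
 *      #include <linux/kprobes.h>
 *
 *      static int example_pre(struct kprobe *p, struct pt_regs *regs)
 *      {
 *              pr_info("kprobe hit at %p, psw.addr=%lx\n",
 *                      p->addr, regs->psw.addr);
 *              return 0;
 *      }
 *
 *      static struct kprobe example_kp = {
 *              .symbol_name    = "do_fork",
 *              .pre_handler    = example_pre,
 *      };
 *
 *      static int __init example_init(void)
 *      {
 *              return register_kprobe(&example_kp);
 *      }
 *
 *      static void __exit example_exit(void)
 *      {
 *              unregister_kprobe(&example_kp);
 *      }
 *
 *      module_init(example_init);
 *      module_exit(example_exit);
 *      MODULE_LICENSE("GPL");
 */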