/*
 * Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64 from i386.
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function return probes functionality.
 * 2006-Feb	Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
 *		kprobe-booster and kretprobe-booster for i386.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
 *		and kretprobe-booster for x86-64.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
 *		<arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
 *		unified x86 kprobes code.
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>

#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>

void jprobe_return_end(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

#ifdef CONFIG_X86_64
#define stack_addr(regs) ((unsigned long *)regs->sp)
#else
/*
 * "&regs->sp" looks wrong, but it's correct for x86_32.  x86_32 CPUs
 * don't save the ss and esp registers if the CPU is already in kernel
 * mode when it traps.  So for kprobes, regs->sp and regs->ss are not
 * the [nonexistent] saved stack pointer and ss register, but rather
 * the top 8 bytes of the pre-int3 stack.  So &regs->sp happens to
 * point to the top of the pre-int3 stack.
 */
#define stack_addr(regs) ((unsigned long *)&regs->sp)
#endif

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
	/*
	 * Undefined/reserved opcodes, conditional jumps, opcode extension
	 * groups, and some special opcodes cannot be boosted.
	 */
static const u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      ----------------------------------------------          */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
static const u32 onebyte_has_modrm[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      -----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 00 */
	W(0x10, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 20 */
	W(0x30, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) , /* 30 */
	W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 90 */
	W(0xa0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* a0 */
	W(0xb0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
static const u32 twobyte_has_modrm[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      -----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1) | /* 0f */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0) , /* 1f */
	W(0x20, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 2f */
	W(0x30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 3f */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 4f */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 5f */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 6f */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1) , /* 7f */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 8f */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 9f */
	W(0xa0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) | /* af */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* bf */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) | /* cf */
	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* df */
	W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* ef */
	W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0)   /* ff */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
#undef W

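/*
 * Example (illustrative only, not used below): the W() tables are plain
 * bitmaps with one bit per opcode byte, so a lookup is just a test_bit()
 * call with the opcode as the bit index.  The sketch checks the second
 * byte of a two-byte (0x0f-prefixed) instruction; e.g. 0x0f 0x90 (seto)
 * has bit 0x90 set in twobyte_is_boostable and is therefore boostable.
 */
static inline int example_twobyte_boostable(u8 opcode)
{
	return test_bit(opcode, (unsigned long *)twobyte_is_boostable);
}
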
struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", }, /* This function switches only the current task,
			      but doesn't switch the kernel stack. */
	{NULL, NULL}	/* Terminator */
};
const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

/* Insert a jump instruction at address 'from', which jumps to address 'to'. */
static void __kprobes set_jmp_op(void *from, void *to)
{
	struct __arch_jmp_op {
		char op;
		s32 raddr;
	} __attribute__((packed)) *jop;
	jop = (struct __arch_jmp_op *)from;
	jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
	jop->op = RELATIVEJUMP_INSTRUCTION;
}
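
/*
 * Worked example (illustrative): a 5-byte "jmp rel32" at from == 0x1000
 * targeting to == 0x2000 stores raddr == 0x2000 - (0x1000 + 5) == 0x0ffb;
 * the displacement is taken relative to the end of the jump instruction,
 * not to its first byte.
 */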

/*
 * Check for the REX prefix, which can exist only on x86-64.
 * On x86-32 this always returns 0.
 */
static int __kprobes is_REX_prefix(kprobe_opcode_t *insn)
{
#ifdef CONFIG_X86_64
	if ((*insn & 0xf0) == 0x40)
		return 1;
#endif
	return 0;
}

/*
 * Returns non-zero if the opcode is boostable.
 * RIP-relative instructions are adjusted at copying time in 64-bit mode.
 */
static int __kprobes can_boost(kprobe_opcode_t *opcodes)
{
	kprobe_opcode_t opcode;
	kprobe_opcode_t *orig_opcodes = opcodes;

retry:
	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
		return 0;
	opcode = *(opcodes++);

	/* 2nd-byte opcode */
	if (opcode == 0x0f) {
		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
			return 0;
		return test_bit(*opcodes,
				(unsigned long *)twobyte_is_boostable);
	}

	switch (opcode & 0xf0) {
#ifdef CONFIG_X86_64
	case 0x40:
		goto retry; /* REX prefix is boostable */
#endif
	case 0x60:
		if (0x63 < opcode && opcode < 0x67)
			goto retry; /* prefixes */
		/* can't boost address-size override and bound */
		return (opcode != 0x62 && opcode != 0x67);
	case 0x70:
		return 0; /* can't boost conditional jump */
	case 0xc0:
		/* can't boost software interrupts */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
			goto retry; /* lock/rep(ne) prefix */
		/* clear and set flags are boostable */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* segment override prefixes are boostable */
		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
			goto retry; /* prefixes */
		/* CS override prefix and call are not boostable */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}
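
/*
 * Worked example (illustrative): can_boost() on the byte sequence
 * {0xf3, 0x90} ("rep nop", i.e. pause) first sees 0xf3, takes the
 * lock/rep(ne) prefix retry path, then classifies 0x90 (nop) through
 * the default case and returns 1: the instruction is boostable.
 */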

/*
 * Returns non-zero if the opcode modifies the interrupt flag.
 */
static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
{
	switch (*insn) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}

	/*
	 * On X86_64, 0x40-0x4f are REX prefixes, so we need to look at
	 * the next byte instead -- but of course not recurse infinitely.
	 */
	if (is_REX_prefix(insn))
		return is_IF_modifier(++insn);

	return 0;
}

/*
 * Adjust the displacement if the instruction uses the %rip-relative
 * addressing mode: the copied instruction will run from a different
 * address, so its 32-bit displacement must be rewritten to keep the
 * same target.  Only applicable to 64-bit x86; a no-op on 32-bit builds.
 */
static void __kprobes fix_riprel(struct kprobe *p)
{
#ifdef CONFIG_X86_64
	u8 *insn = p->ainsn.insn;
	s64 disp;
	int need_modrm;

	/* Skip legacy instruction prefixes.  */
	while (1) {
		switch (*insn) {
		case 0x66:
		case 0x67:
		case 0x2e:
		case 0x3e:
		case 0x26:
		case 0x64:
		case 0x65:
		case 0x36:
		case 0xf0:
		case 0xf3:
		case 0xf2:
			++insn;
			continue;
		}
		break;
	}

	/* Skip REX instruction prefix.  */
	if (is_REX_prefix(insn))
		++insn;

	if (*insn == 0x0f) {
		/* Two-byte opcode.  */
		++insn;
		need_modrm = test_bit(*insn,
				      (unsigned long *)twobyte_has_modrm);
	} else
		/* One-byte opcode.  */
		need_modrm = test_bit(*insn,
				      (unsigned long *)onebyte_has_modrm);

	if (need_modrm) {
		u8 modrm = *++insn;
		if ((modrm & 0xc7) == 0x05) {
			/* %rip+disp32 addressing mode */
			/* Displacement follows ModRM byte.  */
			++insn;
			/*
			 * The copied instruction uses the %rip-relative
			 * addressing mode.  Adjust the displacement for the
			 * difference between the original location of this
			 * instruction and the location of the copy that will
			 * actually be run.  The tricky bit here is making sure
			 * that the sign extension happens correctly in this
			 * calculation, since we need a signed 32-bit result to
			 * be sign-extended to 64 bits when it's added to the
			 * %rip value and yield the same 64-bit result that the
			 * sign-extension of the original signed 32-bit
			 * displacement would have given.
			 */
			disp = (u8 *) p->addr + *((s32 *) insn) -
			       (u8 *) p->ainsn.insn;
			BUG_ON((s64) (s32) disp != disp); /* Sanity check.  */
			*(s32 *)insn = (s32) disp;
		}
	}
#endif
}
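
/*
 * Worked example (illustrative): if the original instruction lives at
 * p->addr == 0xffffffff80400000 with a %rip-relative disp32 of 0x100 and
 * the copy sits at p->ainsn.insn == 0xffffffff80500000, the rewritten
 * displacement is 0x100 + (0xffffffff80400000 - 0xffffffff80500000)
 * == 0x100 - 0x100000, so the copy still references the original target.
 * The instruction-length term cancels out because the displacement sits
 * at the same offset within the original and the copy.
 */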

static void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));

	fix_riprel(p);

	if (can_boost(p->addr))
		p->ainsn.boostable = 0;
	else
		p->ainsn.boostable = -1;

	p->opcode = *p->addr;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* insn: must be on special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	arch_copy_kprobe(p);
	return 0;
}
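
/*
 * For reference, a minimal module-side usage sketch (hypothetical handler
 * and symbol names; register_kprobe() and unregister_kprobe() are the
 * generic <linux/kprobes.h> entry points that end up calling
 * arch_prepare_kprobe() above).  Returning 0 from the pre-handler lets
 * the probed instruction be single-stepped as usual:
 *
 *	static int my_pre(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "probe hit, ip=%lx\n", regs->ip);
 *		return 0;
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= my_pre,
 *	};
 *
 *	register_kprobe(&my_kp);	(and unregister_kprobe() on unload)
 */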

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, &p->opcode, 1);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	mutex_lock(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
	mutex_unlock(&kprobe_mutex);
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	if (is_IF_modifier(p->ainsn.insn))
		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}

static void __kprobes clear_btf(void)
{
	if (test_thread_flag(TIF_DEBUGCTLMSR))
		wrmsr(MSR_IA32_DEBUGCTLMSR, 0, 0);
}

static void __kprobes restore_btf(void)
{
	if (test_thread_flag(TIF_DEBUGCTLMSR))
		wrmsr(MSR_IA32_DEBUGCTLMSR, current->thread.debugctlmsr, 0);
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	clear_btf();
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->ip = (unsigned long)p->addr;
	else
		regs->ip = (unsigned long)p->ainsn.insn;
}

/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	unsigned long *sara = stack_addr(regs);

	ri->ret_addr = (kprobe_opcode_t *) *sara;

	/* Replace the return addr with trampoline addr */
	*sara = (unsigned long) &kretprobe_trampoline;
}
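
/*
 * For reference, a module-side kretprobe sketch (hypothetical names;
 * register_kretprobe() from <linux/kprobes.h> is what ultimately invokes
 * arch_prepare_kretprobe() above each time the probed function is entered):
 *
 *	static int my_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "returned %lx\n", regs->ax);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler	= my_ret,
 *		.maxactive	= 16,
 *		.kp.symbol_name	= "do_fork",
 *	};
 *
 *	register_kretprobe(&my_rp);
 */
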
/*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler.  We save the original kprobes variables and just
 * single-step on the instruction of the new probe without calling any user
 * handlers.
 */
static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	if (kcb->kprobe_status == KPROBE_HIT_SS &&
	    *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
		regs->flags &= ~X86_EFLAGS_TF;
		regs->flags |= kcb->kprobe_saved_flags;
		return 0;
#ifdef CONFIG_X86_64
	} else if (kcb->kprobe_status == KPROBE_HIT_SSDONE) {
		/* TODO: Provide re-entrancy from post_kprobe_handler() and
		 * avoid exception stack corruption while single-stepping on
		 * the instruction of the new probe.
		 */
		arch_disarm_kprobe(p);
		regs->ip = (unsigned long)p->addr;
		reset_current_kprobe();
		return 1;
#endif
	}
	save_previous_kprobe(kcb);
	set_current_kprobe(p, regs, kcb);
	kprobes_inc_nmissed_count(p);
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_REENTER;
	return 1;
}

/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	p = get_kprobe(addr);
	if (p) {
		/* Check that we're not actually recursing. */
		if (kprobe_running()) {
			ret = reenter_kprobe(p, regs, kcb);
			if (kcb->kprobe_status == KPROBE_REENTER) {
				ret = 1;
				goto out;
			}
			goto preempt_out;
		} else {
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
			if (p->pre_handler && p->pre_handler(p, regs)) {
				/* handler set things up, skip ss setup */
				ret = 1;
				goto out;
			}
		}
	} else {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 * Back up over the (now missing) int3 and run
			 * the original instruction.
			 */
			regs->ip = (unsigned long)addr;
			ret = 1;
			goto preempt_out;
		}
		if (kprobe_running()) {
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		/* Not one of ours: let the kernel handle it. */
		goto preempt_out;
	}

ss_probe:
	ret = 1;
#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
	if (p->ainsn.boostable == 1 && !p->post_handler) {
		/* Boost up -- we can execute the copied instructions directly */
		reset_current_kprobe();
		regs->ip = (unsigned long)p->ainsn.insn;
		goto preempt_out;
	}
#endif
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	goto out;

preempt_out:
	preempt_enable_no_resched();
out:
	return ret;
}

/*
 * When a retprobed function returns, this code saves registers and then
 * calls trampoline_handler(), which in turn calls the kretprobe's handler.
 */
void __kprobes kretprobe_trampoline_holder(void)
{
	asm volatile (
			".global kretprobe_trampoline\n"
			"kretprobe_trampoline: \n"
#ifdef CONFIG_X86_64
			/* We don't bother saving the ss register */
			"	pushq %rsp\n"
			"	pushfq\n"
			/*
			 * Skip cs, ip, orig_ax.
			 * trampoline_handler() will plug in these values
			 */
			"	subq $24, %rsp\n"
			"	pushq %rdi\n"
			"	pushq %rsi\n"
			"	pushq %rdx\n"
			"	pushq %rcx\n"
			"	pushq %rax\n"
			"	pushq %r8\n"
			"	pushq %r9\n"
			"	pushq %r10\n"
			"	pushq %r11\n"
			"	pushq %rbx\n"
			"	pushq %rbp\n"
			"	pushq %r12\n"
			"	pushq %r13\n"
			"	pushq %r14\n"
			"	pushq %r15\n"
			"	movq %rsp, %rdi\n"
			"	call trampoline_handler\n"
			/* Replace saved sp with true return address. */
			"	movq %rax, 152(%rsp)\n"
			"	popq %r15\n"
			"	popq %r14\n"
			"	popq %r13\n"
			"	popq %r12\n"
			"	popq %rbp\n"
			"	popq %rbx\n"
			"	popq %r11\n"
			"	popq %r10\n"
			"	popq %r9\n"
			"	popq %r8\n"
			"	popq %rax\n"
			"	popq %rcx\n"
			"	popq %rdx\n"
			"	popq %rsi\n"
			"	popq %rdi\n"
			/* Skip orig_ax, ip, cs */
			"	addq $24, %rsp\n"
			"	popfq\n"
#else
			"	pushf\n"
			/*
			 * Skip cs, ip, orig_ax.
			 * trampoline_handler() will plug in these values
			 */
			"	subl $12, %esp\n"
			"	pushl %fs\n"
			"	pushl %ds\n"
			"	pushl %es\n"
			"	pushl %eax\n"
			"	pushl %ebp\n"
			"	pushl %edi\n"
			"	pushl %esi\n"
			"	pushl %edx\n"
			"	pushl %ecx\n"
			"	pushl %ebx\n"
			"	movl %esp, %eax\n"
			"	call trampoline_handler\n"
			/* Move flags to cs */
			"	movl 52(%esp), %edx\n"
			"	movl %edx, 48(%esp)\n"
			/* Replace saved flags with true return address. */
			"	movl %eax, 52(%esp)\n"
			"	popl %ebx\n"
			"	popl %ecx\n"
			"	popl %edx\n"
			"	popl %esi\n"
			"	popl %edi\n"
			"	popl %ebp\n"
			"	popl %eax\n"
			/* Skip ip, orig_ax, es, ds, fs */
			"	addl $20, %esp\n"
			"	popf\n"
#endif
			"	ret\n");
}

/*
 * Called from kretprobe_trampoline
 */
void * __kprobes trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);
	/* fixup registers */
#ifdef CONFIG_X86_64
	regs->cs = __KERNEL_CS;
#else
	regs->cs = __KERNEL_CS | get_kernel_rpl();
#endif
	regs->ip = trampoline_address;
	regs->orig_ax = ~0UL;

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the (chronologically) first instance's ret_addr
	 *       will be the real return address, and all the rest will
	 *       point to kretprobe_trampoline.
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler) {
			__get_cpu_var(current_kprobe) = &ri->rp->kp;
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->rp->handler(ri, regs);
			__get_cpu_var(current_kprobe) = NULL;
		}

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address.  Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	spin_unlock_irqrestore(&kretprobe_lock, flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	return (void *)orig_ret_address;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new ip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed flags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * If this is the first time we've single-stepped the instruction at
 * this probepoint, and the instruction is boostable, boost it: add a
 * jump instruction after the copied instruction, that jumps to the next
 * instruction after the probepoint.
 */
static void __kprobes resume_execution(struct kprobe *p,
		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = stack_addr(regs);
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;
	kprobe_opcode_t *insn = p->ainsn.insn;

	/* skip the REX prefix */
	if (is_REX_prefix(insn))
		insn++;

	regs->flags &= ~X86_EFLAGS_TF;
	switch (*insn) {
	case 0x9c:	/* pushfl */
		*tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
		*tos |= kcb->kprobe_old_flags;
		break;
	case 0xc2:	/* iret/ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
	case 0xcf:
	case 0xea:	/* jmp absolute -- ip is correct */
		/* ip is already adjusted, no more changes required */
		p->ainsn.boostable = 1;
		goto no_change;
	case 0xe8:	/* call relative - Fix return addr */
		*tos = orig_ip + (*tos - copy_ip);
		break;
#ifdef CONFIG_X86_32
	case 0x9a:	/* call absolute -- same fixup as call absolute, indirect */
		*tos = orig_ip + (*tos - copy_ip);
		goto no_change;
#endif
	case 0xff:
		if ((insn[1] & 0x30) == 0x10) {
			/*
			 * call absolute, indirect
			 * Fix return addr; ip is correct.
			 * But this is not boostable.
			 */
			*tos = orig_ip + (*tos - copy_ip);
			goto no_change;
		} else if (((insn[1] & 0x31) == 0x20) ||
			   ((insn[1] & 0x31) == 0x21)) {
			/*
			 * jmp near and far, absolute indirect
			 * ip is correct.  And this is boostable.
			 */
			p->ainsn.boostable = 1;
			goto no_change;
		}
	default:
		break;
	}

	if (p->ainsn.boostable == 0) {
		if ((regs->ip > copy_ip) &&
		    (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
			/*
			 * The copied instruction can be executed directly
			 * if it jumps back to the correct address.
			 */
			set_jmp_op((void *)regs->ip,
				   (void *)orig_ip + (regs->ip - copy_ip));
			p->ainsn.boostable = 1;
		} else {
			p->ainsn.boostable = -1;
		}
	}

	regs->ip += orig_ip - copy_ip;

no_change:
	restore_btf();
}
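
/*
 * Worked example (illustrative): for a single-stepped "call rel32", with
 * the copy at copy_ip == 0xffffffff80500000 and the original at
 * orig_ip == 0xffffffff80400000, the CPU pushed copy_ip + 5 as the return
 * address.  The fixup *tos = orig_ip + (*tos - copy_ip) rewrites it to
 * orig_ip + 5, and regs->ip receives the same orig_ip - copy_ip correction.
 */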

/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);
	regs->flags |= kcb->kprobe_saved_flags;
	trace_hardirqs_fixup_flags(regs->flags);

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->flags & X86_EFLAGS_TF)
		return 0;

	return 1;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single-
		 * stepped caused a page fault.  We reset the current
		 * kprobe, point the ip back to the probe address, and
		 * allow the page fault handler to continue as for a
		 * normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;
		regs->flags |= kcb->kprobe_old_flags;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts can also be used to
		 * account for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault.  This could happen
		 * if the handler tries to access user space, e.g. via
		 * copy_from_user() or get_user().  Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;

		/*
		 * The fixup routine could not handle it;
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode_vm(args->regs))
		return ret;

	switch (val) {
	case DIE_INT3:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_GPF:
		/*
		 * To be potentially processing a kprobe fault and to
		 * trust the result from kprobe_running(), we have to
		 * be non-preemptible.
		 */
		if (!preemptible() && kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_sp = stack_addr(regs);
	addr = (unsigned long)(kcb->jprobe_saved_sp);

	/*
	 * As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization.  So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
	       MIN_STACK_SIZE(addr));
	regs->flags &= ~X86_EFLAGS_IF;
	trace_hardirqs_off();
	regs->ip = (unsigned long)(jp->entry);
	return 1;
}
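
/*
 * For reference, a module-side jprobe sketch (hypothetical names; the entry
 * function must mirror the probed function's signature and must finish with
 * jprobe_return(), which triggers longjmp_break_handler() below; the cast
 * matches this kernel's "kprobe_opcode_t *entry" field):
 *
 *	static long my_entry(unsigned long arg0, unsigned long arg1)
 *	{
 *		printk(KERN_INFO "arg0=%lx\n", arg0);
 *		jprobe_return();
 *		return 0;	(never reached)
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry		= (kprobe_opcode_t *)my_entry,
 *		.kp.symbol_name	= "my_target",
 *	};
 *
 *	register_jprobe(&my_jp);
 */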

void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	asm volatile (
#ifdef CONFIG_X86_64
			"	xchg %%rbx,%%rsp	\n"
#else
			"	xchgl %%ebx,%%esp	\n"
#endif
			"	int3			\n"
			"	.globl jprobe_return_end\n"
			"	jprobe_return_end:	\n"
			"	nop			\n"::"b"
			(kcb->jprobe_saved_sp):"memory");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->ip - 1);
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	if ((addr > (u8 *) jprobe_return) &&
	    (addr < (u8 *) jprobe_return_end)) {
		if (stack_addr(regs) != kcb->jprobe_saved_sp) {
			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
			printk(KERN_ERR
			       "current sp %p does not match saved sp %p\n",
			       stack_addr(regs), kcb->jprobe_saved_sp);
			printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
			show_registers(saved_regs);
			printk(KERN_ERR "Current registers\n");
			show_registers(regs);
			BUG();
		}
		*regs = kcb->jprobe_saved_regs;
		memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
		       kcb->jprobes_stack,
		       MIN_STACK_SIZE(kcb->jprobe_saved_sp));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}

int __init arch_init_kprobes(void)
{
	return 0;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}