/*
 * User-space Probes (UProbes) for x86
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2011
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/uaccess.h>

#include <linux/kdebug.h>
#include <asm/processor.h>
#include <asm/insn.h>

/* Post-execution fixups. */

/* Adjust IP back to vicinity of actual insn */
#define UPROBE_FIX_IP		0x1

/* Adjust the return address of a call insn */
#define UPROBE_FIX_CALL		0x2

/* Instruction will modify TF, don't change it */
#define UPROBE_FIX_SETF		0x4

#define UPROBE_FIX_RIP_AX	0x8000
#define UPROBE_FIX_RIP_CX	0x4000

#define	UPROBE_TRAP_NR		UINT_MAX

/* Adaptations for mhiramat x86 decoder v14. */
#define OPCODE1(insn)		((insn)->opcode.bytes[0])
#define OPCODE2(insn)		((insn)->opcode.bytes[1])
#define OPCODE3(insn)		((insn)->opcode.bytes[2])
#define MODRM_REG(insn)		X86_MODRM_REG((insn)->modrm.value)

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))

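/*
 * For illustration, consider how W() builds the bitmaps below: each W()
 * row contributes 16 bits shifted by (row % 32), so rows 0x00 and 0x10
 * land in good_insns_32[0], rows 0x20 and 0x30 in [1], and so on.  In the
 * 32-bit table, the row W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, ...) sets bit 0x06
 * (push %es) and clears bit 0x07 (pop %es), so
 *
 *	test_bit(0x06, (unsigned long *)good_insns_32) == 1
 *	test_bit(0x07, (unsigned long *)good_insns_32) == 0
 */
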
/*
 * Good-instruction tables for 32-bit apps.  This is non-const and volatile
 * to keep gcc from statically optimizing it out, as variable_test_bit makes
 * some versions of gcc think only *(unsigned long*) is used.
 */
static volatile u32 good_insns_32[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};

/* Using this for both 64-bit and 32-bit apps */
static volatile u32 good_2byte_insns[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
	W(0x30, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};

/* Good-instruction tables for 64-bit apps */
static volatile u32 good_insns_64[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 30 */
	W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#undef W

/*
 * opcodes we'll probably never support:
 *
 *  6c-6d, e4-e5, ec-ed - in
 *  6e-6f, e6-e7, ee-ef - out
 *  cc, cd - int3, int
 *  cf - iret
 *  d6 - illegal instruction
 *  f1 - int1/icebp
 *  f4 - hlt
 *  fa, fb - cli, sti
 *  0f - lar, lsl, syscall, clts, sysret, sysenter, sysexit, invd, wbinvd, ud2
 *
 * invalid opcodes in 64-bit mode:
 *
 *  06, 0e, 16, 1e, 27, 2f, 37, 3f, 60-62, 82, c4-c5, d4-d5
 *  63 - we support this opcode in x86_64 but not in i386.
 *
 * opcodes we may need to refine support for:
 *
 *  0f - 2-byte instructions: For many of these instructions, the validity
 *  depends on the prefix and/or the reg field.  On such instructions, we
 *  just consider the opcode combination valid if it corresponds to any
 *  valid instruction.
 *
 *  8f - Group 1 - only reg = 0 is OK
 *  c6-c7 - Group 11 - only reg = 0 is OK
 *  d9-df - fpu insns with some illegal encodings
 *  f2, f3 - repnz, repz prefixes.  These are also the first byte for
 *  certain floating-point instructions, such as addsd.
 *
 *  fe - Group 4 - only reg = 0 or 1 is OK
 *  ff - Group 5 - only reg = 0-6 is OK
 *
 * others -- Do we need to support these?
 *
 *  0f - (floating-point?) prefetch instructions
 *  07, 17, 1f - pop es, pop ss, pop ds
 *  26, 2e, 36, 3e - es:, cs:, ss:, ds: segment prefixes --
 *	but 64 and 65 (fs: and gs:) seem to be used, so we support them
 *  67 - addr16 prefix
 *  ce - into
 *  f0 - lock prefix
 */

/*
 * TODO:
 * - Where necessary, examine the modrm byte and allow only valid instructions
 *   in the different Groups and fpu instructions.
 */

static bool is_prefix_bad(struct insn *insn)
{
        int i;

        for (i = 0; i < insn->prefixes.nbytes; i++) {
                switch (insn->prefixes.bytes[i]) {
                case 0x26:      /* INAT_PFX_ES   */
                case 0x2E:      /* INAT_PFX_CS   */
                case 0x36:      /* INAT_PFX_DS   */
                case 0x3E:      /* INAT_PFX_SS   */
                case 0xF0:      /* INAT_PFX_LOCK */
                        return true;
                }
        }
        return false;
}

static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool x86_64)
{
        u32 volatile *good_insns;

        insn_init(insn, auprobe->insn, x86_64);
        /* has the side-effect of processing the entire instruction */
        insn_get_length(insn);
        if (WARN_ON_ONCE(!insn_complete(insn)))
                return -ENOEXEC;

        if (is_prefix_bad(insn))
                return -ENOTSUPP;

        if (x86_64)
                good_insns = good_insns_64;
        else
                good_insns = good_insns_32;

        if (test_bit(OPCODE1(insn), (unsigned long *)good_insns))
                return 0;

        if (insn->opcode.nbytes == 2) {
                if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
                        return 0;
        }

        return -ENOTSUPP;
}

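/*
 * A purely illustrative example of the lookups above: an attempt to probe
 * an "int3" (0xcc) or "into" (0xce) fails with -ENOTSUPP, because row 0xc0
 * of both good_insns_32 and good_insns_64 clears bits cc-cf, while a plain
 * "push %rbp" (0x55) is accepted since bit 0x55 is set in every table.
 */
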
#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
        return  !config_enabled(CONFIG_IA32_EMULATION) ||
                !mm->context.ia32_compat;
}
/*
 * If arch_uprobe->insn doesn't use rip-relative addressing, return
 * immediately.  Otherwise, rewrite the instruction so that it accesses
 * its memory operand indirectly through a scratch register.  Set
 * arch_uprobe->fixups and arch_uprobe->rip_rela_target_address
 * accordingly.  (The contents of the scratch register will be saved
 * before we single-step the modified instruction, and restored
 * afterward.)
 *
 * We do this because a rip-relative instruction can access only a
 * relatively small area (+/- 2 GB from the instruction), and the XOL
 * area typically lies beyond that area.  At least for instructions
 * that store to memory, we can't execute the original instruction
 * and "fix things up" later, because the misdirected store could be
 * disastrous.
 *
 * Some useful facts about rip-relative instructions:
 *
 *  - There's always a modrm byte.
 *  - There's never a SIB byte.
 *  - The displacement is always 4 bytes.
 */
static void
handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn)
{
        u8 *cursor;
        u8 reg;

        if (!insn_rip_relative(insn))
                return;

        /*
         * insn_rip_relative() would have decoded rex_prefix, modrm.
         * Clear REX.b bit (extension of MODRM.rm field):
         * we want to encode rax/rcx, not r8/r9.
         */
        if (insn->rex_prefix.nbytes) {
                cursor = auprobe->insn + insn_offset_rex_prefix(insn);
                *cursor &= 0xfe;        /* Clearing REX.B bit */
        }

        /*
         * Point cursor at the modrm byte.  The next 4 bytes are the
         * displacement.  Beyond the displacement, for some instructions,
         * is the immediate operand.
         */
        cursor = auprobe->insn + insn_offset_modrm(insn);
        /*
         * Convert from rip-relative addressing to indirect addressing
         * via a scratch register.  Change the r/m field from 0x5 (%rip)
         * to 0x0 (%rax) or 0x1 (%rcx), and squeeze out the offset field.
         */
        reg = MODRM_REG(insn);
        if (reg == 0) {
                /*
                 * The register operand (if any) is either the A register
                 * (%rax, %eax, etc.) or (if the 0x4 bit is set in the
                 * REX prefix) %r8.  In any case, we know the C register
                 * is NOT the register operand, so we use %rcx (register
                 * #1) for the scratch register.
                 */
                auprobe->fixups = UPROBE_FIX_RIP_CX;
                /* Change modrm from 00 000 101 to 00 000 001. */
                *cursor = 0x1;
        } else {
                /* Use %rax (register #0) for the scratch register. */
                auprobe->fixups = UPROBE_FIX_RIP_AX;
                /* Change modrm from 00 xxx 101 to 00 xxx 000 */
                *cursor = (reg << 3);
        }

        /* Target address = address of next instruction + (signed) offset */
        auprobe->rip_rela_target_address = (long)insn->length + insn->displacement.value;

        /* Displacement field is gone; slide immediate field (if any) over. */
        if (insn->immediate.nbytes) {
                cursor++;
                memmove(cursor, cursor + insn->displacement.nbytes, insn->immediate.nbytes);
        }
}

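/*
 * A minimal sketch of the rewrite above, assuming the probed instruction
 * is "mov %esi,0x12345678(%rip)" (89 35 78 56 34 12):
 *
 *	reg = MODRM_REG() = 6, so %rax is free to act as the scratch;
 *	fixups = UPROBE_FIX_RIP_AX, modrm 0x35 -> 0x30, and the copy in
 *	->insn[] now starts with "mov %esi,(%rax)" (89 30);
 *	rip_rela_target_address = 6 + 0x12345678.
 *
 * pre_xol_rip_insn() below then loads %rax with the probed address plus
 * that offset, which is exactly the address the original %rip-relative
 * form would have referenced.
 */
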
/*
 * If we're emulating a rip-relative instruction, save the contents
 * of the scratch register and store the target address in that register.
 */
static void
pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs,
                                struct arch_uprobe_task *autask)
{
        if (auprobe->fixups & UPROBE_FIX_RIP_AX) {
                autask->saved_scratch_register = regs->ax;
                regs->ax = current->utask->vaddr;
                regs->ax += auprobe->rip_rela_target_address;
        } else if (auprobe->fixups & UPROBE_FIX_RIP_CX) {
                autask->saved_scratch_register = regs->cx;
                regs->cx = current->utask->vaddr;
                regs->cx += auprobe->rip_rela_target_address;
        }
}

static void
handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *correction)
{
        if (auprobe->fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) {
                struct arch_uprobe_task *autask;

                autask = &current->utask->autask;
                if (auprobe->fixups & UPROBE_FIX_RIP_AX)
                        regs->ax = autask->saved_scratch_register;
                else
                        regs->cx = autask->saved_scratch_register;

                /*
                 * The original instruction includes a displacement, and so
                 * is 4 bytes longer than what we've just single-stepped.
                 * Caller may need to apply other fixups to handle stuff
                 * like "jmpq *...(%rip)" and "callq *...(%rip)".
                 */
                if (correction)
                        *correction += 4;
        }
}
#else /* 32-bit: */
static inline bool is_64bit_mm(struct mm_struct *mm)
{
        return false;
}
/*
 * No RIP-relative addressing on 32-bit
 */
static void handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn)
{
}
static void pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs,
                                struct arch_uprobe_task *autask)
{
}
static void handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs,
                                        long *correction)
{
}
#endif /* CONFIG_X86_64 */

struct uprobe_xol_ops {
        bool    (*emulate)(struct arch_uprobe *, struct pt_regs *);
        int     (*pre_xol)(struct arch_uprobe *, struct pt_regs *);
        int     (*post_xol)(struct arch_uprobe *, struct pt_regs *);
};

static inline int sizeof_long(void)
{
        return is_ia32_task() ? 4 : 8;
}

static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
        pre_xol_rip_insn(auprobe, regs, &current->utask->autask);
        return 0;
}

/*
 * Adjust the return address pushed by a call insn executed out of line.
 */
static int adjust_ret_addr(unsigned long sp, long correction)
{
        int rasize = sizeof_long();
        long ra;

        if (copy_from_user(&ra, (void __user *)sp, rasize))
                return -EFAULT;

        ra += correction;
        if (copy_to_user((void __user *)sp, &ra, rasize))
                return -EFAULT;

        return 0;
}

static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
        struct uprobe_task *utask = current->utask;
        long correction = (long)(utask->vaddr - utask->xol_vaddr);

        handle_riprel_post_xol(auprobe, regs, &correction);
        if (auprobe->fixups & UPROBE_FIX_IP)
                regs->ip += correction;

        if (auprobe->fixups & UPROBE_FIX_CALL) {
                if (adjust_ret_addr(regs->sp, correction)) {
                        regs->sp += sizeof_long();
                        return -ERESTART;
                }
        }

        return 0;
}

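/*
 * To make the fixup arithmetic concrete (a sketch with made-up addresses):
 * suppose a 5-byte "call" probed at vaddr 0x400100 is single-stepped from
 * an XOL slot at 0x7f0000001000.  Then
 *
 *	correction = 0x400100 - 0x7f0000001000 (a large negative value)
 *	UPROBE_FIX_IP:   ip  = 0x7f0000001005 + correction = 0x400105
 *	UPROBE_FIX_CALL: *sp = 0x7f0000001005 + correction = 0x400105
 *
 * i.e. both the saved ip and the pushed return address end up pointing at
 * the byte that follows the original instruction, not the XOL copy.
 */
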
static struct uprobe_xol_ops default_xol_ops = {
        .pre_xol  = default_pre_xol_op,
        .post_xol = default_post_xol_op,
};

static bool branch_is_call(struct arch_uprobe *auprobe)
{
        return auprobe->branch.opc1 == 0xe8;
}

#define CASE_COND					\
	COND(70, 71, XF(OF))				\
	COND(72, 73, XF(CF))				\
	COND(74, 75, XF(ZF))				\
	COND(78, 79, XF(SF))				\
	COND(7a, 7b, XF(PF))				\
	COND(76, 77, XF(CF) || XF(ZF))			\
	COND(7c, 7d, XF(SF) != XF(OF))			\
	COND(7e, 7f, XF(ZF) || XF(SF) != XF(OF))

#define COND(op_y, op_n, expr)				\
	case 0x ## op_y: DO((expr) != 0)		\
	case 0x ## op_n: DO((expr) == 0)

#define XF(xf)	(!!(flags & X86_EFLAGS_ ## xf))

static bool is_cond_jmp_opcode(u8 opcode)
{
        switch (opcode) {
        #define DO(expr)	\
                return true;
        CASE_COND
        #undef	DO

        default:
                return false;
        }
}

static bool check_jmp_cond(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
        unsigned long flags = regs->flags;

        switch (auprobe->branch.opc1) {
        #define DO(expr)	\
                return expr;
        CASE_COND
        #undef	DO

        default: /* not a conditional jmp */
                return true;
        }
}

#undef	XF
#undef	COND
#undef	CASE_COND

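/*
 * For reference, one expansion of the CASE_COND machinery above (a sketch,
 * showing only the je/jne pair): inside check_jmp_cond(), COND(74, 75,
 * XF(ZF)) becomes
 *
 *	case 0x74: return ((!!(flags & X86_EFLAGS_ZF)) != 0);
 *	case 0x75: return ((!!(flags & X86_EFLAGS_ZF)) == 0);
 *
 * while in is_cond_jmp_opcode() the same COND() rows all expand to
 * "return true", so any 0x70..0x7f opcode listed in CASE_COND is accepted.
 */
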
static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
        unsigned long new_ip = regs->ip += auprobe->branch.ilen;
        unsigned long offs = (long)auprobe->branch.offs;

        if (branch_is_call(auprobe)) {
                unsigned long new_sp = regs->sp - sizeof_long();
                /*
                 * If this fails, we execute the (mangled, see the comment in
                 * branch_clear_offset) insn out-of-line.  In the likely case
                 * this should trigger the trap, and the probed application
                 * should die or restart the same insn after it handles the
                 * signal; arch_uprobe_post_xol() won't even be called.
                 *
                 * But there is a corner case, see the comment in ->post_xol().
                 */
                if (copy_to_user((void __user *)new_sp, &new_ip, sizeof_long()))
                        return false;
                regs->sp = new_sp;
        } else if (!check_jmp_cond(auprobe, regs)) {
                offs = 0;
        }

        regs->ip = new_ip + offs;
        return true;
}

static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
        BUG_ON(!branch_is_call(auprobe));
        /*
         * We can only get here if branch_emulate_op() failed to push the ret
         * address _and_ another thread expanded our stack before the (mangled)
         * "call" insn was executed out-of-line.  Just restore ->sp and restart.
         * We could also restore ->ip and try to call branch_emulate_op() again.
         */
        regs->sp += sizeof_long();
        return -ERESTART;
}

static void branch_clear_offset(struct arch_uprobe *auprobe, struct insn *insn)
{
        /*
         * Turn this insn into "call 1f; 1:"; this is what we will execute
         * out-of-line if ->emulate() fails.  We only need this to generate
         * a trap, so that the probed task receives the correct signal with
         * the properly filled siginfo.
         *
         * But see the comment in ->post_xol(), in the unlikely case it can
         * succeed.  So we need to ensure that the new ->ip can not fall into
         * the non-canonical area and trigger #GP.
         *
         * We could turn it into (say) "pushf", but then we would need to
         * divorce ->insn[] and ->ixol[].  We need to preserve the 1st byte
         * of ->insn[] for set_orig_insn().
         */
        memset(auprobe->insn + insn_offset_immediate(insn),
                0, insn->immediate.nbytes);
}

static struct uprobe_xol_ops branch_xol_ops = {
        .emulate  = branch_emulate_op,
        .post_xol = branch_post_xol_op,
};

/* Returns -ENOSYS if branch_xol_ops doesn't handle this insn */
static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
        u8 opc1 = OPCODE1(insn);
        int i;

        switch (opc1) {
        case 0xeb:      /* jmp 8 */
        case 0xe9:      /* jmp 32 */
        case 0x90:      /* prefix* + nop; same as jmp with .offs = 0 */
                break;

        case 0xe8:      /* call relative */
                branch_clear_offset(auprobe, insn);
                break;

        case 0x0f:
                if (insn->opcode.nbytes != 2)
                        return -ENOSYS;
                /*
                 * If it is a "near" conditional jmp, OPCODE2() - 0x10 matches
                 * OPCODE1() of the "short" jmp which checks the same condition.
                 */
                opc1 = OPCODE2(insn) - 0x10;
        default:
                if (!is_cond_jmp_opcode(opc1))
                        return -ENOSYS;
        }

        /*
         * 16-bit overrides such as CALLW (66 e8 nn nn) are not supported.
         * Intel and AMD behavior differ in 64-bit mode: Intel ignores the 66
         * prefix.  No one uses these insns; reject any branch insn with such
         * a prefix.
         */
        for (i = 0; i < insn->prefixes.nbytes; i++) {
                if (insn->prefixes.bytes[i] == 0x66)
                        return -ENOTSUPP;
        }

        auprobe->branch.opc1 = opc1;
        auprobe->branch.ilen = insn->length;
        auprobe->branch.offs = insn->immediate.value;

        auprobe->ops = &branch_xol_ops;
        return 0;
}

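/*
 * Putting the pieces together (a sketch, with an arbitrary target): probing
 * a 6-byte near "jne" such as "0f 85 00 01 00 00" at vaddr V stores
 *
 *	opc1 = 0x85 - 0x10 = 0x75, ilen = 6, offs = 0x100
 *
 * and branch_emulate_op() then sets ip = V + 6 + 0x100 when ZF is clear,
 * or ip = V + 6 when ZF is set, without ever single-stepping the insn.
 */
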
/**
 * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
 * @auprobe: the probepoint information.
 * @mm: the probed address space.
 * @addr: virtual address at which to install the probepoint.
 * Return 0 on success or a -ve number on error.
 */
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
{
        struct insn insn;
        bool fix_ip = true, fix_call = false;
        int ret;

        ret = uprobe_init_insn(auprobe, &insn, is_64bit_mm(mm));
        if (ret)
                return ret;

        ret = branch_setup_xol_ops(auprobe, &insn);
        if (ret != -ENOSYS)
                return ret;

        /*
         * Figure out which fixups arch_uprobe_post_xol() will need to perform,
         * and annotate arch_uprobe->fixups accordingly.  To start with, ->fixups
         * is either zero or it reflects rip-related fixups.
         */
        switch (OPCODE1(&insn)) {
        case 0x9d:              /* popf */
                auprobe->fixups |= UPROBE_FIX_SETF;
                break;
        case 0xc3:              /* ret or lret -- ip is correct */
        case 0xcb:
        case 0xc2:
        case 0xca:
                fix_ip = false;
                break;
        case 0x9a:              /* call absolute - Fix return addr, not ip */
                fix_call = true;
                fix_ip = false;
                break;
        case 0xea:              /* jmp absolute -- ip is correct */
                fix_ip = false;
                break;
        case 0xff:
                switch (MODRM_REG(&insn)) {
                case 2: case 3:                 /* call or lcall, indirect */
                        fix_call = true;
                case 4: case 5:                 /* jmp or ljmp, indirect */
                        fix_ip = false;
                }
                /* fall through */
        default:
                handle_riprel_insn(auprobe, &insn);
        }

        if (fix_ip)
                auprobe->fixups |= UPROBE_FIX_IP;
        if (fix_call)
                auprobe->fixups |= UPROBE_FIX_CALL;

        auprobe->ops = &default_xol_ops;
        return 0;
}

/*
 * arch_uprobe_pre_xol - prepare to execute out of line.
 * @auprobe: the probepoint information.
 * @regs: reflects the saved user state of current task.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
        struct uprobe_task *utask = current->utask;

        regs->ip = utask->xol_vaddr;
        utask->autask.saved_trap_nr = current->thread.trap_nr;
        current->thread.trap_nr = UPROBE_TRAP_NR;

        utask->autask.saved_tf = !!(regs->flags & X86_EFLAGS_TF);
        regs->flags |= X86_EFLAGS_TF;
        if (test_tsk_thread_flag(current, TIF_BLOCKSTEP))
                set_task_blockstep(current, false);

        if (auprobe->ops->pre_xol)
                return auprobe->ops->pre_xol(auprobe, regs);
        return 0;
}

/*
 * If the xol insn itself traps and generates a signal (say,
 * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped
 * instruction jumps back to its own address.  It is assumed that anything
 * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
 *
 * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
 * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
 * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol().
 */
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
        if (t->thread.trap_nr != UPROBE_TRAP_NR)
                return true;

        return false;
}

/*
 * Called after single-stepping.  To avoid the SMP problems that can
 * occur when we temporarily put back the original opcode to
 * single-step, we single-stepped a copy of the instruction.
 *
 * This function prepares to resume execution after the single-step.
 * We have to fix things up as follows:
 *
 * Typically, the new ip is relative to the copied instruction.  We need
 * to make it relative to the original instruction (FIX_IP).  Exceptions
 * are return instructions and absolute or indirect jump or call instructions.
 *
 * If the single-stepped instruction was a call, the return address that
 * is atop the stack is the address following the copied instruction.  We
 * need to make it the address following the original instruction (FIX_CALL).
 *
 * If the original instruction was a rip-relative instruction such as
 * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent
 * instruction using a scratch register -- e.g., "movl %edx,(%rax)".
 * We need to restore the contents of the scratch register and adjust
 * the ip, keeping in mind that the instruction we executed is 4 bytes
 * shorter than the original instruction (since we squeezed out the offset
 * field).  (FIX_RIP_AX or FIX_RIP_CX)
 */
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
        struct uprobe_task *utask = current->utask;

        WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);

        if (auprobe->ops->post_xol) {
                int err = auprobe->ops->post_xol(auprobe, regs);
                if (err) {
                        arch_uprobe_abort_xol(auprobe, regs);
                        /*
                         * Restart the probed insn. ->post_xol() must ensure
                         * this is really possible if it returns -ERESTART.
                         */
                        if (err == -ERESTART)
                                return 0;
                        return err;
                }
        }

        current->thread.trap_nr = utask->autask.saved_trap_nr;
        /*
         * arch_uprobe_pre_xol() doesn't save the state of TIF_BLOCKSTEP
         * so we can get an extra SIGTRAP if we do not clear TF. We need
         * to examine the opcode to make it right.
         */
        if (utask->autask.saved_tf)
                send_sig(SIGTRAP, current, 0);
        else if (!(auprobe->fixups & UPROBE_FIX_SETF))
                regs->flags &= ~X86_EFLAGS_TF;

        return 0;
}

/* callback routine for handling exceptions. */
int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
        struct die_args *args = data;
        struct pt_regs *regs = args->regs;
        int ret = NOTIFY_DONE;

        /* We are only interested in userspace traps */
        if (regs && !user_mode_vm(regs))
                return NOTIFY_DONE;

        switch (val) {
        case DIE_INT3:
                if (uprobe_pre_sstep_notifier(regs))
                        ret = NOTIFY_STOP;

                break;

        case DIE_DEBUG:
                if (uprobe_post_sstep_notifier(regs))
                        ret = NOTIFY_STOP;

        default:
                break;
        }

        return ret;
}

/*
 * This function gets called when the XOL instruction either gets trapped or
 * the thread has a fatal signal, or if arch_uprobe_post_xol() failed.
 * Reset the instruction pointer to its probed address for the potential
 * restart or for post mortem analysis.
 */
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
        struct uprobe_task *utask = current->utask;

        current->thread.trap_nr = utask->autask.saved_trap_nr;
        handle_riprel_post_xol(auprobe, regs, NULL);
        instruction_pointer_set(regs, utask->vaddr);

        /* clear TF if it was set by us in arch_uprobe_pre_xol() */
        if (!utask->autask.saved_tf)
                regs->flags &= ~X86_EFLAGS_TF;
}

static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
        if (auprobe->ops->emulate)
                return auprobe->ops->emulate(auprobe, regs);
        return false;
}

bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
        bool ret = __skip_sstep(auprobe, regs);
        if (ret && (regs->flags & X86_EFLAGS_TF))
                send_sig(SIGTRAP, current, 0);
        return ret;
}

unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
{
        int rasize = sizeof_long(), nleft;
        unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */

        if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
                return -1;

        /* check whether address has been already hijacked */
        if (orig_ret_vaddr == trampoline_vaddr)
                return orig_ret_vaddr;

        nleft = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
        if (likely(!nleft))
                return orig_ret_vaddr;

        if (nleft != rasize) {
                pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
                        "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);

                force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
        }

        return -1;
}