Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* ptrace.c */ |
2 | /* By Ross Biro 1/23/92 */ | |
3 | /* | |
4 | * Pentium III FXSR, SSE support | |
5 | * Gareth Hughes <gareth@valinux.com>, May 2000 | |
6 | */ | |
7 | ||
8 | #include <linux/kernel.h> | |
9 | #include <linux/sched.h> | |
10 | #include <linux/mm.h> | |
11 | #include <linux/smp.h> | |
12 | #include <linux/smp_lock.h> | |
13 | #include <linux/errno.h> | |
14 | #include <linux/ptrace.h> | |
15 | #include <linux/user.h> | |
16 | #include <linux/security.h> | |
17 | #include <linux/audit.h> | |
18 | #include <linux/seccomp.h> | |
7ed20e1a | 19 | #include <linux/signal.h> |
1da177e4 LT |
20 | |
21 | #include <asm/uaccess.h> | |
22 | #include <asm/pgtable.h> | |
23 | #include <asm/system.h> | |
24 | #include <asm/processor.h> | |
25 | #include <asm/i387.h> | |
26 | #include <asm/debugreg.h> | |
27 | #include <asm/ldt.h> | |
28 | #include <asm/desc.h> | |
29 | ||
30 | /* | |
31 | * does not yet catch signals sent when the child dies. | |
32 | * in exit.c or in signal.c. | |
33 | */ | |
34 | ||
35 | /* determines which flags the user has access to. */ | |
36 | /* 1 = access 0 = no access */ | |
37 | #define FLAG_MASK 0x00044dd5 | |
38 | ||
39 | /* set's the trap flag. */ | |
40 | #define TRAP_FLAG 0x100 | |
41 | ||
42 | /* | |
43 | * Offset of eflags on child stack.. | |
44 | */ | |
45 | #define EFL_OFFSET ((EFL-2)*4-sizeof(struct pt_regs)) | |
46 | ||
47 | static inline struct pt_regs *get_child_regs(struct task_struct *task) | |
48 | { | |
49 | void *stack_top = (void *)task->thread.esp0; | |
50 | return stack_top - sizeof(struct pt_regs); | |
51 | } | |
52 | ||
53 | /* | |
54 | * this routine will get a word off of the processes privileged stack. | |
55 | * the offset is how far from the base addr as stored in the TSS. | |
56 | * this routine assumes that all the privileged stacks are in our | |
57 | * data space. | |
58 | */ | |
59 | static inline int get_stack_long(struct task_struct *task, int offset) | |
60 | { | |
61 | unsigned char *stack; | |
62 | ||
63 | stack = (unsigned char *)task->thread.esp0; | |
64 | stack += offset; | |
65 | return (*((int *)stack)); | |
66 | } | |
67 | ||
68 | /* | |
69 | * this routine will put a word on the processes privileged stack. | |
70 | * the offset is how far from the base addr as stored in the TSS. | |
71 | * this routine assumes that all the privileged stacks are in our | |
72 | * data space. | |
73 | */ | |
74 | static inline int put_stack_long(struct task_struct *task, int offset, | |
75 | unsigned long data) | |
76 | { | |
77 | unsigned char * stack; | |
78 | ||
79 | stack = (unsigned char *) task->thread.esp0; | |
80 | stack += offset; | |
81 | *(unsigned long *) stack = data; | |
82 | return 0; | |
83 | } | |
84 | ||
/*
 * Write one register of the stopped child, given its byte offset
 * @regno into the user-visible register layout (hence the >> 2 to get
 * the register index).
 *
 * Segment selector writes are validated so a tracer cannot install a
 * kernel-privileged selector: a non-zero selector must have RPL 3,
 * and ss/cs may not be NULL selectors at all.  EFLAGS writes are
 * restricted to the bits in FLAG_MASK.
 *
 * Returns 0 on success, -EIO for a rejected selector value.
 */
static int putreg(struct task_struct *child,
	unsigned long regno, unsigned long value)
{
	switch (regno >> 2) {
		case FS:
			if (value && (value & 3) != 3)
				return -EIO;
			/* fs lives in the thread struct, not the stack frame */
			child->thread.fs = value;
			return 0;
		case GS:
			if (value && (value & 3) != 3)
				return -EIO;
			/* gs likewise is kept in the thread struct */
			child->thread.gs = value;
			return 0;
		case DS:
		case ES:
			if (value && (value & 3) != 3)
				return -EIO;
			value &= 0xffff;
			break;
		case SS:
		case CS:
			/* ss/cs must always be valid user (RPL 3) selectors */
			if ((value & 3) != 3)
				return -EIO;
			value &= 0xffff;
			break;
		case EFL:
			/* only let the tracer modify the FLAG_MASK bits;
			 * keep the child's current value for the rest */
			value &= FLAG_MASK;
			value |= get_stack_long(child, EFL_OFFSET) & ~FLAG_MASK;
			break;
	}
	/*
	 * fs/gs were handled above via the thread struct; registers
	 * beyond GS in the user layout sit two words lower in the
	 * saved stack frame, so shift the offset down before indexing.
	 */
	if (regno > GS*4)
		regno -= 2*4;
	put_stack_long(child, regno - sizeof(struct pt_regs), value);
	return 0;
}
121 | ||
/*
 * Read one register of the stopped child, given its byte offset
 * @regno into the user-visible register layout.  fs/gs come from the
 * thread struct; everything else is read out of the saved pt_regs
 * frame on the child's kernel stack.
 */
static unsigned long getreg(struct task_struct *child,
	unsigned long regno)
{
	unsigned long retval = ~0UL;

	switch (regno >> 2) {
		case FS:
			retval = child->thread.fs;
			break;
		case GS:
			retval = child->thread.gs;
			break;
		case DS:
		case ES:
		case SS:
		case CS:
			/* selectors are 16-bit: mask the stack value below */
			retval = 0xffff;
			/* fall through */
		default:
			/* fs/gs have no slot in the stack frame (handled
			 * above), so offsets beyond GS shift down 2 words */
			if (regno > GS*4)
				regno -= 2*4;
			regno = regno - sizeof(struct pt_regs);
			retval &= get_stack_long(child, regno);
	}
	return retval;
}
148 | ||
149 | #define LDT_SEGMENT 4 | |
150 | ||
/*
 * Translate the child's saved eip into a linear address by applying
 * the code segment base.  In vm86 mode this is (seg << 4) + 16-bit ip.
 * For LDT code segments the descriptor base is decoded under the mm
 * context semaphore; GDT code segments are assumed zero-based (see
 * comment below).
 */
static unsigned long convert_eip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
	unsigned long addr, seg;

	addr = regs->eip;
	seg = regs->xcs & 0xffff;
	if (regs->eflags & VM_MASK) {
		/* vm86 mode: real-mode style address computation */
		addr = (addr & 0xffff) + (seg << 4);
		return addr;
	}

	/*
	 * We'll assume that the code segments in the GDT
	 * are all zero-based. That is largely true: the
	 * TLS segments are used for data, and the PNPBIOS
	 * and APM bios ones we just ignore here.
	 */
	if (seg & LDT_SEGMENT) {
		u32 *desc;
		unsigned long base;

		down(&child->mm->context.sem);
		/* NOTE(review): (seg & ~7) is the descriptor's byte
		 * offset within the LDT; this relies on context.ldt
		 * being a byte-wise (void *) pointer so the addition is
		 * unscaled -- confirm against mm_context_t. */
		desc = child->mm->context.ldt + (seg & ~7);
		/* segment base: bits 16-31 of dword 0, bits 0-7 and
		 * 24-31 of dword 1 */
		base = (desc[0] >> 16) | ((desc[1] & 0xff) << 16) | (desc[1] & 0xff000000);

		/* 16-bit code segment? (D/B bit clear) */
		if (!((desc[1] >> 22) & 1))
			addr &= 0xffff;
		addr += base;
		up(&child->mm->context.sem);
	}
	return addr;
}
184 | ||
/*
 * Return 1 if the instruction at the child's current eip is a popf
 * (which would rewrite the eflags TF bit we are about to set),
 * skipping over any instruction prefixes in front of the opcode.
 * Returns 0 for any other instruction, or if the opcode bytes cannot
 * be read from the child's address space.
 */
static inline int is_at_popf(struct task_struct *child, struct pt_regs *regs)
{
	int i, copied;
	unsigned char opcode[16];
	unsigned long addr = convert_eip_to_linear(child, regs);

	/* fetch up to 16 instruction bytes from the child */
	copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
	for (i = 0; i < copied; i++) {
		switch (opcode[i]) {
		/* popf */
		case 0x9d:
			return 1;
		/* opcode and address size prefixes */
		case 0x66: case 0x67:
			continue;
		/* irrelevant prefixes (segment overrides and repeats) */
		case 0x26: case 0x2e:
		case 0x36: case 0x3e:
		case 0x64: case 0x65:
		case 0xf0: case 0xf2: case 0xf3:
			continue;

		/*
		 * pushf: NOTE! We should probably not let
		 * the user see the TF bit being set. But
		 * it's more pain than it's worth to avoid
		 * it, and a debugger could emulate this
		 * all in user space if it _really_ cares.
		 */
		case 0x9c:
		default:
			return 0;
		}
	}
	/* ran off the end of the buffer without a decision */
	return 0;
}
221 | ||
/*
 * Arrange for the stopped child to trap after its next instruction:
 * set TIF_SINGLESTEP and the TF bit in its saved eflags.  PT_DTRACE
 * records that it was us who set TF (so clear_singlestep() may undo
 * it) -- unless the next instruction is a popf that will overwrite
 * eflags anyway.
 */
static void set_singlestep(struct task_struct *child)
{
	struct pt_regs *regs = get_child_regs(child);

	/*
	 * Always set TIF_SINGLESTEP - this guarantees that
	 * we single-step system calls etc.. This will also
	 * cause us to set TF when returning to user mode.
	 */
	set_tsk_thread_flag(child, TIF_SINGLESTEP);

	/*
	 * If TF was already set, don't do anything else
	 */
	if (regs->eflags & TRAP_FLAG)
		return;

	/* Set TF on the kernel stack.. */
	regs->eflags |= TRAP_FLAG;

	/*
	 * ..but if TF is changed by the instruction we will trace,
	 * don't mark it as being "us" that set it, so that we
	 * won't clear it by hand later.
	 */
	if (is_at_popf(child, regs))
		return;

	child->ptrace |= PT_DTRACE;
}
252 | ||
253 | static void clear_singlestep(struct task_struct *child) | |
254 | { | |
255 | /* Always clear TIF_SINGLESTEP... */ | |
256 | clear_tsk_thread_flag(child, TIF_SINGLESTEP); | |
257 | ||
258 | /* But touch TF only if it was set by us.. */ | |
259 | if (child->ptrace & PT_DTRACE) { | |
260 | struct pt_regs *regs = get_child_regs(child); | |
261 | regs->eflags &= ~TRAP_FLAG; | |
262 | child->ptrace &= ~PT_DTRACE; | |
263 | } | |
264 | } | |
265 | ||
266 | /* | |
267 | * Called by kernel/ptrace.c when detaching.. | |
268 | * | |
269 | * Make sure the single step bit is not set. | |
270 | */ | |
void ptrace_disable(struct task_struct *child)
{
	/* drop TF / TIF_SINGLESTEP state set on behalf of the tracer.. */
	clear_singlestep(child);
	/* ..and any pending syscall tracing / emulation interception */
	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
}
277 | ||
278 | /* | |
279 | * Perform get_thread_area on behalf of the traced child. | |
280 | */ | |
static int
ptrace_get_thread_area(struct task_struct *child,
		       int idx, struct user_desc __user *user_desc)
{
	struct user_desc info;
	struct desc_struct *desc;

/*
 * Get the current Thread-Local Storage area:
 */

/* Decode fields of a GDT descriptor from its two 32-bit halves:
 * ->a is the low dword (limit 0-15, base 0-15), ->b the high dword
 * (base 16-31, type/flags, limit 16-19). */

#define GET_BASE(desc) ( \
	(((desc)->a >> 16) & 0x0000ffff) | \
	(((desc)->b << 16) & 0x00ff0000) | \
	( (desc)->b & 0xff000000) )

#define GET_LIMIT(desc) ( \
	((desc)->a & 0x0ffff) | \
	((desc)->b & 0xf0000) )

#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
#define GET_WRITABLE(desc)	(((desc)->b >> 9) & 1)
#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)

	/* only the TLS slots of the GDT may be read this way */
	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;

	/* unpack the hardware descriptor into the user_desc ABI form */
	info.entry_number = idx;
	info.base_addr = GET_BASE(desc);
	info.limit = GET_LIMIT(desc);
	info.seg_32bit = GET_32BIT(desc);
	info.contents = GET_CONTENTS(desc);
	info.read_exec_only = !GET_WRITABLE(desc);
	info.limit_in_pages = GET_LIMIT_PAGES(desc);
	info.seg_not_present = !GET_PRESENT(desc);
	info.useable = GET_USEABLE(desc);

	if (copy_to_user(user_desc, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
328 | ||
329 | /* | |
330 | * Perform set_thread_area on behalf of the traced child. | |
331 | */ | |
332 | static int | |
333 | ptrace_set_thread_area(struct task_struct *child, | |
334 | int idx, struct user_desc __user *user_desc) | |
335 | { | |
336 | struct user_desc info; | |
337 | struct desc_struct *desc; | |
338 | ||
339 | if (copy_from_user(&info, user_desc, sizeof(info))) | |
340 | return -EFAULT; | |
341 | ||
342 | if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) | |
343 | return -EINVAL; | |
344 | ||
345 | desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN; | |
346 | if (LDT_empty(&info)) { | |
347 | desc->a = 0; | |
348 | desc->b = 0; | |
349 | } else { | |
350 | desc->a = LDT_entry_a(&info); | |
351 | desc->b = LDT_entry_b(&info); | |
352 | } | |
353 | ||
354 | return 0; | |
355 | } | |
356 | ||
/*
 * i386 ptrace system call entry point.  Handles the arch-specific
 * requests (register / FPU / TLS access, debug registers,
 * single-stepping, syscall tracing and emulation) and defers
 * everything else to the generic ptrace_request().
 *
 * Runs under the big kernel lock.  Except for TRACEME/ATTACH/KILL,
 * ptrace_check_attach() guarantees the child is stopped before any of
 * its state is touched.
 */
asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
{
	struct task_struct *child;
	/* never dereferenced: only used to compute offsets of the
	 * debug registers inside struct user */
	struct user * dummy = NULL;
	int i, ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	lock_kernel();
	ret = -EPERM;
	if (request == PTRACE_TRACEME) {
		/* are we already being traced? */
		if (current->ptrace & PT_PTRACED)
			goto out;
		ret = security_ptrace(current->parent, current);
		if (ret)
			goto out;
		/* set the ptrace bit in the process flags. */
		current->ptrace |= PT_PTRACED;
		ret = 0;
		goto out;
	}
	ret = -ESRCH;
	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		get_task_struct(child);	/* hold a ref across the operation */
	read_unlock(&tasklist_lock);
	if (!child)
		goto out;

	ret = -EPERM;
	if (pid == 1)		/* you may not mess with init */
		goto out_tsk;

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		goto out_tsk;
	}

	/* everything below requires a stopped, attached child
	 * (PTRACE_KILL is allowed through regardless) */
	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_tsk;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA: {
		unsigned long tmp;
		int copied;

		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
		ret = -EIO;
		if (copied != sizeof(tmp))
			break;
		ret = put_user(tmp, datap);
		break;
	}

	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		tmp = 0;  /* Default return condition */
		if(addr < FRAME_SIZE*sizeof(long))
			tmp = getreg(child, addr);
		/* debug registers are stored in the thread struct,
		 * not in the register frame */
		if(addr >= (long) &dummy->u_debugreg[0] &&
		   addr <= (long) &dummy->u_debugreg[7]){
			addr -= (long) &dummy->u_debugreg[0];
			addr = addr >> 2;
			tmp = child->thread.debugreg[addr];
		}
		ret = put_user(tmp, datap);
		break;
	}

	/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = 0;
		if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
			break;
		ret = -EIO;
		break;

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < FRAME_SIZE*sizeof(long)) {
			ret = putreg(child, addr, data);
			break;
		}
		/* We need to be very careful here.  We implicitly
		   want to modify a portion of the task_struct, and we
		   have to be selective about what portions we allow someone
		   to modify. */

		ret = -EIO;
		if(addr >= (long) &dummy->u_debugreg[0] &&
		   addr <= (long) &dummy->u_debugreg[7]){

			/* dr4/dr5 are reserved aliases of dr6/dr7 */
			if(addr == (long) &dummy->u_debugreg[4]) break;
			if(addr == (long) &dummy->u_debugreg[5]) break;
			/* breakpoint addresses (dr0-dr3) must lie in
			 * user space */
			if(addr < (long) &dummy->u_debugreg[4] &&
			   ((unsigned long) data) >= TASK_SIZE-3) break;

			/* Sanity-check data. Take one half-byte at once with
			 * check = (val >> (16 + 4*i)) & 0xf. It contains the
			 * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
			 * 2 and 3 are LENi. Given a list of invalid values,
			 * we do mask |= 1 << invalid_value, so that
			 * (mask >> check) & 1 is a correct test for invalid
			 * values.
			 *
			 * R/Wi contains the type of the breakpoint /
			 * watchpoint, LENi contains the length of the watched
			 * data in the watchpoint case.
			 *
			 * The invalid values are:
			 * - LENi == 0x10 (undefined), so mask |= 0x0f00.
			 * - R/Wi == 0x10 (break on I/O reads or writes), so
			 *   mask |= 0x4444.
			 * - R/Wi == 0x00 && LENi != 0x00, so we have mask |=
			 *   0x1110.
			 *
			 * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
			 *
			 * See the Intel Manual "System Programming Guide",
			 * 15.2.4
			 *
			 * Note that LENi == 0x10 is defined on x86_64 in long
			 * mode (i.e. even for 32-bit userspace software, but
			 * 64-bit kernel), so the x86_64 mask value is 0x5454.
			 * See the AMD manual no. 24593 (AMD64 System
			 * Programming)*/

			if(addr == (long) &dummy->u_debugreg[7]) {
				data &= ~DR_CONTROL_RESERVED;
				for(i=0; i<4; i++)
					if ((0x5f54 >> ((data >> (16 + 4*i)) & 0xf)) & 1)
						goto out_tsk;
			}

			addr -= (long) &dummy->u_debugreg;
			addr = addr >> 2;
			child->thread.debugreg[addr] = data;
			ret = 0;
		}
		break;

	case PTRACE_SYSEMU: /* continue and stop at next syscall, which will not be executed */
	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
	case PTRACE_CONT: /* restart after signal. */
		ret = -EIO;
		if (!valid_signal(data))
			break;
		/* SYSEMU and SYSCALL are mutually exclusive interception
		 * modes; plain CONT clears both */
		if (request == PTRACE_SYSEMU) {
			set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		} else if (request == PTRACE_SYSCALL) {
			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
			clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
		} else {
			clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		}
		child->exit_code = data;
		/* make sure the single step bit is not set. */
		clear_singlestep(child);
		wake_up_process(child);
		ret = 0;
		break;

	/*
	 * make the child exit. Best I can do is send it a sigkill.
	 * perhaps it should be put in the status that it wants to
	 * exit.
	 */
	case PTRACE_KILL:
		ret = 0;
		if (child->exit_state == EXIT_ZOMBIE) /* already dead */
			break;
		child->exit_code = SIGKILL;
		/* make sure the single step bit is not set. */
		clear_singlestep(child);
		wake_up_process(child);
		break;

	case PTRACE_SYSEMU_SINGLESTEP: /* Same as SYSEMU, but singlestep if not syscall */
	case PTRACE_SINGLESTEP: /* set the trap flag. */
		ret = -EIO;
		if (!valid_signal(data))
			break;

		if (request == PTRACE_SYSEMU_SINGLESTEP)
			set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
		else
			clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);

		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		set_singlestep(child);
		child->exit_code = data;
		/* give it a chance to run. */
		wake_up_process(child);
		ret = 0;
		break;

	case PTRACE_DETACH:
		/* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

	case PTRACE_GETREGS: { /* Get all gp regs from the child. */
		if (!access_ok(VERIFY_WRITE, datap, FRAME_SIZE*sizeof(long))) {
			ret = -EIO;
			break;
		}
		/* NOTE(review): __put_user results are ignored below; a
		 * fault after the access_ok check would go unreported --
		 * verify this is acceptable. */
		for ( i = 0; i < FRAME_SIZE*sizeof(long); i += sizeof(long) ) {
			__put_user(getreg(child, i), datap);
			datap++;
		}
		ret = 0;
		break;
	}

	case PTRACE_SETREGS: { /* Set all gp regs in the child. */
		unsigned long tmp;
		if (!access_ok(VERIFY_READ, datap, FRAME_SIZE*sizeof(long))) {
			ret = -EIO;
			break;
		}
		for ( i = 0; i < FRAME_SIZE*sizeof(long); i += sizeof(long) ) {
			__get_user(tmp, datap);
			putreg(child, i, tmp);
			datap++;
		}
		ret = 0;
		break;
	}

	case PTRACE_GETFPREGS: { /* Get the child FPU state. */
		if (!access_ok(VERIFY_WRITE, datap,
			       sizeof(struct user_i387_struct))) {
			ret = -EIO;
			break;
		}
		ret = 0;
		/* lazily initialize FPU state if the child never used it */
		if (!tsk_used_math(child))
			init_fpu(child);
		get_fpregs((struct user_i387_struct __user *)data, child);
		break;
	}

	case PTRACE_SETFPREGS: { /* Set the child FPU state. */
		if (!access_ok(VERIFY_READ, datap,
			       sizeof(struct user_i387_struct))) {
			ret = -EIO;
			break;
		}
		set_stopped_child_used_math(child);
		set_fpregs(child, (struct user_i387_struct __user *)data);
		ret = 0;
		break;
	}

	case PTRACE_GETFPXREGS: { /* Get the child extended FPU state. */
		if (!access_ok(VERIFY_WRITE, datap,
			       sizeof(struct user_fxsr_struct))) {
			ret = -EIO;
			break;
		}
		if (!tsk_used_math(child))
			init_fpu(child);
		ret = get_fpxregs((struct user_fxsr_struct __user *)data, child);
		break;
	}

	case PTRACE_SETFPXREGS: { /* Set the child extended FPU state. */
		if (!access_ok(VERIFY_READ, datap,
			       sizeof(struct user_fxsr_struct))) {
			ret = -EIO;
			break;
		}
		set_stopped_child_used_math(child);
		ret = set_fpxregs(child, (struct user_fxsr_struct __user *)data);
		break;
	}

	case PTRACE_GET_THREAD_AREA:
		ret = ptrace_get_thread_area(child, addr,
					(struct user_desc __user *) data);
		break;

	case PTRACE_SET_THREAD_AREA:
		ret = ptrace_set_thread_area(child, addr,
					(struct user_desc __user *) data);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
out_tsk:
	put_task_struct(child);
out:
	unlock_kernel();
	return ret;
}
672 | ||
673 | void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code) | |
674 | { | |
675 | struct siginfo info; | |
676 | ||
677 | tsk->thread.trap_no = 1; | |
678 | tsk->thread.error_code = error_code; | |
679 | ||
680 | memset(&info, 0, sizeof(info)); | |
681 | info.si_signo = SIGTRAP; | |
682 | info.si_code = TRAP_BRKPT; | |
683 | ||
684 | /* User-mode eip? */ | |
fa1e1bdf | 685 | info.si_addr = user_mode_vm(regs) ? (void __user *) regs->eip : NULL; |
1da177e4 LT |
686 | |
687 | /* Send us the fakey SIGTRAP */ | |
688 | force_sig_info(SIGTRAP, &info, tsk); | |
689 | } | |
690 | ||
/* notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 *
 * @entryexit: 0 on syscall entry, non-zero on syscall exit.
 * Returns 1 (and forces orig_eax = -1 so restart logic is skipped)
 * when the syscall must not be executed, i.e. under PTRACE_SYSEMU
 * emulation; returns 0 otherwise.
 */
__attribute__((regparm(3)))
int do_syscall_trace(struct pt_regs *regs, int entryexit)
{
	int is_sysemu = test_thread_flag(TIF_SYSCALL_EMU);
	/*
	 * With TIF_SYSCALL_EMU set we want to ignore TIF_SINGLESTEP for syscall
	 * interception
	 */
	int is_singlestep = !is_sysemu && test_thread_flag(TIF_SINGLESTEP);
	int ret = 0;

	/* do the secure computing check first */
	if (!entryexit)
		secure_computing(regs->orig_eax);

	if (unlikely(current->audit_context)) {
		if (entryexit)
			audit_syscall_exit(current, AUDITSC_RESULT(regs->eax),
					   regs->eax);
		/* Debug traps, when using PTRACE_SINGLESTEP, must be sent only
		 * on the syscall exit path. Normally, when TIF_SYSCALL_AUDIT is
		 * not used, entry.S will call us only on syscall exit, not
		 * entry; so when TIF_SYSCALL_AUDIT is used we must avoid
		 * calling send_sigtrap() on syscall entry.
		 *
		 * Note that when PTRACE_SYSEMU_SINGLESTEP is used,
		 * is_singlestep is false, despite his name, so we will still do
		 * the correct thing.
		 */
		else if (is_singlestep)
			goto out;
	}

	if (!(current->ptrace & PT_PTRACED))
		goto out;

	/* If a process stops on the 1st tracepoint with SYSCALL_TRACE
	 * and then is resumed with SYSEMU_SINGLESTEP, it will come in
	 * here. We have to check this and return */
	if (is_sysemu && entryexit)
		return 0;

	/* Fake a debug trap */
	if (is_singlestep)
		send_sigtrap(current, regs, 0);

	if (!test_thread_flag(TIF_SYSCALL_TRACE) && !is_sysemu)
		goto out;

	/* the 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	/* Note that the debugger could change the result of test_thread_flag!*/
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80:0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use. strace only continues with a signal if the
	 * stopping signal is not SIGTRAP. -brl
	 */
	if (current->exit_code) {
		/* forward the signal the debugger asked us to deliver */
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
	ret = is_sysemu;
out:
	/* audit the entry only after the tracer had its chance to
	 * modify the registers at the stop above */
	if (unlikely(current->audit_context) && !entryexit)
		audit_syscall_entry(current, AUDIT_ARCH_I386, regs->orig_eax,
				    regs->ebx, regs->ecx, regs->edx, regs->esi);
	if (ret == 0)
		return 0;

	regs->orig_eax = -1; /* force skip of syscall restarting */
	if (unlikely(current->audit_context))
		audit_syscall_exit(current, AUDITSC_RESULT(regs->eax),
				   regs->eax);
	return 1;
}