/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 1999-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Derived from the x86 and Alpha versions.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/smp_lock.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
#endif

#include "entry.h"

/*
 * Bits in the PSR that we allow ptrace() to change:
 *	be, up, ac, mfl, mfh (the user mask; five bits total)
 *	db (debug breakpoint fault; one bit)
 *	id (instruction debug fault disable; one bit)
 *	dd (data debug fault disable; one bit)
 *	ri (restart instruction; two bits)
 *	is (instruction set; one bit)
 */
#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS \
		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)

#define MASK(nbits)	((1UL << (nbits)) - 1)	/* mask with NBITS bits set */
#define PFM_MASK	MASK(38)

#define PTRACE_DEBUG	0

#if PTRACE_DEBUG
# define dprintk(format...)	printk(format)
# define inline
#else
# define dprintk(format...)
#endif
58 | ||
59 | /* Return TRUE if PT was created due to kernel-entry via a system-call. */ | |
60 | ||
61 | static inline int | |
62 | in_syscall (struct pt_regs *pt) | |
63 | { | |
64 | return (long) pt->cr_ifs >= 0; | |
65 | } | |
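
/*
 * Illustrative note (not in the original source): on syscall entry the
 * kernel saves the current frame marker in pt->cr_ifs with bit 63 (the
 * cr.ifs valid bit) clear, whereas an interruption frame has bit 63
 * set.  Interpreted as a signed 64-bit value, bit 63 is the sign bit:
 *
 *	(long) 0x0000000000000005 >= 0	-> syscall entry
 *	(long) 0x8000000000000005 <  0	-> interruption (non-syscall) entry
 *
 * convert_to_non_syscall() below relies on the same convention when it
 * sets pt->cr_ifs = (1UL << 63) | cfm.
 */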
66 | ||
67 | /* | |
68 | * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT | |
69 | * bitset where bit i is set iff the NaT bit of register i is set. | |
70 | */ | |
71 | unsigned long | |
72 | ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat) | |
73 | { | |
74 | # define GET_BITS(first, last, unat) \ | |
75 | ({ \ | |
76 | unsigned long bit = ia64_unat_pos(&pt->r##first); \ | |
77 | unsigned long nbits = (last - first + 1); \ | |
78 | unsigned long mask = MASK(nbits) << first; \ | |
79 | unsigned long dist; \ | |
80 | if (bit < first) \ | |
81 | dist = 64 + bit - first; \ | |
82 | else \ | |
83 | dist = bit - first; \ | |
84 | ia64_rotr(unat, dist) & mask; \ | |
85 | }) | |
86 | unsigned long val; | |
87 | ||
88 | /* | |
89 | * Registers that are stored consecutively in struct pt_regs | |
90 | * can be handled in parallel. If the register order in | |
91 | * struct_pt_regs changes, this code MUST be updated. | |
92 | */ | |
93 | val = GET_BITS( 1, 1, scratch_unat); | |
94 | val |= GET_BITS( 2, 3, scratch_unat); | |
95 | val |= GET_BITS(12, 13, scratch_unat); | |
96 | val |= GET_BITS(14, 14, scratch_unat); | |
97 | val |= GET_BITS(15, 15, scratch_unat); | |
98 | val |= GET_BITS( 8, 11, scratch_unat); | |
99 | val |= GET_BITS(16, 31, scratch_unat); | |
100 | return val; | |
101 | ||
102 | # undef GET_BITS | |
103 | } | |
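
/*
 * Worked example (illustrative, with assumed numbers): suppose
 * ia64_unat_pos(&pt->r2) == 5, i.e. the NaT bits for r2 and r3 sit at
 * bits 5 and 6 of scratch_unat.  Then GET_BITS(2, 3, scratch_unat)
 * computes dist = 5 - 2 = 3 and mask = MASK(2) << 2 = 0xc, so
 * ia64_rotr(scratch_unat, 3) moves bits 5-6 down to bits 2-3 and the
 * mask keeps exactly those two bits -- the NaT bits of r2/r3 in their
 * architectural positions.  The rotate (rather than a shift) handles
 * the wrap-around case where bit < first.
 */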
104 | ||
105 | /* | |
106 | * Set the NaT bits for the scratch registers according to NAT and | |
107 | * return the resulting unat (assuming the scratch registers are | |
108 | * stored in PT). | |
109 | */ | |
110 | unsigned long | |
111 | ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat) | |
112 | { | |
113 | # define PUT_BITS(first, last, nat) \ | |
114 | ({ \ | |
115 | unsigned long bit = ia64_unat_pos(&pt->r##first); \ | |
116 | unsigned long nbits = (last - first + 1); \ | |
117 | unsigned long mask = MASK(nbits) << first; \ | |
118 | long dist; \ | |
119 | if (bit < first) \ | |
120 | dist = 64 + bit - first; \ | |
121 | else \ | |
122 | dist = bit - first; \ | |
123 | ia64_rotl(nat & mask, dist); \ | |
124 | }) | |
125 | unsigned long scratch_unat; | |
126 | ||
127 | /* | |
128 | * Registers that are stored consecutively in struct pt_regs | |
129 | * can be handled in parallel. If the register order in | |
130 | * struct_pt_regs changes, this code MUST be updated. | |
131 | */ | |
132 | scratch_unat = PUT_BITS( 1, 1, nat); | |
133 | scratch_unat |= PUT_BITS( 2, 3, nat); | |
134 | scratch_unat |= PUT_BITS(12, 13, nat); | |
135 | scratch_unat |= PUT_BITS(14, 14, nat); | |
136 | scratch_unat |= PUT_BITS(15, 15, nat); | |
137 | scratch_unat |= PUT_BITS( 8, 11, nat); | |
138 | scratch_unat |= PUT_BITS(16, 31, nat); | |
139 | ||
140 | return scratch_unat; | |
141 | ||
142 | # undef PUT_BITS | |
143 | } | |
144 | ||
145 | #define IA64_MLX_TEMPLATE 0x2 | |
146 | #define IA64_MOVL_OPCODE 6 | |
147 | ||
148 | void | |
149 | ia64_increment_ip (struct pt_regs *regs) | |
150 | { | |
151 | unsigned long w0, ri = ia64_psr(regs)->ri + 1; | |
152 | ||
153 | if (ri > 2) { | |
154 | ri = 0; | |
155 | regs->cr_iip += 16; | |
156 | } else if (ri == 2) { | |
157 | get_user(w0, (char __user *) regs->cr_iip + 0); | |
158 | if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) { | |
159 | /* | |
160 | * rfi'ing to slot 2 of an MLX bundle causes | |
161 | * an illegal operation fault. We don't want | |
162 | * that to happen... | |
163 | */ | |
164 | ri = 0; | |
165 | regs->cr_iip += 16; | |
166 | } | |
167 | } | |
168 | ia64_psr(regs)->ri = ri; | |
169 | } | |
170 | ||
171 | void | |
172 | ia64_decrement_ip (struct pt_regs *regs) | |
173 | { | |
174 | unsigned long w0, ri = ia64_psr(regs)->ri - 1; | |
175 | ||
176 | if (ia64_psr(regs)->ri == 0) { | |
177 | regs->cr_iip -= 16; | |
178 | ri = 2; | |
179 | get_user(w0, (char __user *) regs->cr_iip + 0); | |
180 | if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) { | |
181 | /* | |
182 | * rfi'ing to slot 2 of an MLX bundle causes | |
183 | * an illegal operation fault. We don't want | |
184 | * that to happen... | |
185 | */ | |
186 | ri = 1; | |
187 | } | |
188 | } | |
189 | ia64_psr(regs)->ri = ri; | |
190 | } | |
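
/*
 * Background note (illustrative): IA-64 instructions are packed three
 * slots to a 16-byte bundle, and psr.ri selects the slot (0-2) within
 * the bundle at cr.iip.  Stepping therefore means advancing ri, and
 * rolling over to the next bundle (cr_iip += 16) past slot 2.  An MLX
 * bundle uses slots 1 and 2 together for a single movl instruction,
 * which is why both routines above refuse to leave ri pointing at
 * slot 2 of an MLX template.
 */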
191 | ||
192 | /* | |
193 | * This routine is used to read an rnat bits that are stored on the | |
194 | * kernel backing store. Since, in general, the alignment of the user | |
195 | * and kernel are different, this is not completely trivial. In | |
196 | * essence, we need to construct the user RNAT based on up to two | |
197 | * kernel RNAT values and/or the RNAT value saved in the child's | |
198 | * pt_regs. | |
199 | * | |
200 | * user rbs | |
201 | * | |
202 | * +--------+ <-- lowest address | |
203 | * | slot62 | | |
204 | * +--------+ | |
205 | * | rnat | 0x....1f8 | |
206 | * +--------+ | |
207 | * | slot00 | \ | |
208 | * +--------+ | | |
209 | * | slot01 | > child_regs->ar_rnat | |
210 | * +--------+ | | |
211 | * | slot02 | / kernel rbs | |
212 | * +--------+ +--------+ | |
213 | * <- child_regs->ar_bspstore | slot61 | <-- krbs | |
214 | * +- - - - + +--------+ | |
215 | * | slot62 | | |
216 | * +- - - - + +--------+ | |
217 | * | rnat | | |
218 | * +- - - - + +--------+ | |
219 | * vrnat | slot00 | | |
220 | * +- - - - + +--------+ | |
221 | * = = | |
222 | * +--------+ | |
223 | * | slot00 | \ | |
224 | * +--------+ | | |
225 | * | slot01 | > child_stack->ar_rnat | |
226 | * +--------+ | | |
227 | * | slot02 | / | |
228 | * +--------+ | |
229 | * <--- child_stack->ar_bspstore | |
230 | * | |
231 | * The way to think of this code is as follows: bit 0 in the user rnat | |
232 | * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat | |
233 | * value. The kernel rnat value holding this bit is stored in | |
234 | * variable rnat0. rnat1 is loaded with the kernel rnat value that | |
235 | * form the upper bits of the user rnat value. | |
236 | * | |
237 | * Boundary cases: | |
238 | * | |
239 | * o when reading the rnat "below" the first rnat slot on the kernel | |
240 | * backing store, rnat0/rnat1 are set to 0 and the low order bits are | |
241 | * merged in from pt->ar_rnat. | |
242 | * | |
243 | * o when reading the rnat "above" the last rnat slot on the kernel | |
244 | * backing store, rnat0/rnat1 gets its value from sw->ar_rnat. | |
245 | */ | |
static unsigned long
get_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
	unsigned long umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;

	pt = ia64_task_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	if (urbs_end < urnat_addr)
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
	else
		nbits = 63;
	mask = MASK(nbits);
	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be merged in from pt->ar_rnat */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		urnat = (pt->ar_rnat & umask);
		mask &= ~umask;
		if (!mask)
			return urnat;
	}

	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		rnat0 = sw->ar_rnat;
	else if (rnat0_kaddr > krbs)
		rnat0 = *rnat0_kaddr;
	urnat |= (rnat0 & m) >> shift;

	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		rnat1 = sw->ar_rnat;
	else if (rnat1_kaddr > krbs)
		rnat1 = *rnat1_kaddr;
	urnat |= (rnat1 & m) << (63 - shift);
	return urnat;
}
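
/*
 * Illustrative example (assumed numbers): with shift == 10, the low
 * 53 bits of the user RNaT word (bits 0..52) come from bits 10..62 of
 * the kernel word at rnat0_kaddr, via (rnat0 & (mask << 10)) >> 10,
 * and the remaining user bits 53..62 come from bits 0..9 of the word
 * at rnat1_kaddr, via (rnat1 & (mask >> 53)) << 53.  Bit 63 of an
 * RNaT collection word is architecturally ignored, which is why only
 * 63 bits participate in the split.
 */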
302 | ||
303 | /* | |
304 | * The reverse of get_rnat. | |
305 | */ | |
306 | static void | |
307 | put_rnat (struct task_struct *task, struct switch_stack *sw, | |
308 | unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat, | |
309 | unsigned long *urbs_end) | |
310 | { | |
311 | unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m; | |
312 | unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift; | |
313 | long num_regs, nbits; | |
314 | struct pt_regs *pt; | |
315 | unsigned long cfm, *urbs_kargs; | |
316 | ||
317 | pt = ia64_task_regs(task); | |
318 | kbsp = (unsigned long *) sw->ar_bspstore; | |
319 | ubspstore = (unsigned long *) pt->ar_bspstore; | |
320 | ||
321 | urbs_kargs = urbs_end; | |
322 | if (in_syscall(pt)) { | |
323 | /* | |
324 | * If entered via syscall, don't allow user to set rnat bits | |
325 | * for syscall args. | |
326 | */ | |
327 | cfm = pt->cr_ifs; | |
328 | urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f)); | |
329 | } | |
330 | ||
331 | if (urbs_kargs >= urnat_addr) | |
332 | nbits = 63; | |
333 | else { | |
334 | if ((urnat_addr - 63) >= urbs_kargs) | |
335 | return; | |
336 | nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs); | |
337 | } | |
338 | mask = MASK(nbits); | |
339 | ||
340 | /* | |
341 | * First, figure out which bit number slot 0 in user-land maps | |
342 | * to in the kernel rnat. Do this by figuring out how many | |
343 | * register slots we're beyond the user's backingstore and | |
344 | * then computing the equivalent address in kernel space. | |
345 | */ | |
346 | num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1); | |
347 | slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs); | |
348 | shift = ia64_rse_slot_num(slot0_kaddr); | |
349 | rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr); | |
350 | rnat0_kaddr = rnat1_kaddr - 64; | |
351 | ||
352 | if (ubspstore + 63 > urnat_addr) { | |
353 | /* some bits need to be place in pt->ar_rnat: */ | |
354 | umask = MASK(ia64_rse_slot_num(ubspstore)) & mask; | |
355 | pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask); | |
356 | mask &= ~umask; | |
357 | if (!mask) | |
358 | return; | |
359 | } | |
360 | /* | |
361 | * Note: Section 11.1 of the EAS guarantees that bit 63 of an | |
362 | * rnat slot is ignored. so we don't have to clear it here. | |
363 | */ | |
364 | rnat0 = (urnat << shift); | |
365 | m = mask << shift; | |
366 | if (rnat0_kaddr >= kbsp) | |
367 | sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m); | |
368 | else if (rnat0_kaddr > krbs) | |
369 | *rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m)); | |
370 | ||
371 | rnat1 = (urnat >> (63 - shift)); | |
372 | m = mask >> (63 - shift); | |
373 | if (rnat1_kaddr >= kbsp) | |
374 | sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m); | |
375 | else if (rnat1_kaddr > krbs) | |
376 | *rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m)); | |
377 | } | |
378 | ||
379 | static inline int | |
380 | on_kernel_rbs (unsigned long addr, unsigned long bspstore, | |
381 | unsigned long urbs_end) | |
382 | { | |
383 | unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *) | |
384 | urbs_end); | |
385 | return (addr >= bspstore && addr <= (unsigned long) rnat_addr); | |
386 | } | |
387 | ||
388 | /* | |
389 | * Read a word from the user-level backing store of task CHILD. ADDR | |
390 | * is the user-level address to read the word from, VAL a pointer to | |
391 | * the return value, and USER_BSP gives the end of the user-level | |
392 | * backing store (i.e., it's the address that would be in ar.bsp after | |
393 | * the user executed a "cover" instruction). | |
394 | * | |
395 | * This routine takes care of accessing the kernel register backing | |
396 | * store for those registers that got spilled there. It also takes | |
397 | * care of calculating the appropriate RNaT collection words. | |
398 | */ | |
399 | long | |
400 | ia64_peek (struct task_struct *child, struct switch_stack *child_stack, | |
401 | unsigned long user_rbs_end, unsigned long addr, long *val) | |
402 | { | |
403 | unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr; | |
404 | struct pt_regs *child_regs; | |
405 | size_t copied; | |
406 | long ret; | |
407 | ||
408 | urbs_end = (long *) user_rbs_end; | |
409 | laddr = (unsigned long *) addr; | |
410 | child_regs = ia64_task_regs(child); | |
411 | bspstore = (unsigned long *) child_regs->ar_bspstore; | |
412 | krbs = (unsigned long *) child + IA64_RBS_OFFSET/8; | |
413 | if (on_kernel_rbs(addr, (unsigned long) bspstore, | |
414 | (unsigned long) urbs_end)) | |
415 | { | |
416 | /* | |
417 | * Attempt to read the RBS in an area that's actually | |
418 | * on the kernel RBS => read the corresponding bits in | |
419 | * the kernel RBS. | |
420 | */ | |
421 | rnat_addr = ia64_rse_rnat_addr(laddr); | |
422 | ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end); | |
423 | ||
424 | if (laddr == rnat_addr) { | |
425 | /* return NaT collection word itself */ | |
426 | *val = ret; | |
427 | return 0; | |
428 | } | |
429 | ||
430 | if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) { | |
431 | /* | |
432 | * It is implementation dependent whether the | |
433 | * data portion of a NaT value gets saved on a | |
434 | * st8.spill or RSE spill (e.g., see EAS 2.6, | |
435 | * 4.4.4.6 Register Spill and Fill). To get | |
436 | * consistent behavior across all possible | |
437 | * IA-64 implementations, we return zero in | |
438 | * this case. | |
439 | */ | |
440 | *val = 0; | |
441 | return 0; | |
442 | } | |
443 | ||
444 | if (laddr < urbs_end) { | |
445 | /* | |
446 | * The desired word is on the kernel RBS and | |
447 | * is not a NaT. | |
448 | */ | |
449 | regnum = ia64_rse_num_regs(bspstore, laddr); | |
450 | *val = *ia64_rse_skip_regs(krbs, regnum); | |
451 | return 0; | |
452 | } | |
453 | } | |
454 | copied = access_process_vm(child, addr, &ret, sizeof(ret), 0); | |
455 | if (copied != sizeof(ret)) | |
456 | return -EIO; | |
457 | *val = ret; | |
458 | return 0; | |
459 | } | |
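
/*
 * Note (illustrative): the fall-through to access_process_vm() above
 * covers every address that is not currently backed by the kernel
 * RBS -- such a word was never spilled into the kernel and still
 * lives in the child's user memory, so an ordinary address-space
 * access is the right way to fetch it.
 */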
460 | ||
461 | long | |
462 | ia64_poke (struct task_struct *child, struct switch_stack *child_stack, | |
463 | unsigned long user_rbs_end, unsigned long addr, long val) | |
464 | { | |
465 | unsigned long *bspstore, *krbs, regnum, *laddr; | |
466 | unsigned long *urbs_end = (long *) user_rbs_end; | |
467 | struct pt_regs *child_regs; | |
468 | ||
469 | laddr = (unsigned long *) addr; | |
470 | child_regs = ia64_task_regs(child); | |
471 | bspstore = (unsigned long *) child_regs->ar_bspstore; | |
472 | krbs = (unsigned long *) child + IA64_RBS_OFFSET/8; | |
473 | if (on_kernel_rbs(addr, (unsigned long) bspstore, | |
474 | (unsigned long) urbs_end)) | |
475 | { | |
476 | /* | |
477 | * Attempt to write the RBS in an area that's actually | |
478 | * on the kernel RBS => write the corresponding bits | |
479 | * in the kernel RBS. | |
480 | */ | |
481 | if (ia64_rse_is_rnat_slot(laddr)) | |
482 | put_rnat(child, child_stack, krbs, laddr, val, | |
483 | urbs_end); | |
484 | else { | |
485 | if (laddr < urbs_end) { | |
486 | regnum = ia64_rse_num_regs(bspstore, laddr); | |
487 | *ia64_rse_skip_regs(krbs, regnum) = val; | |
488 | } | |
489 | } | |
490 | } else if (access_process_vm(child, addr, &val, sizeof(val), 1) | |
491 | != sizeof(val)) | |
492 | return -EIO; | |
493 | return 0; | |
494 | } | |
495 | ||
496 | /* | |
497 | * Calculate the address of the end of the user-level register backing | |
498 | * store. This is the address that would have been stored in ar.bsp | |
499 | * if the user had executed a "cover" instruction right before | |
500 | * entering the kernel. If CFMP is not NULL, it is used to return the | |
501 | * "current frame mask" that was active at the time the kernel was | |
502 | * entered. | |
503 | */ | |
504 | unsigned long | |
505 | ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt, | |
506 | unsigned long *cfmp) | |
507 | { | |
508 | unsigned long *krbs, *bspstore, cfm = pt->cr_ifs; | |
509 | long ndirty; | |
510 | ||
511 | krbs = (unsigned long *) child + IA64_RBS_OFFSET/8; | |
512 | bspstore = (unsigned long *) pt->ar_bspstore; | |
513 | ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19)); | |
514 | ||
515 | if (in_syscall(pt)) | |
516 | ndirty += (cfm & 0x7f); | |
517 | else | |
518 | cfm &= ~(1UL << 63); /* clear valid bit */ | |
519 | ||
520 | if (cfmp) | |
521 | *cfmp = cfm; | |
522 | return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty); | |
523 | } | |
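
/*
 * Note (illustrative, based on the ar.rsc layout): pt->loadrs keeps
 * the "loadrs" byte count in bits 16 and up, so (pt->loadrs >> 19)
 * yields the size of the dirty partition in 8-byte slots;
 * ia64_rse_num_regs() then discounts the RNaT collection slots the
 * RSE interleaves every 64th slot.  For a syscall frame, the current
 * frame's registers (cfm & 0x7f, i.e. sof) are still part of the
 * dirty partition, so they are added explicitly.
 */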
524 | ||
525 | /* | |
526 | * Synchronize (i.e, write) the RSE backing store living in kernel | |
527 | * space to the VM of the CHILD task. SW and PT are the pointers to | |
528 | * the switch_stack and pt_regs structures, respectively. | |
529 | * USER_RBS_END is the user-level address at which the backing store | |
530 | * ends. | |
531 | */ | |
532 | long | |
533 | ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw, | |
534 | unsigned long user_rbs_start, unsigned long user_rbs_end) | |
535 | { | |
536 | unsigned long addr, val; | |
537 | long ret; | |
538 | ||
539 | /* now copy word for word from kernel rbs to user rbs: */ | |
540 | for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) { | |
541 | ret = ia64_peek(child, sw, user_rbs_end, addr, &val); | |
542 | if (ret < 0) | |
543 | return ret; | |
544 | if (access_process_vm(child, addr, &val, sizeof(val), 1) | |
545 | != sizeof(val)) | |
546 | return -EIO; | |
547 | } | |
548 | return 0; | |
549 | } | |
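
/*
 * Design note (illustrative): the copy deliberately goes through
 * ia64_peek() one word at a time rather than bulk-copying the kernel
 * RBS, so that RNaT collection words are reconstructed at the
 * user-level alignment and NaT'ed data words read back as zero.
 */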
550 | ||
551 | static inline int | |
552 | thread_matches (struct task_struct *thread, unsigned long addr) | |
553 | { | |
554 | unsigned long thread_rbs_end; | |
555 | struct pt_regs *thread_regs; | |
556 | ||
557 | if (ptrace_check_attach(thread, 0) < 0) | |
558 | /* | |
559 | * If the thread is not in an attachable state, we'll | |
560 | * ignore it. The net effect is that if ADDR happens | |
561 | * to overlap with the portion of the thread's | |
562 | * register backing store that is currently residing | |
563 | * on the thread's kernel stack, then ptrace() may end | |
564 | * up accessing a stale value. But if the thread | |
565 | * isn't stopped, that's a problem anyhow, so we're | |
566 | * doing as well as we can... | |
567 | */ | |
568 | return 0; | |
569 | ||
570 | thread_regs = ia64_task_regs(thread); | |
571 | thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL); | |
572 | if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end)) | |
573 | return 0; | |
574 | ||
575 | return 1; /* looks like we've got a winner */ | |
576 | } | |
577 | ||
578 | /* | |
579 | * GDB apparently wants to be able to read the register-backing store | |
580 | * of any thread when attached to a given process. If we are peeking | |
581 | * or poking an address that happens to reside in the kernel-backing | |
582 | * store of another thread, we need to attach to that thread, because | |
583 | * otherwise we end up accessing stale data. | |
584 | * | |
585 | * task_list_lock must be read-locked before calling this routine! | |
586 | */ | |
587 | static struct task_struct * | |
588 | find_thread_for_addr (struct task_struct *child, unsigned long addr) | |
589 | { | |
590 | struct task_struct *g, *p; | |
591 | struct mm_struct *mm; | |
4ac0068f | 592 | struct list_head *this, *next; |
1da177e4 LT |
593 | int mm_users; |
594 | ||
595 | if (!(mm = get_task_mm(child))) | |
596 | return child; | |
597 | ||
598 | /* -1 because of our get_task_mm(): */ | |
599 | mm_users = atomic_read(&mm->mm_users) - 1; | |
600 | if (mm_users <= 1) | |
601 | goto out; /* not multi-threaded */ | |
602 | ||
603 | /* | |
4ac0068f CW |
604 | * Traverse the current process' children list. Every task that |
605 | * one attaches to becomes a child. And it is only attached children | |
606 | * of the debugger that are of interest (ptrace_check_attach checks | |
607 | * for this). | |
1da177e4 | 608 | */ |
4ac0068f CW |
609 | list_for_each_safe(this, next, ¤t->children) { |
610 | p = list_entry(this, struct task_struct, sibling); | |
611 | if (p->mm != mm) | |
1da177e4 | 612 | continue; |
1da177e4 LT |
613 | if (thread_matches(p, addr)) { |
614 | child = p; | |
615 | goto out; | |
616 | } | |
4ac0068f CW |
617 | } |
618 | ||
1da177e4 LT |
619 | out: |
620 | mmput(mm); | |
621 | return child; | |
622 | } | |
623 | ||
624 | /* | |
625 | * Write f32-f127 back to task->thread.fph if it has been modified. | |
626 | */ | |
627 | inline void | |
628 | ia64_flush_fph (struct task_struct *task) | |
629 | { | |
630 | struct ia64_psr *psr = ia64_psr(ia64_task_regs(task)); | |
631 | ||
05062d96 PC |
632 | /* |
633 | * Prevent migrating this task while | |
634 | * we're fiddling with the FPU state | |
635 | */ | |
636 | preempt_disable(); | |
1da177e4 LT |
637 | if (ia64_is_local_fpu_owner(task) && psr->mfh) { |
638 | psr->mfh = 0; | |
639 | task->thread.flags |= IA64_THREAD_FPH_VALID; | |
640 | ia64_save_fpu(&task->thread.fph[0]); | |
641 | } | |
05062d96 | 642 | preempt_enable(); |
1da177e4 LT |
643 | } |
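
/*
 * Note (illustrative): f32-f127 ("fph") are handled lazily -- they
 * live in the CPU for as long as the task is the local FPU owner and
 * psr.mfh says they were modified.  Flushing them to thread.fph must
 * not race with a migration to another CPU, hence the
 * preempt_disable()/preempt_enable() pair around the check-and-save.
 */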
644 | ||
645 | /* | |
646 | * Sync the fph state of the task so that it can be manipulated | |
647 | * through thread.fph. If necessary, f32-f127 are written back to | |
648 | * thread.fph or, if the fph state hasn't been used before, thread.fph | |
649 | * is cleared to zeroes. Also, access to f32-f127 is disabled to | |
650 | * ensure that the task picks up the state from thread.fph when it | |
651 | * executes again. | |
652 | */ | |
653 | void | |
654 | ia64_sync_fph (struct task_struct *task) | |
655 | { | |
656 | struct ia64_psr *psr = ia64_psr(ia64_task_regs(task)); | |
657 | ||
658 | ia64_flush_fph(task); | |
659 | if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) { | |
660 | task->thread.flags |= IA64_THREAD_FPH_VALID; | |
661 | memset(&task->thread.fph, 0, sizeof(task->thread.fph)); | |
662 | } | |
663 | ia64_drop_fpu(task); | |
664 | psr->dfh = 1; | |
665 | } | |
666 | ||
667 | static int | |
668 | access_fr (struct unw_frame_info *info, int regnum, int hi, | |
669 | unsigned long *data, int write_access) | |
670 | { | |
671 | struct ia64_fpreg fpval; | |
672 | int ret; | |
673 | ||
674 | ret = unw_get_fr(info, regnum, &fpval); | |
675 | if (ret < 0) | |
676 | return ret; | |
677 | ||
678 | if (write_access) { | |
679 | fpval.u.bits[hi] = *data; | |
680 | ret = unw_set_fr(info, regnum, fpval); | |
681 | } else | |
682 | *data = fpval.u.bits[hi]; | |
683 | return ret; | |
684 | } | |
685 | ||
686 | /* | |
687 | * Change the machine-state of CHILD such that it will return via the normal | |
688 | * kernel exit-path, rather than the syscall-exit path. | |
689 | */ | |
690 | static void | |
691 | convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt, | |
692 | unsigned long cfm) | |
693 | { | |
694 | struct unw_frame_info info, prev_info; | |
02a017a9 | 695 | unsigned long ip, sp, pr; |
1da177e4 LT |
696 | |
697 | unw_init_from_blocked_task(&info, child); | |
698 | while (1) { | |
699 | prev_info = info; | |
700 | if (unw_unwind(&info) < 0) | |
701 | return; | |
02a017a9 DMT |
702 | |
703 | unw_get_sp(&info, &sp); | |
704 | if ((long)((unsigned long)child + IA64_STK_OFFSET - sp) | |
705 | < IA64_PT_REGS_SIZE) { | |
706 | dprintk("ptrace.%s: ran off the top of the kernel " | |
707 | "stack\n", __FUNCTION__); | |
708 | return; | |
709 | } | |
710 | if (unw_get_pr (&prev_info, &pr) < 0) { | |
711 | unw_get_rp(&prev_info, &ip); | |
712 | dprintk("ptrace.%s: failed to read " | |
713 | "predicate register (ip=0x%lx)\n", | |
714 | __FUNCTION__, ip); | |
1da177e4 | 715 | return; |
02a017a9 DMT |
716 | } |
717 | if (unw_is_intr_frame(&info) | |
718 | && (pr & (1UL << PRED_USER_STACK))) | |
1da177e4 LT |
719 | break; |
720 | } | |
721 | ||
7f9eaedf DMT |
722 | /* |
723 | * Note: at the time of this call, the target task is blocked | |
724 | * in notify_resume_user() and by clearling PRED_LEAVE_SYSCALL | |
725 | * (aka, "pLvSys") we redirect execution from | |
726 | * .work_pending_syscall_end to .work_processed_kernel. | |
727 | */ | |
1da177e4 | 728 | unw_get_pr(&prev_info, &pr); |
7f9eaedf | 729 | pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL)); |
1da177e4 LT |
730 | pr |= (1UL << PRED_NON_SYSCALL); |
731 | unw_set_pr(&prev_info, pr); | |
732 | ||
733 | pt->cr_ifs = (1UL << 63) | cfm; | |
7f9eaedf DMT |
734 | /* |
735 | * Clear the memory that is NOT written on syscall-entry to | |
736 | * ensure we do not leak kernel-state to user when execution | |
737 | * resumes. | |
738 | */ | |
739 | pt->r2 = 0; | |
740 | pt->r3 = 0; | |
741 | pt->r14 = 0; | |
742 | memset(&pt->r16, 0, 16*8); /* clear r16-r31 */ | |
743 | memset(&pt->f6, 0, 6*16); /* clear f6-f11 */ | |
744 | pt->b7 = 0; | |
745 | pt->ar_ccv = 0; | |
746 | pt->ar_csd = 0; | |
747 | pt->ar_ssd = 0; | |
1da177e4 LT |
748 | } |
749 | ||
750 | static int | |
751 | access_nat_bits (struct task_struct *child, struct pt_regs *pt, | |
752 | struct unw_frame_info *info, | |
753 | unsigned long *data, int write_access) | |
754 | { | |
755 | unsigned long regnum, nat_bits, scratch_unat, dummy = 0; | |
756 | char nat = 0; | |
757 | ||
758 | if (write_access) { | |
759 | nat_bits = *data; | |
760 | scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits); | |
761 | if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) { | |
762 | dprintk("ptrace: failed to set ar.unat\n"); | |
763 | return -1; | |
764 | } | |
765 | for (regnum = 4; regnum <= 7; ++regnum) { | |
766 | unw_get_gr(info, regnum, &dummy, &nat); | |
767 | unw_set_gr(info, regnum, dummy, | |
768 | (nat_bits >> regnum) & 1); | |
769 | } | |
770 | } else { | |
771 | if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) { | |
772 | dprintk("ptrace: failed to read ar.unat\n"); | |
773 | return -1; | |
774 | } | |
775 | nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat); | |
776 | for (regnum = 4; regnum <= 7; ++regnum) { | |
777 | unw_get_gr(info, regnum, &dummy, &nat); | |
778 | nat_bits |= (nat != 0) << regnum; | |
779 | } | |
780 | *data = nat_bits; | |
781 | } | |
782 | return 0; | |
783 | } | |
784 | ||
785 | static int | |
786 | access_uarea (struct task_struct *child, unsigned long addr, | |
787 | unsigned long *data, int write_access) | |
788 | { | |
789 | unsigned long *ptr, regnum, urbs_end, rnat_addr, cfm; | |
790 | struct switch_stack *sw; | |
791 | struct pt_regs *pt; | |
792 | # define pt_reg_addr(pt, reg) ((void *) \ | |
793 | ((unsigned long) (pt) \ | |
794 | + offsetof(struct pt_regs, reg))) | |
795 | ||
796 | ||
797 | pt = ia64_task_regs(child); | |
798 | sw = (struct switch_stack *) (child->thread.ksp + 16); | |
799 | ||
800 | if ((addr & 0x7) != 0) { | |
801 | dprintk("ptrace: unaligned register address 0x%lx\n", addr); | |
802 | return -1; | |
803 | } | |
804 | ||
805 | if (addr < PT_F127 + 16) { | |
806 | /* accessing fph */ | |
807 | if (write_access) | |
808 | ia64_sync_fph(child); | |
809 | else | |
810 | ia64_flush_fph(child); | |
811 | ptr = (unsigned long *) | |
812 | ((unsigned long) &child->thread.fph + addr); | |
813 | } else if ((addr >= PT_F10) && (addr < PT_F11 + 16)) { | |
814 | /* scratch registers untouched by kernel (saved in pt_regs) */ | |
815 | ptr = pt_reg_addr(pt, f10) + (addr - PT_F10); | |
816 | } else if (addr >= PT_F12 && addr < PT_F15 + 16) { | |
817 | /* | |
818 | * Scratch registers untouched by kernel (saved in | |
819 | * switch_stack). | |
820 | */ | |
821 | ptr = (unsigned long *) ((long) sw | |
822 | + (addr - PT_NAT_BITS - 32)); | |
823 | } else if (addr < PT_AR_LC + 8) { | |
824 | /* preserved state: */ | |
825 | struct unw_frame_info info; | |
826 | char nat = 0; | |
827 | int ret; | |
828 | ||
829 | unw_init_from_blocked_task(&info, child); | |
830 | if (unw_unwind_to_user(&info) < 0) | |
831 | return -1; | |
832 | ||
833 | switch (addr) { | |
834 | case PT_NAT_BITS: | |
835 | return access_nat_bits(child, pt, &info, | |
836 | data, write_access); | |
837 | ||
838 | case PT_R4: case PT_R5: case PT_R6: case PT_R7: | |
839 | if (write_access) { | |
840 | /* read NaT bit first: */ | |
841 | unsigned long dummy; | |
842 | ||
843 | ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4, | |
844 | &dummy, &nat); | |
845 | if (ret < 0) | |
846 | return ret; | |
847 | } | |
848 | return unw_access_gr(&info, (addr - PT_R4)/8 + 4, data, | |
849 | &nat, write_access); | |
850 | ||
851 | case PT_B1: case PT_B2: case PT_B3: | |
852 | case PT_B4: case PT_B5: | |
853 | return unw_access_br(&info, (addr - PT_B1)/8 + 1, data, | |
854 | write_access); | |
855 | ||
856 | case PT_AR_EC: | |
857 | return unw_access_ar(&info, UNW_AR_EC, data, | |
858 | write_access); | |
859 | ||
860 | case PT_AR_LC: | |
861 | return unw_access_ar(&info, UNW_AR_LC, data, | |
862 | write_access); | |
863 | ||
864 | default: | |
865 | if (addr >= PT_F2 && addr < PT_F5 + 16) | |
866 | return access_fr(&info, (addr - PT_F2)/16 + 2, | |
867 | (addr & 8) != 0, data, | |
868 | write_access); | |
869 | else if (addr >= PT_F16 && addr < PT_F31 + 16) | |
870 | return access_fr(&info, | |
871 | (addr - PT_F16)/16 + 16, | |
872 | (addr & 8) != 0, | |
873 | data, write_access); | |
874 | else { | |
875 | dprintk("ptrace: rejecting access to register " | |
876 | "address 0x%lx\n", addr); | |
877 | return -1; | |
878 | } | |
879 | } | |
	} else if (addr < PT_F9+16) {
		/* scratch state */
		switch (addr) {
		case PT_AR_BSP:
			/*
			 * By convention, we use PT_AR_BSP to refer to
			 * the end of the user-level backing store.
			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
			 * to get the real value of ar.bsp at the time
			 * the kernel was entered.
			 *
			 * Furthermore, when changing the contents of
			 * PT_AR_BSP (or PT_CFM) we MUST copy any
			 * user-level stacked registers that are
			 * stored on the kernel stack back to
			 * user-space because otherwise, we might end
			 * up clobbering kernel stacked registers.
			 * Also, if this happens while the task is
			 * blocked in a system call, we convert the
			 * state such that the non-system-call exit
			 * path is used.  This ensures that the proper
			 * state will be picked up when resuming
			 * execution.  However, it *also* means that
			 * once we write PT_AR_BSP/PT_CFM, it won't be
			 * possible to modify the syscall arguments of
			 * the pending system call any longer.  This
			 * shouldn't be an issue because modifying
			 * PT_AR_BSP/PT_CFM generally implies that
			 * we're either abandoning the pending system
			 * call or that we defer its re-execution
			 * (e.g., due to GDB doing an inferior
			 * function call).
			 */
			urbs_end = ia64_get_user_rbs_end(child, pt, &cfm);
			if (write_access) {
				if (*data != urbs_end) {
					if (ia64_sync_user_rbs(child, sw,
							       pt->ar_bspstore,
							       urbs_end) < 0)
						return -1;
					if (in_syscall(pt))
						convert_to_non_syscall(child,
								       pt,
								       cfm);
					/*
					 * Simulate user-level write
					 * of ar.bsp:
					 */
					pt->loadrs = 0;
					pt->ar_bspstore = *data;
				}
			} else
				*data = urbs_end;
			return 0;

		case PT_CFM:
			urbs_end = ia64_get_user_rbs_end(child, pt, &cfm);
			if (write_access) {
				if (((cfm ^ *data) & PFM_MASK) != 0) {
					if (ia64_sync_user_rbs(child, sw,
							       pt->ar_bspstore,
							       urbs_end) < 0)
						return -1;
					if (in_syscall(pt))
						convert_to_non_syscall(child,
								       pt,
								       cfm);
					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
						      | (*data & PFM_MASK));
				}
			} else
				*data = cfm;
			return 0;

		case PT_CR_IPSR:
			if (write_access)
				pt->cr_ipsr = ((*data & IPSR_MASK)
					       | (pt->cr_ipsr & ~IPSR_MASK));
			else
				*data = (pt->cr_ipsr & IPSR_MASK);
			return 0;

		case PT_AR_RSC:
			if (write_access)
				pt->ar_rsc = *data | (3 << 2);	/* force PL3 */
			else
				*data = pt->ar_rsc;
			return 0;

		case PT_AR_RNAT:
			urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
			rnat_addr = (long) ia64_rse_rnat_addr((long *)
							      urbs_end);
			if (write_access)
				return ia64_poke(child, sw, urbs_end,
						 rnat_addr, *data);
			else
				return ia64_peek(child, sw, urbs_end,
						 rnat_addr, data);

		case PT_R1:
			ptr = pt_reg_addr(pt, r1);
			break;
		case PT_R2:  case PT_R3:
			ptr = pt_reg_addr(pt, r2) + (addr - PT_R2);
			break;
		case PT_R8:  case PT_R9:  case PT_R10: case PT_R11:
			ptr = pt_reg_addr(pt, r8) + (addr - PT_R8);
			break;
		case PT_R12: case PT_R13:
			ptr = pt_reg_addr(pt, r12) + (addr - PT_R12);
			break;
		case PT_R14:
			ptr = pt_reg_addr(pt, r14);
			break;
		case PT_R15:
			ptr = pt_reg_addr(pt, r15);
			break;
		case PT_R16: case PT_R17: case PT_R18: case PT_R19:
		case PT_R20: case PT_R21: case PT_R22: case PT_R23:
		case PT_R24: case PT_R25: case PT_R26: case PT_R27:
		case PT_R28: case PT_R29: case PT_R30: case PT_R31:
			ptr = pt_reg_addr(pt, r16) + (addr - PT_R16);
			break;
		case PT_B0:
			ptr = pt_reg_addr(pt, b0);
			break;
		case PT_B6:
			ptr = pt_reg_addr(pt, b6);
			break;
		case PT_B7:
			ptr = pt_reg_addr(pt, b7);
			break;
		case PT_F6:  case PT_F6+8: case PT_F7: case PT_F7+8:
		case PT_F8:  case PT_F8+8: case PT_F9: case PT_F9+8:
			ptr = pt_reg_addr(pt, f6) + (addr - PT_F6);
			break;
		case PT_AR_BSPSTORE:
			ptr = pt_reg_addr(pt, ar_bspstore);
			break;
		case PT_AR_UNAT:
			ptr = pt_reg_addr(pt, ar_unat);
			break;
		case PT_AR_PFS:
			ptr = pt_reg_addr(pt, ar_pfs);
			break;
		case PT_AR_CCV:
			ptr = pt_reg_addr(pt, ar_ccv);
			break;
		case PT_AR_FPSR:
			ptr = pt_reg_addr(pt, ar_fpsr);
			break;
		case PT_CR_IIP:
			ptr = pt_reg_addr(pt, cr_iip);
			break;
		case PT_PR:
			ptr = pt_reg_addr(pt, pr);
			break;
			/* scratch register */

		default:
			/* disallow accessing anything else... */
			dprintk("ptrace: rejecting access to register "
				"address 0x%lx\n", addr);
			return -1;
		}
	} else if (addr <= PT_AR_SSD) {
		ptr = pt_reg_addr(pt, ar_csd) + (addr - PT_AR_CSD);
	} else {
		/* access debug registers */

		if (addr >= PT_IBR) {
			regnum = (addr - PT_IBR) >> 3;
			ptr = &child->thread.ibr[0];
		} else {
			regnum = (addr - PT_DBR) >> 3;
			ptr = &child->thread.dbr[0];
		}

		if (regnum >= 8) {
			dprintk("ptrace: rejecting access to register "
				"address 0x%lx\n", addr);
			return -1;
		}
#ifdef CONFIG_PERFMON
		/*
		 * Check if debug registers are used by perfmon.  This
		 * test must be done once we know that we can do the
		 * operation, i.e. the arguments are all valid, but
		 * before we start modifying the state.
		 *
		 * Perfmon needs to keep a count of how many processes
		 * are trying to modify the debug registers for system
		 * wide monitoring sessions.
		 *
		 * We also include read access here, because they may
		 * cause the PMU-installed debug register state
		 * (dbr[], ibr[]) to be reset.  The two arrays are also
		 * used by perfmon, but we do not use
		 * IA64_THREAD_DBG_VALID.  The registers are restored
		 * by the PMU context switch code.
		 */
		if (pfm_use_debug_registers(child))
			return -1;
#endif

		if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
			child->thread.flags |= IA64_THREAD_DBG_VALID;
			memset(child->thread.dbr, 0,
			       sizeof(child->thread.dbr));
			memset(child->thread.ibr, 0,
			       sizeof(child->thread.ibr));
		}

		ptr += regnum;

		if ((regnum & 1) && write_access) {
			/* don't let the user set kernel-level breakpoints: */
			*ptr = *data & ~(7UL << 56);
			return 0;
		}
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}
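
/*
 * Illustrative note on the debug registers above (assumed encoding):
 * ibr/dbr come in pairs -- even-numbered registers hold the
 * breakpoint address, odd-numbered ones the mask/control word, whose
 * bits 56-59 form the privilege-level mask.  Clearing (7UL << 56) in
 * a control-word write keeps plm0-plm2 off, so a debugger can only
 * arm breakpoints that fire at privilege level 3 (user mode).
 */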
1107 | ||
1108 | static long | |
1109 | ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr) | |
1110 | { | |
1111 | unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val; | |
1112 | struct unw_frame_info info; | |
1113 | struct ia64_fpreg fpval; | |
1114 | struct switch_stack *sw; | |
1115 | struct pt_regs *pt; | |
1116 | long ret, retval = 0; | |
1117 | char nat = 0; | |
1118 | int i; | |
1119 | ||
1120 | if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs))) | |
1121 | return -EIO; | |
1122 | ||
1123 | pt = ia64_task_regs(child); | |
1124 | sw = (struct switch_stack *) (child->thread.ksp + 16); | |
1125 | unw_init_from_blocked_task(&info, child); | |
1126 | if (unw_unwind_to_user(&info) < 0) { | |
1127 | return -EIO; | |
1128 | } | |
1129 | ||
1130 | if (((unsigned long) ppr & 0x7) != 0) { | |
1131 | dprintk("ptrace:unaligned register address %p\n", ppr); | |
1132 | return -EIO; | |
1133 | } | |
1134 | ||
1135 | if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0 | |
1136 | || access_uarea(child, PT_AR_EC, &ec, 0) < 0 | |
1137 | || access_uarea(child, PT_AR_LC, &lc, 0) < 0 | |
1138 | || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0 | |
1139 | || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0 | |
1140 | || access_uarea(child, PT_CFM, &cfm, 0) | |
1141 | || access_uarea(child, PT_NAT_BITS, &nat_bits, 0)) | |
1142 | return -EIO; | |
1143 | ||
1144 | /* control regs */ | |
1145 | ||
1146 | retval |= __put_user(pt->cr_iip, &ppr->cr_iip); | |
1147 | retval |= __put_user(psr, &ppr->cr_ipsr); | |
1148 | ||
1149 | /* app regs */ | |
1150 | ||
1151 | retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]); | |
1152 | retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]); | |
1153 | retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]); | |
1154 | retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]); | |
1155 | retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]); | |
1156 | retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]); | |
1157 | ||
1158 | retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]); | |
1159 | retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]); | |
1160 | retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]); | |
1161 | retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]); | |
1162 | retval |= __put_user(cfm, &ppr->cfm); | |
1163 | ||
1164 | /* gr1-gr3 */ | |
1165 | ||
1166 | retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long)); | |
1167 | retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) *2); | |
1168 | ||
1169 | /* gr4-gr7 */ | |
1170 | ||
1171 | for (i = 4; i < 8; i++) { | |
1172 | if (unw_access_gr(&info, i, &val, &nat, 0) < 0) | |
1173 | return -EIO; | |
1174 | retval |= __put_user(val, &ppr->gr[i]); | |
1175 | } | |
1176 | ||
1177 | /* gr8-gr11 */ | |
1178 | ||
1179 | retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4); | |
1180 | ||
1181 | /* gr12-gr15 */ | |
1182 | ||
1183 | retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2); | |
1184 | retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long)); | |
1185 | retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long)); | |
1186 | ||
1187 | /* gr16-gr31 */ | |
1188 | ||
1189 | retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16); | |
1190 | ||
1191 | /* b0 */ | |
1192 | ||
1193 | retval |= __put_user(pt->b0, &ppr->br[0]); | |
1194 | ||
1195 | /* b1-b5 */ | |
1196 | ||
1197 | for (i = 1; i < 6; i++) { | |
1198 | if (unw_access_br(&info, i, &val, 0) < 0) | |
1199 | return -EIO; | |
1200 | __put_user(val, &ppr->br[i]); | |
1201 | } | |
1202 | ||
1203 | /* b6-b7 */ | |
1204 | ||
1205 | retval |= __put_user(pt->b6, &ppr->br[6]); | |
1206 | retval |= __put_user(pt->b7, &ppr->br[7]); | |
1207 | ||
1208 | /* fr2-fr5 */ | |
1209 | ||
1210 | for (i = 2; i < 6; i++) { | |
1211 | if (unw_get_fr(&info, i, &fpval) < 0) | |
1212 | return -EIO; | |
1213 | retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval)); | |
1214 | } | |
1215 | ||
1216 | /* fr6-fr11 */ | |
1217 | ||
1218 | retval |= __copy_to_user(&ppr->fr[6], &pt->f6, | |
1219 | sizeof(struct ia64_fpreg) * 6); | |
1220 | ||
1221 | /* fp scratch regs(12-15) */ | |
1222 | ||
1223 | retval |= __copy_to_user(&ppr->fr[12], &sw->f12, | |
1224 | sizeof(struct ia64_fpreg) * 4); | |
1225 | ||
1226 | /* fr16-fr31 */ | |
1227 | ||
1228 | for (i = 16; i < 32; i++) { | |
1229 | if (unw_get_fr(&info, i, &fpval) < 0) | |
1230 | return -EIO; | |
1231 | retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval)); | |
1232 | } | |
1233 | ||
1234 | /* fph */ | |
1235 | ||
1236 | ia64_flush_fph(child); | |
1237 | retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph, | |
1238 | sizeof(ppr->fr[32]) * 96); | |
1239 | ||
1240 | /* preds */ | |
1241 | ||
1242 | retval |= __put_user(pt->pr, &ppr->pr); | |
1243 | ||
1244 | /* nat bits */ | |
1245 | ||
1246 | retval |= __put_user(nat_bits, &ppr->nat); | |
1247 | ||
1248 | ret = retval ? -EIO : 0; | |
1249 | return ret; | |
1250 | } | |
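
/*
 * Usage sketch (illustrative, user-space side; not part of this file):
 * a debugger would read the full register frame with
 *
 *	struct pt_all_user_regs regs;
 *	ptrace(PTRACE_GETREGS, pid, 0, (unsigned long) &regs);
 *
 * after the target has stopped, and write it back with
 * PTRACE_SETREGS.  Note that on ia64 the "addr" argument is unused by
 * these two requests; the buffer pointer travels in "data".
 */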
1251 | ||
1252 | static long | |
1253 | ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr) | |
1254 | { | |
4ea78729 | 1255 | unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0; |
1da177e4 LT |
1256 | struct unw_frame_info info; |
1257 | struct switch_stack *sw; | |
1258 | struct ia64_fpreg fpval; | |
1259 | struct pt_regs *pt; | |
1260 | long ret, retval = 0; | |
1261 | int i; | |
1262 | ||
1263 | memset(&fpval, 0, sizeof(fpval)); | |
1264 | ||
1265 | if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs))) | |
1266 | return -EIO; | |
1267 | ||
1268 | pt = ia64_task_regs(child); | |
1269 | sw = (struct switch_stack *) (child->thread.ksp + 16); | |
1270 | unw_init_from_blocked_task(&info, child); | |
1271 | if (unw_unwind_to_user(&info) < 0) { | |
1272 | return -EIO; | |
1273 | } | |
1274 | ||
1275 | if (((unsigned long) ppr & 0x7) != 0) { | |
1276 | dprintk("ptrace:unaligned register address %p\n", ppr); | |
1277 | return -EIO; | |
1278 | } | |
1279 | ||
1280 | /* control regs */ | |
1281 | ||
1282 | retval |= __get_user(pt->cr_iip, &ppr->cr_iip); | |
1283 | retval |= __get_user(psr, &ppr->cr_ipsr); | |
1284 | ||
1285 | /* app regs */ | |
1286 | ||
1287 | retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]); | |
4ea78729 | 1288 | retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]); |
1da177e4 LT |
1289 | retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]); |
1290 | retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]); | |
1291 | retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]); | |
1292 | retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]); | |
1293 | ||
1294 | retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]); | |
1295 | retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]); | |
1296 | retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]); | |
1297 | retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]); | |
1298 | retval |= __get_user(cfm, &ppr->cfm); | |
1299 | ||
1300 | /* gr1-gr3 */ | |
1301 | ||
1302 | retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long)); | |
1303 | retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2); | |
1304 | ||
1305 | /* gr4-gr7 */ | |
1306 | ||
1307 | for (i = 4; i < 8; i++) { | |
1308 | retval |= __get_user(val, &ppr->gr[i]); | |
1309 | /* NaT bit will be set via PT_NAT_BITS: */ | |
1310 | if (unw_set_gr(&info, i, val, 0) < 0) | |
1311 | return -EIO; | |
1312 | } | |
1313 | ||
1314 | /* gr8-gr11 */ | |
1315 | ||
1316 | retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4); | |
1317 | ||
1318 | /* gr12-gr15 */ | |
1319 | ||
1320 | retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2); | |
1321 | retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long)); | |
1322 | retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long)); | |
1323 | ||
1324 | /* gr16-gr31 */ | |
1325 | ||
1326 | retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16); | |
1327 | ||
1328 | /* b0 */ | |
1329 | ||
1330 | retval |= __get_user(pt->b0, &ppr->br[0]); | |
1331 | ||
1332 | /* b1-b5 */ | |
1333 | ||
1334 | for (i = 1; i < 6; i++) { | |
1335 | retval |= __get_user(val, &ppr->br[i]); | |
1336 | unw_set_br(&info, i, val); | |
1337 | } | |
1338 | ||
1339 | /* b6-b7 */ | |
1340 | ||
1341 | retval |= __get_user(pt->b6, &ppr->br[6]); | |
1342 | retval |= __get_user(pt->b7, &ppr->br[7]); | |
1343 | ||
1344 | /* fr2-fr5 */ | |
1345 | ||
1346 | for (i = 2; i < 6; i++) { | |
1347 | retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval)); | |
1348 | if (unw_set_fr(&info, i, fpval) < 0) | |
1349 | return -EIO; | |
1350 | } | |
1351 | ||
1352 | /* fr6-fr11 */ | |
1353 | ||
1354 | retval |= __copy_from_user(&pt->f6, &ppr->fr[6], | |
1355 | sizeof(ppr->fr[6]) * 6); | |
1356 | ||
1357 | /* fp scratch regs(12-15) */ | |
1358 | ||
1359 | retval |= __copy_from_user(&sw->f12, &ppr->fr[12], | |
1360 | sizeof(ppr->fr[12]) * 4); | |
1361 | ||
1362 | /* fr16-fr31 */ | |
1363 | ||
1364 | for (i = 16; i < 32; i++) { | |
1365 | retval |= __copy_from_user(&fpval, &ppr->fr[i], | |
1366 | sizeof(fpval)); | |
1367 | if (unw_set_fr(&info, i, fpval) < 0) | |
1368 | return -EIO; | |
1369 | } | |
1370 | ||
1371 | /* fph */ | |
1372 | ||
1373 | ia64_sync_fph(child); | |
1374 | retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32], | |
1375 | sizeof(ppr->fr[32]) * 96); | |
1376 | ||
1377 | /* preds */ | |
1378 | ||
1379 | retval |= __get_user(pt->pr, &ppr->pr); | |
1380 | ||
1381 | /* nat bits */ | |
1382 | ||
1383 | retval |= __get_user(nat_bits, &ppr->nat); | |
1384 | ||
1385 | retval |= access_uarea(child, PT_CR_IPSR, &psr, 1); | |
4ea78729 | 1386 | retval |= access_uarea(child, PT_AR_RSC, &rsc, 1); |
1da177e4 LT |
1387 | retval |= access_uarea(child, PT_AR_EC, &ec, 1); |
1388 | retval |= access_uarea(child, PT_AR_LC, &lc, 1); | |
1389 | retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1); | |
1390 | retval |= access_uarea(child, PT_AR_BSP, &bsp, 1); | |
1391 | retval |= access_uarea(child, PT_CFM, &cfm, 1); | |
1392 | retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1); | |
1393 | ||
1394 | ret = retval ? -EIO : 0; | |
1395 | return ret; | |
1396 | } | |
1397 | ||
1398 | /* | |
1399 | * Called by kernel/ptrace.c when detaching.. | |
1400 | * | |
1401 | * Make sure the single step bit is not set. | |
1402 | */ | |
1403 | void | |
1404 | ptrace_disable (struct task_struct *child) | |
1405 | { | |
1406 | struct ia64_psr *child_psr = ia64_psr(ia64_task_regs(child)); | |
1407 | ||
1408 | /* make sure the single step/taken-branch trap bits are not set: */ | |
1409 | child_psr->ss = 0; | |
1410 | child_psr->tb = 0; | |
1411 | } | |
1412 | ||
1413 | asmlinkage long | |
1414 | sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data) | |
1415 | { | |
1416 | struct pt_regs *pt; | |
1417 | unsigned long urbs_end, peek_or_poke; | |
1418 | struct task_struct *child; | |
1419 | struct switch_stack *sw; | |
1420 | long ret; | |
1421 | ||
1422 | lock_kernel(); | |
1423 | ret = -EPERM; | |
1424 | if (request == PTRACE_TRACEME) { | |
1425 | /* are we already being traced? */ | |
1426 | if (current->ptrace & PT_PTRACED) | |
1427 | goto out; | |
1428 | ret = security_ptrace(current->parent, current); | |
1429 | if (ret) | |
1430 | goto out; | |
1431 | current->ptrace |= PT_PTRACED; | |
1432 | ret = 0; | |
1433 | goto out; | |
1434 | } | |
1435 | ||
1436 | peek_or_poke = (request == PTRACE_PEEKTEXT | |
1437 | || request == PTRACE_PEEKDATA | |
1438 | || request == PTRACE_POKETEXT | |
1439 | || request == PTRACE_POKEDATA); | |
1440 | ret = -ESRCH; | |
1441 | read_lock(&tasklist_lock); | |
1442 | { | |
1443 | child = find_task_by_pid(pid); | |
1444 | if (child) { | |
1445 | if (peek_or_poke) | |
1446 | child = find_thread_for_addr(child, addr); | |
1447 | get_task_struct(child); | |
1448 | } | |
1449 | } | |
1450 | read_unlock(&tasklist_lock); | |
1451 | if (!child) | |
1452 | goto out; | |
1453 | ret = -EPERM; | |
1454 | if (pid == 1) /* no messing around with init! */ | |
1455 | goto out_tsk; | |
1456 | ||
1457 | if (request == PTRACE_ATTACH) { | |
1458 | ret = ptrace_attach(child); | |
1459 | goto out_tsk; | |
1460 | } | |
1461 | ||
1462 | ret = ptrace_check_attach(child, request == PTRACE_KILL); | |
1463 | if (ret < 0) | |
1464 | goto out_tsk; | |
1465 | ||
1466 | pt = ia64_task_regs(child); | |
1467 | sw = (struct switch_stack *) (child->thread.ksp + 16); | |
1468 | ||
1469 | switch (request) { | |
1470 | case PTRACE_PEEKTEXT: | |
1471 | case PTRACE_PEEKDATA: | |
1472 | /* read word at location addr */ | |
1473 | urbs_end = ia64_get_user_rbs_end(child, pt, NULL); | |
1474 | ret = ia64_peek(child, sw, urbs_end, addr, &data); | |
1475 | if (ret == 0) { | |
1476 | ret = data; | |
1477 | /* ensure "ret" is not mistaken as an error code: */ | |
1478 | force_successful_syscall_return(); | |
1479 | } | |
1480 | goto out_tsk; | |
1481 | ||
1482 | case PTRACE_POKETEXT: | |
1483 | case PTRACE_POKEDATA: | |
1484 | /* write the word at location addr */ | |
1485 | urbs_end = ia64_get_user_rbs_end(child, pt, NULL); | |
1486 | ret = ia64_poke(child, sw, urbs_end, addr, data); | |
1487 | goto out_tsk; | |
1488 | ||
1489 | case PTRACE_PEEKUSR: | |
1490 | /* read the word at addr in the USER area */ | |
1491 | if (access_uarea(child, addr, &data, 0) < 0) { | |
1492 | ret = -EIO; | |
1493 | goto out_tsk; | |
1494 | } | |
1495 | ret = data; | |
1496 | /* ensure "ret" is not mistaken as an error code */ | |
1497 | force_successful_syscall_return(); | |
1498 | goto out_tsk; | |
1499 | ||
1500 | case PTRACE_POKEUSR: | |
1501 | /* write the word at addr in the USER area */ | |
1502 | if (access_uarea(child, addr, &data, 1) < 0) { | |
1503 | ret = -EIO; | |
1504 | goto out_tsk; | |
1505 | } | |
1506 | ret = 0; | |
1507 | goto out_tsk; | |
1508 | ||
1509 | case PTRACE_OLD_GETSIGINFO: | |
1510 | /* for backwards-compatibility */ | |
1511 | ret = ptrace_request(child, PTRACE_GETSIGINFO, addr, data); | |
1512 | goto out_tsk; | |
1513 | ||
1514 | case PTRACE_OLD_SETSIGINFO: | |
1515 | /* for backwards-compatibility */ | |
1516 | ret = ptrace_request(child, PTRACE_SETSIGINFO, addr, data); | |
1517 | goto out_tsk; | |
1518 | ||
1519 | case PTRACE_SYSCALL: | |
1520 | /* continue and stop at next (return from) syscall */ | |
1521 | case PTRACE_CONT: | |
1522 | /* restart after signal. */ | |
1523 | ret = -EIO; | |
7ed20e1a | 1524 | if (!valid_signal(data)) |
1da177e4 LT |
1525 | goto out_tsk; |
1526 | if (request == PTRACE_SYSCALL) | |
1527 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | |
1528 | else | |
1529 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | |
1530 | child->exit_code = data; | |
1531 | ||
1532 | /* | |
1533 | * Make sure the single step/taken-branch trap bits | |
1534 | * are not set: | |
1535 | */ | |
1536 | ia64_psr(pt)->ss = 0; | |
1537 | ia64_psr(pt)->tb = 0; | |
1538 | ||
1539 | wake_up_process(child); | |
1540 | ret = 0; | |
1541 | goto out_tsk; | |
1542 | ||
1543 | case PTRACE_KILL: | |
1544 | /* | |
1545 | * Make the child exit. Best I can do is send it a | |
1546 | * sigkill. Perhaps it should be put in the status | |
1547 | * that it wants to exit. | |
1548 | */ | |
1549 | if (child->exit_state == EXIT_ZOMBIE) | |
1550 | /* already dead */ | |
1551 | goto out_tsk; | |
1552 | child->exit_code = SIGKILL; | |
1553 | ||
1554 | ptrace_disable(child); | |
1555 | wake_up_process(child); | |
1556 | ret = 0; | |
1557 | goto out_tsk; | |
1558 | ||
1559 | case PTRACE_SINGLESTEP: | |
1560 | /* let child execute for one instruction */ | |
1561 | case PTRACE_SINGLEBLOCK: | |
1562 | ret = -EIO; | |
7ed20e1a | 1563 | if (!valid_signal(data)) |
1da177e4 LT |
1564 | goto out_tsk; |
1565 | ||
1566 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | |
1567 | if (request == PTRACE_SINGLESTEP) { | |
1568 | ia64_psr(pt)->ss = 1; | |
1569 | } else { | |
1570 | ia64_psr(pt)->tb = 1; | |
1571 | } | |
1572 | child->exit_code = data; | |
1573 | ||
1574 | /* give it a chance to run. */ | |
1575 | wake_up_process(child); | |
1576 | ret = 0; | |
1577 | goto out_tsk; | |
1578 | ||
1579 | case PTRACE_DETACH: | |
1580 | /* detach a process that was attached. */ | |
1581 | ret = ptrace_detach(child, data); | |
1582 | goto out_tsk; | |
1583 | ||
1584 | case PTRACE_GETREGS: | |
1585 | ret = ptrace_getregs(child, | |
1586 | (struct pt_all_user_regs __user *) data); | |
1587 | goto out_tsk; | |
1588 | ||
1589 | case PTRACE_SETREGS: | |
1590 | ret = ptrace_setregs(child, | |
1591 | (struct pt_all_user_regs __user *) data); | |
1592 | goto out_tsk; | |
1593 | ||
1594 | default: | |
1595 | ret = ptrace_request(child, request, addr, data); | |
1596 | goto out_tsk; | |
1597 | } | |
1598 | out_tsk: | |
1599 | put_task_struct(child); | |
1600 | out: | |
1601 | unlock_kernel(); | |
1602 | return ret; | |
1603 | } | |
1604 | ||
1605 | ||
1606 | void | |
1607 | syscall_trace (void) | |
1608 | { | |
1609 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) | |
1610 | return; | |
1611 | if (!(current->ptrace & PT_PTRACED)) | |
1612 | return; | |
1613 | /* | |
1614 | * The 0x80 provides a way for the tracing parent to | |
1615 | * distinguish between a syscall stop and SIGTRAP delivery. | |
1616 | */ | |
1617 | ptrace_notify(SIGTRAP | |
1618 | | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); | |
1619 | ||
1620 | /* | |
1621 | * This isn't the same as continuing with a signal, but it | |
1622 | * will do for normal use. strace only continues with a | |
1623 | * signal if the stopping signal is not SIGTRAP. -brl | |
1624 | */ | |
1625 | if (current->exit_code) { | |
1626 | send_sig(current->exit_code, current, 1); | |
1627 | current->exit_code = 0; | |
1628 | } | |
1629 | } | |
1630 | ||
1631 | /* "asmlinkage" so the input arguments are preserved... */ | |
1632 | ||
1633 | asmlinkage void | |
1634 | syscall_trace_enter (long arg0, long arg1, long arg2, long arg3, | |
1635 | long arg4, long arg5, long arg6, long arg7, | |
1636 | struct pt_regs regs) | |
1637 | { | |
2fd6f58b | 1638 | if (test_thread_flag(TIF_SYSCALL_TRACE) |
1639 | && (current->ptrace & PT_PTRACED)) | |
1640 | syscall_trace(); | |
1da177e4 LT |
1641 | |
1642 | if (unlikely(current->audit_context)) { | |
2fd6f58b | 1643 | long syscall; |
1644 | int arch; | |
1645 | ||
1646 | if (IS_IA32_PROCESS(®s)) { | |
1da177e4 | 1647 | syscall = regs.r1; |
2fd6f58b | 1648 | arch = AUDIT_ARCH_I386; |
1649 | } else { | |
1da177e4 | 1650 | syscall = regs.r15; |
2fd6f58b | 1651 | arch = AUDIT_ARCH_IA64; |
1652 | } | |
1da177e4 | 1653 | |
2fd6f58b | 1654 | audit_syscall_entry(current, arch, syscall, arg0, arg1, arg2, arg3); |
1da177e4 LT |
1655 | } |
1656 | ||
1da177e4 LT |
1657 | } |
1658 | ||
1659 | /* "asmlinkage" so the input arguments are preserved... */ | |
1660 | ||
1661 | asmlinkage void | |
1662 | syscall_trace_leave (long arg0, long arg1, long arg2, long arg3, | |
1663 | long arg4, long arg5, long arg6, long arg7, | |
1664 | struct pt_regs regs) | |
1665 | { | |
1666 | if (unlikely(current->audit_context)) | |
2fd6f58b | 1667 | audit_syscall_exit(current, AUDITSC_RESULT(regs.r10), regs.r8); |
1da177e4 LT |
1668 | |
1669 | if (test_thread_flag(TIF_SYSCALL_TRACE) | |
1670 | && (current->ptrace & PT_PTRACED)) | |
1671 | syscall_trace(); | |
1672 | } |