/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * A code-rewriter that enables instruction single-stepping.
 * Derived from iLib's single-stepping code.
 */

#ifndef __tilegx__   /* Hardware support for single step unavailable. */

/* These functions are only used on the TILE platform */
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/types.h>
#include <linux/err.h>
#include <asm/cacheflush.h>
#include <asm/unaligned.h>
#include <arch/abi.h>
#include <arch/opcode.h>

#define signExtend17(val)       sign_extend((val), 17)
#define TILE_X1_MASK            (0xffffffffULL << 31)
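/*
 * TILE_X1_MASK covers the bundle bits rewritten by move_X1() and
 * addi_X1() below, i.e. the X1 slot of the 64-bit instruction bundle.
 */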

int unaligned_printk;

static int __init setup_unaligned_printk(char *str)
{
        long val;
        if (strict_strtol(str, 0, &val) != 0)
                return 0;
        unaligned_printk = val;
        pr_info("Printk for each unaligned data access is %s\n",
                unaligned_printk ? "enabled" : "disabled");
        return 1;
}
__setup("unaligned_printk=", setup_unaligned_printk);
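/* For example, booting with "unaligned_printk=1" logs every fixup. */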

unsigned int unaligned_fixup_count;

enum mem_op {
        MEMOP_NONE,
        MEMOP_LOAD,
        MEMOP_STORE,
        MEMOP_LOAD_POSTINCR,
        MEMOP_STORE_POSTINCR
};

static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, s32 offset)
{
        tile_bundle_bits result;

        /* mask out the old offset */
        tile_bundle_bits mask = create_BrOff_X1(-1);
        result = n & (~mask);

        /* or in the new offset */
        result |= create_BrOff_X1(offset);

        return result;
}

static inline tile_bundle_bits move_X1(tile_bundle_bits n, int dest, int src)
{
        tile_bundle_bits result;
        tile_bundle_bits op;

        result = n & (~TILE_X1_MASK);

        op = create_Opcode_X1(SPECIAL_0_OPCODE_X1) |
                create_RRROpcodeExtension_X1(OR_SPECIAL_0_OPCODE_X1) |
                create_Dest_X1(dest) |
                create_SrcB_X1(TREG_ZERO) |
                create_SrcA_X1(src);

        result |= op;
        return result;
}

static inline tile_bundle_bits nop_X1(tile_bundle_bits n)
{
        return move_X1(n, TREG_ZERO, TREG_ZERO);
}

static inline tile_bundle_bits addi_X1(
        tile_bundle_bits n, int dest, int src, int imm)
{
        n &= ~TILE_X1_MASK;

        n |= (create_SrcA_X1(src) |
              create_Dest_X1(dest) |
              create_Imm8_X1(imm) |
              create_S_X1(0) |
              create_Opcode_X1(IMM_0_OPCODE_X1) |
              create_ImmOpcodeExtension_X1(ADDI_IMM_0_OPCODE_X1));

        return n;
}

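/*
 * Rewrite an unaligned load or store so it can be single-stepped safely.
 * Returns the rewritten bundle to place in the single-step buffer, the
 * original bundle if the access needs no special handling, or 0 if a
 * signal was posted and the caller should give up on the instruction.
 */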
static tile_bundle_bits rewrite_load_store_unaligned(
                struct single_step_state *state,
                tile_bundle_bits bundle,
                struct pt_regs *regs,
                enum mem_op mem_op,
                int size, int sign_ext)
{
        unsigned char __user *addr;
        int val_reg, addr_reg, err, val;

        /* Get address and value registers */
        if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
                addr_reg = get_SrcA_Y2(bundle);
                val_reg = get_SrcBDest_Y2(bundle);
        } else if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
                addr_reg = get_SrcA_X1(bundle);
                val_reg = get_Dest_X1(bundle);
        } else {
                addr_reg = get_SrcA_X1(bundle);
                val_reg = get_SrcB_X1(bundle);
        }

        /*
         * If registers are not GPRs, don't try to handle it.
         *
         * FIXME: we could handle non-GPR loads by getting the real value
         * from memory, writing it to the single step buffer, using a
         * temp_reg to hold a pointer to that memory, then executing that
         * instruction and resetting temp_reg.  For non-GPR stores, it's a
         * little trickier; we could use the single step buffer for that
         * too, but we'd have to add some more state bits so that we could
         * call back in here to copy that value to the real target.  For
         * now, we just handle the simple case.
         */
        if ((val_reg >= PTREGS_NR_GPRS &&
             (val_reg != TREG_ZERO ||
              mem_op == MEMOP_LOAD ||
              mem_op == MEMOP_LOAD_POSTINCR)) ||
            addr_reg >= PTREGS_NR_GPRS)
                return bundle;

        /* If it's aligned, don't handle it specially */
        addr = (void __user *)regs->regs[addr_reg];
        if (((unsigned long)addr % size) == 0)
                return bundle;

        /*
         * Return SIGBUS with the unaligned address, if requested.
         * Note that we return SIGBUS even for completely invalid addresses
         * as long as they are in fact unaligned; this matches what the
         * tilepro hardware would be doing, if it could provide us with the
         * actual bad address in an SPR, which it doesn't.
         */
        if (unaligned_fixup == 0) {
                siginfo_t info = {
                        .si_signo = SIGBUS,
                        .si_code = BUS_ADRALN,
                        .si_addr = addr
                };
                trace_unhandled_signal("unaligned trap", regs,
                                       (unsigned long)addr, SIGBUS);
                force_sig_info(info.si_signo, &info, current);
                return (tile_bundle_bits) 0;
        }

#ifndef __LITTLE_ENDIAN
# error We assume little-endian representation with copy_xx_user size 2 here
#endif
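        /*
         * The little-endian assumption matters for the store path below:
         * the first "size" bytes of the int "val" are its low-order bytes,
         * so a 2-byte copy_to_user() stores the correct halfword.
         */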
        /* Handle unaligned load/store */
        if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
                unsigned short val_16;
                switch (size) {
                case 2:
                        err = copy_from_user(&val_16, addr, sizeof(val_16));
                        val = sign_ext ? ((short)val_16) : val_16;
                        break;
                case 4:
                        err = copy_from_user(&val, addr, sizeof(val));
                        break;
                default:
                        BUG();
                }
                if (err == 0) {
                        state->update_reg = val_reg;
                        state->update_value = val;
                        state->update = 1;
                }
        } else {
                val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
                err = copy_to_user(addr, &val, size);
        }

        if (err) {
                siginfo_t info = {
                        .si_signo = SIGSEGV,
                        .si_code = SEGV_MAPERR,
                        .si_addr = addr
                };
                trace_unhandled_signal("segfault", regs,
                                       (unsigned long)addr, SIGSEGV);
                force_sig_info(info.si_signo, &info, current);
                return (tile_bundle_bits) 0;
        }

        if (unaligned_printk || unaligned_fixup_count == 0) {
                pr_info("Process %d/%s: PC %#lx: Fixup of unaligned %s at %#lx.\n",
                        current->pid, current->comm, regs->pc,
                        (mem_op == MEMOP_LOAD ||
                         mem_op == MEMOP_LOAD_POSTINCR) ?
                        "load" : "store",
                        (unsigned long)addr);
                if (!unaligned_printk) {
#define P pr_info
P("\n");
P("Unaligned fixups in the kernel will slow your application considerably.\n");
P("To find them, write a \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n");
P("which requests the kernel show all unaligned fixups, or write a \"0\"\n");
P("to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n");
P("access will become a SIGBUS you can debug. No further warnings will be\n");
P("shown so as to avoid additional slowdown, but you can track the number\n");
P("of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n");
P("Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n");
P("\n");
#undef P
                }
        }
        ++unaligned_fixup_count;

        if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
                /* Convert the Y2 instruction to a prefetch. */
                bundle &= ~(create_SrcBDest_Y2(-1) |
                            create_Opcode_Y2(-1));
                bundle |= (create_SrcBDest_Y2(TREG_ZERO) |
                           create_Opcode_Y2(LW_OPCODE_Y2));
        /* Replace the load postincr with an addi */
        } else if (mem_op == MEMOP_LOAD_POSTINCR) {
                bundle = addi_X1(bundle, addr_reg, addr_reg,
                                 get_Imm8_X1(bundle));
        /* Replace the store postincr with an addi */
        } else if (mem_op == MEMOP_STORE_POSTINCR) {
                bundle = addi_X1(bundle, addr_reg, addr_reg,
                                 get_Dest_Imm8_X1(bundle));
        } else {
                /* Convert the X1 instruction to a nop. */
                bundle &= ~(create_Opcode_X1(-1) |
                            create_UnShOpcodeExtension_X1(-1) |
                            create_UnOpcodeExtension_X1(-1));
                bundle |= (create_Opcode_X1(SHUN_0_OPCODE_X1) |
                           create_UnShOpcodeExtension_X1(
                                   UN_0_SHUN_0_OPCODE_X1) |
                           create_UnOpcodeExtension_X1(
                                   NOP_UN_0_SHUN_0_OPCODE_X1));
        }

        return bundle;
}

/*
 * Called after execve() has started the new image.  This allows us
 * to reset the info state.  Note that the mmap'ed memory, if there
 * was any, has already been unmapped by the exec.
 */
void single_step_execve(void)
{
        struct thread_info *ti = current_thread_info();
        kfree(ti->step_state);
        ti->step_state = NULL;
}

/**
 * single_step_once() - entry point when single stepping has been triggered.
 * @regs: The machine register state
 *
 * When we arrive at this routine via a trampoline, the single step
 * engine copies the executing bundle to the single step buffer.
 * If the instruction is a conditional branch, then the target is
 * reset to one past the next instruction.  If the instruction
 * sets the lr, then that is noted.  If the instruction is a jump
 * or call, then the new target pc is preserved and the current
 * bundle instruction set to null.
 *
 * The necessary post-single-step rewriting information is stored in
 * single_step_state.  We use data segment values because the
 * stack will be rewound when we run the rewritten single-stepped
 * instruction.
 */
void single_step_once(struct pt_regs *regs)
{
        extern tile_bundle_bits __single_step_ill_insn;
        extern tile_bundle_bits __single_step_j_insn;
        extern tile_bundle_bits __single_step_addli_insn;
        extern tile_bundle_bits __single_step_auli_insn;
        struct thread_info *info = (void *)current_thread_info();
        struct single_step_state *state = info->step_state;
        int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
        tile_bundle_bits __user *buffer, *pc;
        tile_bundle_bits bundle;
        int temp_reg;
        int target_reg = TREG_LR;
        int err;
        enum mem_op mem_op = MEMOP_NONE;
        int size = 0, sign_ext = 0;     /* happy compiler */

        asm(
"    .pushsection .rodata.single_step\n"
"    .align 8\n"
"    .globl    __single_step_ill_insn\n"
"__single_step_ill_insn:\n"
"    ill\n"
"    .globl    __single_step_addli_insn\n"
"__single_step_addli_insn:\n"
"    { nop; addli r0, zero, 0 }\n"
"    .globl    __single_step_auli_insn\n"
"__single_step_auli_insn:\n"
"    { nop; auli r0, r0, 0 }\n"
"    .globl    __single_step_j_insn\n"
"__single_step_j_insn:\n"
"    j .\n"
"    .popsection\n"
        );

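        /*
         * The buffer is filled below with one of two sequences built
         * from the templates above:
         *
         *   single-stepping:  { user bundle } { ill } { ill }
         *   unaligned fixup:  { user bundle } [ addli+auli pair ] { j back }
         */
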
        /*
         * Enable interrupts here to allow touching userspace and the like.
         * The callers expect this: do_trap() already has interrupts
         * enabled, and do_work_pending() handles functions that enable
         * interrupts internally.
         */
        local_irq_enable();

        if (state == NULL) {
                /* allocate the per-thread single-step state */
                state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
                if (state == NULL) {
                        pr_err("Out of kernel memory trying to single-step\n");
                        return;
                }

                /* allocate a cache line of writable, executable memory */
                buffer = (void __user *) vm_mmap(NULL, 0, 64,
                                          PROT_EXEC | PROT_READ | PROT_WRITE,
                                          MAP_PRIVATE | MAP_ANONYMOUS,
                                          0);

                if (IS_ERR((void __force *)buffer)) {
                        kfree(state);
                        pr_err("Out of kernel pages trying to single-step\n");
                        return;
                }

                state->buffer = buffer;
                state->is_enabled = 0;

                info->step_state = state;

                /* Validate our stored instruction patterns */
                BUG_ON(get_Opcode_X1(__single_step_addli_insn) !=
                       ADDLI_OPCODE_X1);
                BUG_ON(get_Opcode_X1(__single_step_auli_insn) !=
                       AULI_OPCODE_X1);
                BUG_ON(get_SrcA_X1(__single_step_addli_insn) != TREG_ZERO);
                BUG_ON(get_Dest_X1(__single_step_addli_insn) != 0);
                BUG_ON(get_JOffLong_X1(__single_step_j_insn) != 0);
        }

        /*
         * If we are returning from a syscall, we still haven't hit the
         * "ill" for the swint1 instruction.  So back the PC up to be
         * pointing at the swint1, but we'll actually return directly
         * back to the "ill" so we come back in via SIGILL as if we
         * had "executed" the swint1 without ever being in kernel space.
         */
        if (regs->faultnum == INT_SWINT_1)
                regs->pc -= 8;

        pc = (tile_bundle_bits __user *)(regs->pc);
        if (get_user(bundle, pc) != 0) {
                pr_err("Couldn't read instruction at %p trying to step\n", pc);
                return;
        }

        /* We'll follow the instruction with 2 ill op bundles */
        state->orig_pc = (unsigned long)pc;
        state->next_pc = (unsigned long)(pc + 1);
        state->branch_next_pc = 0;
        state->update = 0;

        if (!(bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK)) {
                /* two-wide bundle: check for control flow */
                int opcode = get_Opcode_X1(bundle);

                switch (opcode) {
                /* branches */
                case BRANCH_OPCODE_X1:
                {
                        s32 offset = signExtend17(get_BrOff_X1(bundle));

                        /*
                         * For branches, we use a rewriting trick to let the
                         * hardware evaluate whether the branch is taken or
                         * untaken.  We record the target offset and then
                         * rewrite the branch instruction to target 1 insn
                         * ahead if the branch is taken.  We then follow the
                         * rewritten branch with two bundles, each containing
                         * an "ill" instruction.  The supervisor examines the
                         * pc after the single step code is executed, and if
                         * the pc is the first ill instruction, then the
                         * branch (if any) was not taken.  If the pc is the
                         * second ill instruction, then the branch was
                         * taken.  The new pc is computed for these cases, and
                         * inserted into the registers for the thread.  If
                         * the pc is the start of the single step code, then
                         * an exception or interrupt was taken before the
                         * code started processing, and the same "original"
                         * pc is restored.  This change, different from the
                         * original implementation, has the advantage of
                         * executing a single user instruction.
                         */
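                        /*
                         * For example, with the buffer laid out as
                         * {branch}{ill}{ill}, the post-step pc decodes as:
                         *   pc == buffer:     interrupted early; retry
                         *   pc == buffer + 1: fell through; branch not taken
                         *   pc == buffer + 2: branch taken (offset 2 below)
                         */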
                        state->branch_next_pc = (unsigned long)(pc + offset);

                        /* rewrite branch offset to go forward one bundle */
                        bundle = set_BrOff_X1(bundle, 2);
                }
                break;

                /* jumps */
                case JALB_OPCODE_X1:
                case JALF_OPCODE_X1:
                        state->update = 1;
                        state->next_pc =
                                (unsigned long) (pc + get_JOffLong_X1(bundle));
                        break;

                case JB_OPCODE_X1:
                case JF_OPCODE_X1:
                        state->next_pc =
                                (unsigned long) (pc + get_JOffLong_X1(bundle));
                        bundle = nop_X1(bundle);
                        break;

                case SPECIAL_0_OPCODE_X1:
                        switch (get_RRROpcodeExtension_X1(bundle)) {
                        /* jump-register */
                        case JALRP_SPECIAL_0_OPCODE_X1:
                        case JALR_SPECIAL_0_OPCODE_X1:
                                state->update = 1;
                                state->next_pc =
                                        regs->regs[get_SrcA_X1(bundle)];
                                break;

                        case JRP_SPECIAL_0_OPCODE_X1:
                        case JR_SPECIAL_0_OPCODE_X1:
                                state->next_pc =
                                        regs->regs[get_SrcA_X1(bundle)];
                                bundle = nop_X1(bundle);
                                break;

                        case LNK_SPECIAL_0_OPCODE_X1:
                                state->update = 1;
                                target_reg = get_Dest_X1(bundle);
                                break;

                        /* stores */
                        case SH_SPECIAL_0_OPCODE_X1:
                                mem_op = MEMOP_STORE;
                                size = 2;
                                break;

                        case SW_SPECIAL_0_OPCODE_X1:
                                mem_op = MEMOP_STORE;
                                size = 4;
                                break;
                        }
                        break;

                /* loads and iret */
                case SHUN_0_OPCODE_X1:
                        if (get_UnShOpcodeExtension_X1(bundle) ==
                            UN_0_SHUN_0_OPCODE_X1) {
                                switch (get_UnOpcodeExtension_X1(bundle)) {
                                case LH_UN_0_SHUN_0_OPCODE_X1:
                                        mem_op = MEMOP_LOAD;
                                        size = 2;
                                        sign_ext = 1;
                                        break;

                                case LH_U_UN_0_SHUN_0_OPCODE_X1:
                                        mem_op = MEMOP_LOAD;
                                        size = 2;
                                        sign_ext = 0;
                                        break;

                                case LW_UN_0_SHUN_0_OPCODE_X1:
                                        mem_op = MEMOP_LOAD;
                                        size = 4;
                                        break;

                                case IRET_UN_0_SHUN_0_OPCODE_X1:
                                {
                                        unsigned long ex0_0 = __insn_mfspr(
                                                SPR_EX_CONTEXT_0_0);
                                        unsigned long ex0_1 = __insn_mfspr(
                                                SPR_EX_CONTEXT_0_1);
                                        /*
                                         * Special-case it if we're iret'ing
                                         * to PL0 again.  Otherwise just let
                                         * it run and it will generate SIGILL.
                                         */
                                        if (EX1_PL(ex0_1) == USER_PL) {
                                                state->next_pc = ex0_0;
                                                regs->ex1 = ex0_1;
                                                bundle = nop_X1(bundle);
                                        }
                                }
                                }
                        }
                        break;

#if CHIP_HAS_WH64()
                /* postincrement operations */
                case IMM_0_OPCODE_X1:
                        switch (get_ImmOpcodeExtension_X1(bundle)) {
                        case LWADD_IMM_0_OPCODE_X1:
                                mem_op = MEMOP_LOAD_POSTINCR;
                                size = 4;
                                break;

                        case LHADD_IMM_0_OPCODE_X1:
                                mem_op = MEMOP_LOAD_POSTINCR;
                                size = 2;
                                sign_ext = 1;
                                break;

                        case LHADD_U_IMM_0_OPCODE_X1:
                                mem_op = MEMOP_LOAD_POSTINCR;
                                size = 2;
                                sign_ext = 0;
                                break;

                        case SWADD_IMM_0_OPCODE_X1:
                                mem_op = MEMOP_STORE_POSTINCR;
                                size = 4;
                                break;

                        case SHADD_IMM_0_OPCODE_X1:
                                mem_op = MEMOP_STORE_POSTINCR;
                                size = 2;
                                break;

                        default:
                                break;
                        }
                        break;
#endif /* CHIP_HAS_WH64() */
                }

                if (state->update) {
                        /*
                         * Get an available register.  We start with a
                         * bitmask with 1's for available registers.
                         * We truncate to the low 32 registers since
                         * we are guaranteed to have set bits in the
                         * low 32 bits, then use ctz to pick the first.
                         */
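                        /*
                         * E.g., if the X0 slot uses r3/r4/r5, bits 3-5 are
                         * cleared and ctz picks r0 as the scratch register.
                         */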
                        u32 mask = (u32) ~((1ULL << get_Dest_X0(bundle)) |
                                           (1ULL << get_SrcA_X0(bundle)) |
                                           (1ULL << get_SrcB_X0(bundle)) |
                                           (1ULL << target_reg));
                        temp_reg = __builtin_ctz(mask);
                        state->update_reg = temp_reg;
                        state->update_value = regs->regs[temp_reg];
                        regs->regs[temp_reg] = (unsigned long) (pc+1);
                        regs->flags |= PT_FLAGS_RESTORE_REGS;
                        bundle = move_X1(bundle, target_reg, temp_reg);
                }
        } else {
                int opcode = get_Opcode_Y2(bundle);

                switch (opcode) {
                /* loads */
                case LH_OPCODE_Y2:
                        mem_op = MEMOP_LOAD;
                        size = 2;
                        sign_ext = 1;
                        break;

                case LH_U_OPCODE_Y2:
                        mem_op = MEMOP_LOAD;
                        size = 2;
                        sign_ext = 0;
                        break;

                case LW_OPCODE_Y2:
                        mem_op = MEMOP_LOAD;
                        size = 4;
                        break;

                /* stores */
                case SH_OPCODE_Y2:
                        mem_op = MEMOP_STORE;
                        size = 2;
                        break;

                case SW_OPCODE_Y2:
                        mem_op = MEMOP_STORE;
                        size = 4;
                        break;
                }
        }

        /*
         * Check if we need to rewrite an unaligned load/store.
         * Returning zero is a special value meaning we need to SIGSEGV.
         */
        if (mem_op != MEMOP_NONE && unaligned_fixup >= 0) {
                bundle = rewrite_load_store_unaligned(state, bundle, regs,
                                                      mem_op, size, sign_ext);
                if (bundle == 0)
                        return;
        }

        /* write the bundle to our execution area */
        buffer = state->buffer;
        err = __put_user(bundle, buffer++);

        /*
         * If we're really single-stepping, we take an INT_ILL after.
         * If we're just handling an unaligned access, we can just
         * jump directly back to where we were in user code.
         */
        if (is_single_step) {
                err |= __put_user(__single_step_ill_insn, buffer++);
                err |= __put_user(__single_step_ill_insn, buffer++);
        } else {
                long delta;

                if (state->update) {
                        /* We have some state to update; do it inline */
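                        /*
                         * The addli/auli pair below materializes update_value
                         * in update_reg.  E.g., for value 0x12345678:
                         *   addli rN, zero, 0x5678  (low half, sign-extended)
                         *   auli  rN, rN, 0x1234    (ha16; the +0x8000 below
                         *                            cancels the sign-extension)
                         */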
                        int ha16;
                        bundle = __single_step_addli_insn;
                        bundle |= create_Dest_X1(state->update_reg);
                        bundle |= create_Imm16_X1(state->update_value);
                        err |= __put_user(bundle, buffer++);
                        bundle = __single_step_auli_insn;
                        bundle |= create_Dest_X1(state->update_reg);
                        bundle |= create_SrcA_X1(state->update_reg);
                        ha16 = (state->update_value + 0x8000) >> 16;
                        bundle |= create_Imm16_X1(ha16);
                        err |= __put_user(bundle, buffer++);
                        state->update = 0;
                }

                /* End with a jump back to the next instruction */
                delta = ((regs->pc + TILE_BUNDLE_SIZE_IN_BYTES) -
                        (unsigned long)buffer) >>
                        TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES;
                bundle = __single_step_j_insn;
                bundle |= create_JOffLong_X1(delta);
                err |= __put_user(bundle, buffer++);
        }

        if (err) {
                pr_err("Fault when writing to single-step buffer\n");
                return;
        }

        /*
         * Flush the buffer.
         * We do a local flush only, since this is a thread-specific buffer.
         */
        __flush_icache_range((unsigned long)state->buffer,
                             (unsigned long)buffer);

        /* Indicate enabled */
        state->is_enabled = is_single_step;
        regs->pc = (unsigned long)state->buffer;

        /* Fault immediately if we are coming back from a syscall. */
        if (regs->faultnum == INT_SWINT_1)
                regs->pc += 8;
}

#else
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <arch/spr_def.h>

static DEFINE_PER_CPU(unsigned long, ss_saved_pc);

/*
 * Called directly on the occasion of an interrupt.
 *
 * If the process doesn't have single step set, then we use this as an
 * opportunity to turn single step off.
 *
 * It has been mentioned that we could conditionally turn off single stepping
 * on each entry into the kernel and rely on single_step_once to turn it
 * on for the processes that matter (as we already do), but this
 * implementation is somewhat more efficient in that we muck with registers
 * once on a bum interrupt rather than on every entry into the kernel.
 *
 * If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
 * so we have to run through this process again before we can say that an
 * instruction has executed.
 *
 * swint will set CANCELED, but it's a legitimate instruction.  Fortunately
 * it changes the PC.  If it hasn't changed, then we know that the interrupt
 * wasn't generated by swint and we'll need to run this process again before
 * we can say an instruction has executed.
 *
 * If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
 * on with our lives.
 */

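/*
 * In short:
 *   TIF_SINGLESTEP clear          -> disable the step engine and return
 *   pc changed or CANCELED clear  -> an instruction really executed, so
 *                                    notify with SIGTRAP
 *   otherwise                     -> the step was cancelled by an
 *                                    interrupt; nothing to report yet
 */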
void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
{
        unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
        struct thread_info *info = (void *)current_thread_info();
        int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
        unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

        if (is_single_step == 0) {
                __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);

        } else if ((*ss_pc != regs->pc) ||
                   (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {

                ptrace_notify(SIGTRAP);
                control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
                control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
                __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
        }
}

/*
 * Called from need_singlestep.  Set up the control registers and the enable
 * register, then return back.
 */
void single_step_once(struct pt_regs *regs)
{
        unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
        unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

        *ss_pc = regs->pc;
        control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
        control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
        __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
        __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
}

void single_step_execve(void)
{
        /* Nothing */
}

#endif /* !__tilegx__ */