Commit | Line | Data |
---|---|---|
82da3ff8 IM |
1 | /* |
2 | * This program is free software; you can redistribute it and/or modify it | |
3 | * under the terms of the GNU General Public License as published by the | |
4 | * Free Software Foundation; either version 2, or (at your option) any | |
5 | * later version. | |
6 | * | |
7 | * This program is distributed in the hope that it will be useful, but | |
8 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
10 | * General Public License for more details. | |
11 | * | |
12 | */ | |
13 | ||
14 | /* | |
15 | * Copyright (C) 2004 Amit S. Kale <amitkale@linsyssoft.com> | |
16 | * Copyright (C) 2000-2001 VERITAS Software Corporation. | |
17 | * Copyright (C) 2002 Andi Kleen, SuSE Labs | |
18 | * Copyright (C) 2004 LinSysSoft Technologies Pvt. Ltd. | |
19 | * Copyright (C) 2007 MontaVista Software, Inc. | |
20 | * Copyright (C) 2007-2008 Jason Wessel, Wind River Systems, Inc. | |
21 | */ | |
22 | /**************************************************************************** | |
23 | * Contributor: Lake Stevens Instrument Division$ | |
24 | * Written by: Glenn Engel $ | |
25 | * Updated by: Amit Kale<akale@veritas.com> | |
26 | * Updated by: Tom Rini <trini@kernel.crashing.org> | |
27 | * Updated by: Jason Wessel <jason.wessel@windriver.com> | |
28 | * Modified for 386 by Jim Kingdon, Cygnus Support. | |
29 | * Original kgdb, compatibility with 2.1.xx kernel by | |
30 | * David Grothe <dave@gcom.com> | |
31 | * Integrated into 2.2.5 kernel by Tigran Aivazian <tigran@sco.com> | |
32 | * X86_64 changes from Andi Kleen's patch merged by Jim Houston | |
33 | */ | |
34 | #include <linux/spinlock.h> | |
35 | #include <linux/kdebug.h> | |
36 | #include <linux/string.h> | |
37 | #include <linux/kernel.h> | |
38 | #include <linux/ptrace.h> | |
39 | #include <linux/sched.h> | |
40 | #include <linux/delay.h> | |
41 | #include <linux/kgdb.h> | |
82da3ff8 | 42 | #include <linux/smp.h> |
d3597524 | 43 | #include <linux/nmi.h> |
cc096749 | 44 | #include <linux/hw_breakpoint.h> |
3751d3e8 JW |
45 | #include <linux/uaccess.h> |
46 | #include <linux/memory.h> | |
82da3ff8 | 47 | |
62edab90 | 48 | #include <asm/debugreg.h> |
82da3ff8 | 49 | #include <asm/apicdef.h> |
7b6aa335 | 50 | #include <asm/apic.h> |
166d7514 | 51 | #include <asm/nmi.h> |
82da3ff8 | 52 | |
/*
 * GDB register descriptions for x86: register name, size in bytes, and
 * offset into struct pt_regs.  An offset of -1 marks a register that has
 * no pt_regs slot and must be synthesized (see dbg_get_reg()).
 */
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{
#ifdef CONFIG_X86_32
	{ "ax", 4, offsetof(struct pt_regs, ax) },
	{ "cx", 4, offsetof(struct pt_regs, cx) },
	{ "dx", 4, offsetof(struct pt_regs, dx) },
	{ "bx", 4, offsetof(struct pt_regs, bx) },
	{ "sp", 4, offsetof(struct pt_regs, sp) },
	{ "bp", 4, offsetof(struct pt_regs, bp) },
	{ "si", 4, offsetof(struct pt_regs, si) },
	{ "di", 4, offsetof(struct pt_regs, di) },
	{ "ip", 4, offsetof(struct pt_regs, ip) },
	{ "flags", 4, offsetof(struct pt_regs, flags) },
	{ "cs", 4, offsetof(struct pt_regs, cs) },
	{ "ss", 4, offsetof(struct pt_regs, ss) },
	{ "ds", 4, offsetof(struct pt_regs, ds) },
	{ "es", 4, offsetof(struct pt_regs, es) },
#else
	{ "ax", 8, offsetof(struct pt_regs, ax) },
	{ "bx", 8, offsetof(struct pt_regs, bx) },
	{ "cx", 8, offsetof(struct pt_regs, cx) },
	{ "dx", 8, offsetof(struct pt_regs, dx) },
	{ "si", 8, offsetof(struct pt_regs, si) },
	{ "di", 8, offsetof(struct pt_regs, di) },
	{ "bp", 8, offsetof(struct pt_regs, bp) },
	{ "sp", 8, offsetof(struct pt_regs, sp) },
	{ "r8", 8, offsetof(struct pt_regs, r8) },
	{ "r9", 8, offsetof(struct pt_regs, r9) },
	{ "r10", 8, offsetof(struct pt_regs, r10) },
	{ "r11", 8, offsetof(struct pt_regs, r11) },
	{ "r12", 8, offsetof(struct pt_regs, r12) },
	{ "r13", 8, offsetof(struct pt_regs, r13) },
	{ "r14", 8, offsetof(struct pt_regs, r14) },
	{ "r15", 8, offsetof(struct pt_regs, r15) },
	{ "ip", 8, offsetof(struct pt_regs, ip) },
	/* flags/cs/ss are 32-bit slots in gdb's 64-bit register layout */
	{ "flags", 4, offsetof(struct pt_regs, flags) },
	{ "cs", 4, offsetof(struct pt_regs, cs) },
	{ "ss", 4, offsetof(struct pt_regs, ss) },
	{ "ds", 4, -1 },
	{ "es", 4, -1 },
#endif
	{ "fs", 4, -1 },
	{ "gs", 4, -1 },
};
97 | ||
98 | int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) | |
99 | { | |
100 | if ( | |
82da3ff8 | 101 | #ifdef CONFIG_X86_32 |
12bfa3de JW |
102 | regno == GDB_SS || regno == GDB_FS || regno == GDB_GS || |
103 | #endif | |
104 | regno == GDB_SP || regno == GDB_ORIG_AX) | |
105 | return 0; | |
106 | ||
107 | if (dbg_reg_def[regno].offset != -1) | |
108 | memcpy((void *)regs + dbg_reg_def[regno].offset, mem, | |
109 | dbg_reg_def[regno].size); | |
110 | return 0; | |
111 | } | |
112 | ||
/*
 * dbg_get_reg - Read one GDB register out of @regs into @mem.
 *
 * Returns the register's name, or NULL for an out-of-range @regno.
 * GDB_ORIG_AX lies outside dbg_reg_def[], so it must be handled
 * before the bounds check.
 */
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
	if (regno == GDB_ORIG_AX) {
		memcpy(mem, &regs->orig_ax, sizeof(regs->orig_ax));
		return "orig_ax";
	}
	if (regno >= DBG_MAX_REG_NUM || regno < 0)
		return NULL;

	if (dbg_reg_def[regno].offset != -1)
		memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
		       dbg_reg_def[regno].size);

#ifdef CONFIG_X86_32
	/*
	 * On 32-bit, some values are not (or not usefully) present in a
	 * kernel-mode pt_regs and are synthesized instead.
	 */
	switch (regno) {
	case GDB_SS:
		if (!user_mode(regs))
			*(unsigned long *)mem = __KERNEL_DS;
		break;
	case GDB_SP:
		if (!user_mode(regs))
			*(unsigned long *)mem = kernel_stack_pointer(regs);
		break;
	case GDB_GS:
	case GDB_FS:
		/* fs/gs have no pt_regs slot; report a fixed dummy value */
		*(unsigned long *)mem = 0xFFFF;
		break;
	}
#endif
	return dbg_reg_def[regno].name;
}
144 | ||
/**
 * sleeping_thread_to_gdb_regs - Convert ptrace regs to GDB regs
 * @gdb_regs: A pointer to hold the registers in the order GDB wants.
 * @p: The &struct task_struct of the desired process.
 *
 * Convert the register values of the sleeping process in @p to
 * the format that GDB expects.
 * This function is called when kgdb does not have access to the
 * &struct pt_regs and therefore it should fill the gdb registers
 * @gdb_regs with what has been saved in &struct thread_struct
 * thread field during switch_to.
 */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
#ifndef CONFIG_X86_32
	/* flags/cs/ss occupy 32-bit slots in gdb's 64-bit layout */
	u32 *gdb_regs32 = (u32 *)gdb_regs;
#endif
	/* General registers are not available for a sleeping task: report 0 */
	gdb_regs[GDB_AX] = 0;
	gdb_regs[GDB_BX] = 0;
	gdb_regs[GDB_CX] = 0;
	gdb_regs[GDB_DX] = 0;
	gdb_regs[GDB_SI] = 0;
	gdb_regs[GDB_DI] = 0;
	gdb_regs[GDB_BP] = *(unsigned long *)p->thread.sp;
#ifdef CONFIG_X86_32
	gdb_regs[GDB_DS] = __KERNEL_DS;
	gdb_regs[GDB_ES] = __KERNEL_DS;
	gdb_regs[GDB_PS] = 0;
	gdb_regs[GDB_CS] = __KERNEL_CS;
	gdb_regs[GDB_PC] = p->thread.ip;
	gdb_regs[GDB_SS] = __KERNEL_DS;
	gdb_regs[GDB_FS] = 0xFFFF;
	gdb_regs[GDB_GS] = 0xFFFF;
#else
	/*
	 * NOTE(review): assumes the saved flags word sits at sp + 8 in the
	 * switch_to() stack frame — verify against the thread stack layout.
	 */
	gdb_regs32[GDB_PS] = *(unsigned long *)(p->thread.sp + 8);
	gdb_regs32[GDB_CS] = __KERNEL_CS;
	gdb_regs32[GDB_SS] = __KERNEL_DS;
	gdb_regs[GDB_PC] = 0;
	gdb_regs[GDB_R8] = 0;
	gdb_regs[GDB_R9] = 0;
	gdb_regs[GDB_R10] = 0;
	gdb_regs[GDB_R11] = 0;
	gdb_regs[GDB_R12] = 0;
	gdb_regs[GDB_R13] = 0;
	gdb_regs[GDB_R14] = 0;
	gdb_regs[GDB_R15] = 0;
#endif
	gdb_regs[GDB_SP] = p->thread.sp;
}
194 | ||
/*
 * Shadow state for the x86 hardware debug registers.  Each slot mirrors
 * one DR0-DR3 breakpoint.  After boot the breakpoints are programmed via
 * the per-cpu perf events in @pev; during early debugging (dbg_is_early)
 * the debug registers are written directly and early_dr7 shadows DR7.
 */
static struct hw_breakpoint {
	unsigned enabled;
	unsigned long addr;
	int len;
	int type;
	struct perf_event * __percpu *pev;
} breakinfo[HBP_NUM];

/* Software copy of DR7 used while dbg_is_early (perf not yet usable). */
static unsigned long early_dr7;
204 | ||
64e9ee30 JW |
/*
 * kgdb_correct_hw_break - Re-arm all enabled hw breakpoints on this cpu.
 *
 * Called when leaving the debugger.  In early debug mode DR<n> and DR7
 * are programmed directly; otherwise the per-cpu perf event for each
 * enabled slot is updated and re-installed.
 */
static void kgdb_correct_hw_break(void)
{
	int breakno;

	for (breakno = 0; breakno < HBP_NUM; breakno++) {
		struct perf_event *bp;
		struct arch_hw_breakpoint *info;
		int val;
		int cpu = raw_smp_processor_id();
		if (!breakinfo[breakno].enabled)
			continue;
		if (dbg_is_early) {
			/* perf is not up yet: write the debug regs by hand */
			set_debugreg(breakinfo[breakno].addr, breakno);
			early_dr7 |= encode_dr7(breakno,
						breakinfo[breakno].len,
						breakinfo[breakno].type);
			set_debugreg(early_dr7, 7);
			continue;
		}
		bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu);
		info = counter_arch_bp(bp);
		if (bp->attr.disabled != 1)
			continue;	/* already installed */
		/* Sync the perf attrs and arch info with our shadow slot */
		bp->attr.bp_addr = breakinfo[breakno].addr;
		bp->attr.bp_len = breakinfo[breakno].len;
		bp->attr.bp_type = breakinfo[breakno].type;
		info->address = breakinfo[breakno].addr;
		info->len = breakinfo[breakno].len;
		info->type = breakinfo[breakno].type;
		val = arch_install_hw_breakpoint(bp);
		if (!val)
			bp->attr.disabled = 0;
	}
	if (!dbg_is_early)
		hw_breakpoint_restore();
}
241 | ||
/*
 * hw_break_reserve_slot - Reserve a breakpoint slot for @breakno on
 * every online cpu, rolling back on partial failure.
 *
 * Returns 0 on success, -1 if any cpu's slot could not be reserved.
 */
static int hw_break_reserve_slot(int breakno)
{
	int cpu;
	int cnt = 0;
	struct perf_event **pevent;

	/* Early debugging programs debug regs directly; nothing to reserve. */
	if (dbg_is_early)
		return 0;

	for_each_online_cpu(cpu) {
		cnt++;
		pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
		if (dbg_reserve_bp_slot(*pevent))
			goto fail;
	}

	return 0;

fail:
	/*
	 * Release only the slots reserved so far.  cnt counted the cpu
	 * that failed too, so the pre-decrement stops one iteration
	 * before reaching it.
	 */
	for_each_online_cpu(cpu) {
		cnt--;
		if (!cnt)
			break;
		pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
		dbg_release_bp_slot(*pevent);
	}
	return -1;
}
270 | ||
271 | static int hw_break_release_slot(int breakno) | |
272 | { | |
273 | struct perf_event **pevent; | |
274 | int cpu; | |
275 | ||
031acd8c JW |
276 | if (dbg_is_early) |
277 | return 0; | |
278 | ||
5352ae63 JW |
279 | for_each_online_cpu(cpu) { |
280 | pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); | |
281 | if (dbg_release_bp_slot(*pevent)) | |
282 | /* | |
0d2eb44f | 283 | * The debugger is responsible for handing the retry on |
5352ae63 JW |
284 | * remove failure. |
285 | */ | |
286 | return -1; | |
287 | } | |
288 | return 0; | |
289 | } | |
290 | ||
64e9ee30 JW |
291 | static int |
292 | kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype) | |
293 | { | |
294 | int i; | |
295 | ||
df493935 | 296 | for (i = 0; i < HBP_NUM; i++) |
64e9ee30 JW |
297 | if (breakinfo[i].addr == addr && breakinfo[i].enabled) |
298 | break; | |
df493935 | 299 | if (i == HBP_NUM) |
64e9ee30 JW |
300 | return -1; |
301 | ||
5352ae63 JW |
302 | if (hw_break_release_slot(i)) { |
303 | printk(KERN_ERR "Cannot remove hw breakpoint at %lx\n", addr); | |
304 | return -1; | |
305 | } | |
64e9ee30 JW |
306 | breakinfo[i].enabled = 0; |
307 | ||
308 | return 0; | |
309 | } | |
310 | ||
/*
 * kgdb_remove_all_hw_break - Tear down every enabled hw breakpoint.
 *
 * A slot still installed on this cpu is only uninstalled and marked
 * disabled (its per-cpu slots remain reserved); an already-disabled
 * slot has its reservation released (or its early_dr7 bits cleared in
 * early debug mode) and is fully forgotten.
 */
static void kgdb_remove_all_hw_break(void)
{
	int i;
	int cpu = raw_smp_processor_id();
	struct perf_event *bp;

	for (i = 0; i < HBP_NUM; i++) {
		if (!breakinfo[i].enabled)
			continue;
		bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
		if (!bp->attr.disabled) {
			arch_uninstall_hw_breakpoint(bp);
			bp->attr.disabled = 1;
			continue;
		}
		if (dbg_is_early)
			early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
						 breakinfo[i].type);
		else if (hw_break_release_slot(i))
			printk(KERN_ERR "KGDB: hw bpt remove failed %lx\n",
			       breakinfo[i].addr);
		breakinfo[i].enabled = 0;
	}
}
335 | ||
/*
 * kgdb_set_hw_break - Claim a free breakinfo[] slot for a hardware
 * breakpoint or watchpoint at @addr.
 *
 * @len is forced to 1 for execute breakpoints.  Returns 0 on success,
 * -1 when no slot is free, the type/length is unsupported, or the
 * per-cpu perf slots cannot be reserved.
 */
static int
kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
{
	int i;

	/* Find the first unused slot. */
	for (i = 0; i < HBP_NUM; i++)
		if (!breakinfo[i].enabled)
			break;
	if (i == HBP_NUM)
		return -1;

	switch (bptype) {
	case BP_HARDWARE_BREAKPOINT:
		/* Execute breakpoints are always one byte long. */
		len = 1;
		breakinfo[i].type = X86_BREAKPOINT_EXECUTE;
		break;
	case BP_WRITE_WATCHPOINT:
		breakinfo[i].type = X86_BREAKPOINT_WRITE;
		break;
	case BP_ACCESS_WATCHPOINT:
		breakinfo[i].type = X86_BREAKPOINT_RW;
		break;
	default:
		return -1;
	}
	switch (len) {
	case 1:
		breakinfo[i].len = X86_BREAKPOINT_LEN_1;
		break;
	case 2:
		breakinfo[i].len = X86_BREAKPOINT_LEN_2;
		break;
	case 4:
		breakinfo[i].len = X86_BREAKPOINT_LEN_4;
		break;
#ifdef CONFIG_X86_64
	case 8:
		breakinfo[i].len = X86_BREAKPOINT_LEN_8;
		break;
#endif
	default:
		return -1;
	}
	breakinfo[i].addr = addr;
	if (hw_break_reserve_slot(i)) {
		/* Reservation failed on some cpu: leave the slot unused. */
		breakinfo[i].addr = 0;
		return -1;
	}
	breakinfo[i].enabled = 1;

	return 0;
}
388 | ||
/**
 * kgdb_disable_hw_debug - Disable hardware debugging while we are in kgdb.
 * @regs: Current &struct pt_regs.
 *
 * This function will be called if the particular architecture must
 * disable hardware debugging while it is processing gdb packets or
 * handling exception.
 */
static void kgdb_disable_hw_debug(struct pt_regs *regs)
{
	int i;
	int cpu = raw_smp_processor_id();
	struct perf_event *bp;

	/* Disable hardware debugging while we are in kgdb: */
	set_debugreg(0UL, 7);
	for (i = 0; i < HBP_NUM; i++) {
		if (!breakinfo[i].enabled)
			continue;
		if (dbg_is_early) {
			/* Early mode: just drop the DR7 enable bits. */
			early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
						 breakinfo[i].type);
			continue;
		}
		bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
		if (bp->attr.disabled == 1)
			continue;
		arch_uninstall_hw_breakpoint(bp);
		bp->attr.disabled = 1;
	}
}
420 | ||
#ifdef CONFIG_SMP
/**
 * kgdb_roundup_cpus - Get other CPUs into a holding pattern
 * @flags: Current IRQ state
 *
 * On SMP systems, we need to get the attention of the other CPUs
 * and get them be in a known state. This should do what is needed
 * to get the other CPUs to call kgdb_wait(). Note that on some arches,
 * the NMI approach is not used for rounding up all the CPUs. For example,
 * in case of MIPS, smp_call_function() is used to roundup CPUs. In
 * this case, we have to make sure that interrupts are enabled before
 * calling smp_call_function(). The argument to this function is
 * the flags that will be used when restoring the interrupts. There is
 * local_irq_save() call before kgdb_roundup_cpus().
 *
 * On non-SMP systems, this is not called.
 */
void kgdb_roundup_cpus(unsigned long flags)
{
	/* NMI IPI so cpus are stopped even with interrupts disabled. */
	apic->send_IPI_allbutself(APIC_DM_NMI);
}
#endif
443 | ||
/**
 * kgdb_arch_handle_exception - Handle architecture specific GDB packets.
 * @e_vector: The error vector of the exception that happened.
 * @signo: The signal number of the exception that happened.
 * @err_code: The error code of the exception that happened.
 * @remcomInBuffer: The buffer of the packet we have read.
 * @remcomOutBuffer: The buffer of %BUFMAX bytes to write a packet into.
 * @linux_regs: The &struct pt_regs of the current process.
 *
 * This function MUST handle the 'c' and 's' command packets,
 * as well packets to set / remove a hardware breakpoint, if used.
 * If there are additional packets which the hardware needs to handle,
 * they are handled here. The code should return -1 if it wants to
 * process more packets, and a %0 or %1 if it wants to exit from the
 * kgdb callback.
 */
int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
			       char *remcomInBuffer, char *remcomOutBuffer,
			       struct pt_regs *linux_regs)
{
	unsigned long addr;
	char *ptr;

	switch (remcomInBuffer[0]) {
	case 'c':
	case 's':
		/* try to read optional parameter, pc unchanged if no parm */
		ptr = &remcomInBuffer[1];
		if (kgdb_hex2long(&ptr, &addr))
			linux_regs->ip = addr;
		/* fall through - continue/step share the TF handling below */
	case 'D':
	case 'k':
		/* clear the trace bit */
		linux_regs->flags &= ~X86_EFLAGS_TF;
		atomic_set(&kgdb_cpu_doing_single_step, -1);

		/* set the trace bit if we're stepping */
		if (remcomInBuffer[0] == 's') {
			linux_regs->flags |= X86_EFLAGS_TF;
			atomic_set(&kgdb_cpu_doing_single_step,
				   raw_smp_processor_id());
		}

		return 0;
	}

	/* this means that we do not want to exit from the handler: */
	return -1;
}
493 | ||
/*
 * single_step_cont - Resume after a stray single-step into user space.
 *
 * Issues a 'c' (continue) through kgdb_arch_handle_exception() and
 * reports the event as handled.  Always returns NOTIFY_STOP.
 */
static inline int
single_step_cont(struct pt_regs *regs, struct die_args *args)
{
	/*
	 * Single step exception from kernel space to user space so
	 * eat the exception and continue the process:
	 */
	printk(KERN_ERR "KGDB: trap/step from kernel to user space, "
			"resuming...\n");
	kgdb_arch_handle_exception(args->trapnr, args->signr,
				   args->err, "c", "", regs);
	/*
	 * Reset the BS bit in dr6 (pointed by args->err) to
	 * denote completion of processing
	 */
	(*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP;

	return NOTIFY_STOP;
}
513 | ||
/*
 * Per-cpu record that "this cpu entered the debugger via the roundup
 * NMI"; used to swallow the matching unknown NMI that may be delivered
 * afterwards.
 */
static DECLARE_BITMAP(was_in_debug_nmi, NR_CPUS);

/*
 * kgdb_nmi_handler - NMI entry point for the kgdb cpu roundup.
 *
 * NMI_LOCAL: if a debug session is active, park this cpu in the
 * debugger via kgdb_nmicallback().  NMI_UNKNOWN: absorb the follow-up
 * NMI on cpus that were rounded up.  Returns NMI_HANDLED when the NMI
 * was consumed, NMI_DONE otherwise.
 */
static int kgdb_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	int cpu;

	switch (cmd) {
	case NMI_LOCAL:
		if (atomic_read(&kgdb_active) != -1) {
			/* KGDB CPU roundup */
			cpu = raw_smp_processor_id();
			kgdb_nmicallback(cpu, regs);
			set_bit(cpu, was_in_debug_nmi);
			touch_nmi_watchdog();

			return NMI_HANDLED;
		}
		break;

	case NMI_UNKNOWN:
		cpu = raw_smp_processor_id();

		if (__test_and_clear_bit(cpu, was_in_debug_nmi))
			return NMI_HANDLED;

		break;
	default:
		/* do nothing */
		break;
	}
	return NMI_DONE;
}
546 | ||
/*
 * __kgdb_notify - Common die-notifier path into the kgdb core.
 *
 * Returns NOTIFY_STOP when kgdb consumed the event, NOTIFY_DONE when
 * the event should be passed on (user-mode faults, user single-step of
 * a syscall, or events kgdb_handle_exception() declines).
 */
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
	struct pt_regs *regs = args->regs;

	switch (cmd) {
	case DIE_DEBUG:
		if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
			if (user_mode(regs))
				return single_step_cont(regs, args);
			break;
		} else if (test_thread_flag(TIF_SINGLESTEP))
			/* This means a user thread is single stepping
			 * a system call which should be ignored
			 */
			return NOTIFY_DONE;
		/* fall through */
	default:
		if (user_mode(regs))
			return NOTIFY_DONE;
	}

	if (kgdb_handle_exception(args->trapnr, args->signr, cmd, regs))
		return NOTIFY_DONE;

	/* Must touch watchdog before return to normal operation */
	touch_nmi_watchdog();
	return NOTIFY_STOP;
}
575 | ||
f503b5ae JW |
576 | int kgdb_ll_trap(int cmd, const char *str, |
577 | struct pt_regs *regs, long err, int trap, int sig) | |
578 | { | |
579 | struct die_args args = { | |
580 | .regs = regs, | |
581 | .str = str, | |
582 | .err = err, | |
583 | .trapnr = trap, | |
584 | .signr = sig, | |
585 | ||
586 | }; | |
587 | ||
588 | if (!kgdb_io_module_registered) | |
589 | return NOTIFY_DONE; | |
590 | ||
591 | return __kgdb_notify(&args, cmd); | |
592 | } | |
f503b5ae | 593 | |
82da3ff8 IM |
/*
 * kgdb_notify - Die-notifier callback; runs __kgdb_notify() with
 * interrupts disabled on this cpu.
 */
static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
	unsigned long irq_flags;
	int rc;

	local_irq_save(irq_flags);
	rc = __kgdb_notify(ptr, cmd);
	local_irq_restore(irq_flags);
	return rc;
}
606 | ||
/* Die notifier wired into the trap path; registered in kgdb_arch_init(). */
static struct notifier_block kgdb_notifier = {
	.notifier_call	= kgdb_notify,
};
610 | ||
/**
 * kgdb_arch_init - Perform any architecture specific initialization.
 *
 * This function will handle the initialization of any architecture
 * specific callbacks: the die notifier plus the NMI handlers used for
 * cpu roundup.  Returns 0 on success or the failing registration's
 * error code, with everything registered so far unwound.
 */
int kgdb_arch_init(void)
{
	int retval;

	retval = register_die_notifier(&kgdb_notifier);
	if (retval)
		goto out;

	retval = register_nmi_handler(NMI_LOCAL, kgdb_nmi_handler,
					0, "kgdb");
	if (retval)
		goto out1;

	retval = register_nmi_handler(NMI_UNKNOWN, kgdb_nmi_handler,
					0, "kgdb");

	if (retval)
		goto out2;

	return retval;

	/* Unwind in reverse registration order on failure. */
out2:
	unregister_nmi_handler(NMI_LOCAL, "kgdb");
out1:
	unregister_die_notifier(&kgdb_notifier);
out:
	return retval;
}
645 | ||
a8b0ca17 | 646 | static void kgdb_hw_overflow_handler(struct perf_event *event, |
ba773f7c JW |
647 | struct perf_sample_data *data, struct pt_regs *regs) |
648 | { | |
fad99fac JW |
649 | struct task_struct *tsk = current; |
650 | int i; | |
651 | ||
652 | for (i = 0; i < 4; i++) | |
653 | if (breakinfo[i].enabled) | |
654 | tsk->thread.debugreg6 |= (DR_TRAP0 << i); | |
ba773f7c JW |
655 | } |
656 | ||
0b4b3827 | 657 | void kgdb_arch_late(void) |
82da3ff8 | 658 | { |
cc096749 | 659 | int i, cpu; |
cc096749 JW |
660 | struct perf_event_attr attr; |
661 | struct perf_event **pevent; | |
662 | ||
cc096749 JW |
663 | /* |
664 | * Pre-allocate the hw breakpoint structions in the non-atomic | |
665 | * portion of kgdb because this operation requires mutexs to | |
666 | * complete. | |
667 | */ | |
ab310b5e | 668 | hw_breakpoint_init(&attr); |
cc096749 | 669 | attr.bp_addr = (unsigned long)kgdb_arch_init; |
cc096749 JW |
670 | attr.bp_len = HW_BREAKPOINT_LEN_1; |
671 | attr.bp_type = HW_BREAKPOINT_W; | |
672 | attr.disabled = 1; | |
df493935 | 673 | for (i = 0; i < HBP_NUM; i++) { |
0b4b3827 JW |
674 | if (breakinfo[i].pev) |
675 | continue; | |
4dc0da86 | 676 | breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL); |
91b152aa | 677 | if (IS_ERR((void * __force)breakinfo[i].pev)) { |
0b4b3827 JW |
678 | printk(KERN_ERR "kgdb: Could not allocate hw" |
679 | "breakpoints\nDisabling the kernel debugger\n"); | |
cc096749 JW |
680 | breakinfo[i].pev = NULL; |
681 | kgdb_arch_exit(); | |
0b4b3827 | 682 | return; |
cc096749 JW |
683 | } |
684 | for_each_online_cpu(cpu) { | |
685 | pevent = per_cpu_ptr(breakinfo[i].pev, cpu); | |
686 | pevent[0]->hw.sample_period = 1; | |
ba773f7c | 687 | pevent[0]->overflow_handler = kgdb_hw_overflow_handler; |
cc096749 JW |
688 | if (pevent[0]->destroy != NULL) { |
689 | pevent[0]->destroy = NULL; | |
690 | release_bp_slot(*pevent); | |
691 | } | |
692 | } | |
693 | } | |
82da3ff8 IM |
694 | } |
695 | ||
696 | /** | |
697 | * kgdb_arch_exit - Perform any architecture specific uninitalization. | |
698 | * | |
699 | * This function will handle the uninitalization of any architecture | |
700 | * specific callbacks, for dynamic registration and unregistration. | |
701 | */ | |
702 | void kgdb_arch_exit(void) | |
703 | { | |
cc096749 JW |
704 | int i; |
705 | for (i = 0; i < 4; i++) { | |
706 | if (breakinfo[i].pev) { | |
707 | unregister_wide_hw_breakpoint(breakinfo[i].pev); | |
708 | breakinfo[i].pev = NULL; | |
709 | } | |
710 | } | |
9c48f1c6 DZ |
711 | unregister_nmi_handler(NMI_UNKNOWN, "kgdb"); |
712 | unregister_nmi_handler(NMI_LOCAL, "kgdb"); | |
82da3ff8 IM |
713 | unregister_die_notifier(&kgdb_notifier); |
714 | } | |
715 | ||
716 | /** | |
717 | * | |
718 | * kgdb_skipexception - Bail out of KGDB when we've been triggered. | |
719 | * @exception: Exception vector number | |
720 | * @regs: Current &struct pt_regs. | |
721 | * | |
722 | * On some architectures we need to skip a breakpoint exception when | |
723 | * it occurs after a breakpoint has been removed. | |
724 | * | |
725 | * Skip an int3 exception when it occurs after a breakpoint has been | |
726 | * removed. Backtrack eip by 1 since the int3 would have caused it to | |
727 | * increment by 1. | |
728 | */ | |
729 | int kgdb_skipexception(int exception, struct pt_regs *regs) | |
730 | { | |
731 | if (exception == 3 && kgdb_isremovedbreak(regs->ip - 1)) { | |
732 | regs->ip -= 1; | |
733 | return 1; | |
734 | } | |
735 | return 0; | |
736 | } | |
737 | ||
/*
 * kgdb_arch_pc - Report the breakpoint address for @exception.
 *
 * An int3 (vector 3) trap leaves ip just past the one-byte breakpoint
 * instruction, so back it up by one; otherwise return ip unchanged.
 */
unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	return (exception == 3) ? pc - 1 : pc;
}
744 | ||
/* Set the resume address requested by the debugger. */
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
	regs->ip = ip;
}
749 | ||
3751d3e8 JW |
/*
 * kgdb_arch_set_breakpoint - Plant a software breakpoint at @bpt->bpt_addr.
 *
 * Saves the original instruction bytes, then writes the int3 opcode.
 * If the direct probe_kernel_write() fails (e.g. write-protected text
 * with CONFIG_DEBUG_RODATA), falls back to text_poke() and marks the
 * breakpoint BP_POKE_BREAKPOINT so removal uses the same mechanism.
 * Returns 0 on success or a negative error code.
 */
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;
#ifdef CONFIG_DEBUG_RODATA
	char opc[BREAK_INSTR_SIZE];
#endif /* CONFIG_DEBUG_RODATA */

	bpt->type = BP_BREAKPOINT;
	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
				BREAK_INSTR_SIZE);
	if (err)
		return err;
	err = probe_kernel_write((char *)bpt->bpt_addr,
				 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
#ifdef CONFIG_DEBUG_RODATA
	if (!err)
		return err;	/* direct write succeeded; no poke needed */
	/*
	 * It is safe to call text_poke() because normal kernel execution
	 * is stopped on all cores, so long as the text_mutex is not locked.
	 */
	if (mutex_is_locked(&text_mutex))
		return -EBUSY;
	text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
		  BREAK_INSTR_SIZE);
	/* Read back and verify the poke actually took effect. */
	err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
	if (err)
		return err;
	if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
		return -EINVAL;
	bpt->type = BP_POKE_BREAKPOINT;
#endif /* CONFIG_DEBUG_RODATA */
	return err;
}
784 | ||
/*
 * kgdb_arch_remove_breakpoint - Restore the original bytes at a
 * previously planted breakpoint.
 *
 * Breakpoints installed via text_poke() (BP_POKE_BREAKPOINT) are
 * removed the same way, with a read-back verification; everything
 * else falls back to probe_kernel_write().  Returns 0 on success.
 */
int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
#ifdef CONFIG_DEBUG_RODATA
	int err;
	char opc[BREAK_INSTR_SIZE];

	if (bpt->type != BP_POKE_BREAKPOINT)
		goto knl_write;
	/*
	 * It is safe to call text_poke() because normal kernel execution
	 * is stopped on all cores, so long as the text_mutex is not locked.
	 */
	if (mutex_is_locked(&text_mutex))
		goto knl_write;
	text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
	err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
	if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
		goto knl_write;	/* poke failed; try the direct write */
	return err;
knl_write:
#endif /* CONFIG_DEBUG_RODATA */
	return probe_kernel_write((char *)bpt->bpt_addr,
				  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
}
809 | ||
/* x86 hooks handed to the arch-independent kgdb core. */
struct kgdb_arch arch_kgdb_ops = {
	/* Breakpoint instruction: int3 opcode */
	.gdb_bpt_instr		= { 0xcc },
	.flags			= KGDB_HW_BREAKPOINT,
	.set_hw_breakpoint	= kgdb_set_hw_break,
	.remove_hw_breakpoint	= kgdb_remove_hw_break,
	.disable_hw_break	= kgdb_disable_hw_debug,
	.remove_all_hw_break	= kgdb_remove_all_hw_break,
	.correct_hw_break	= kgdb_correct_hw_break,
};