/*
 * Kernel Debug Core
 *
 * Maintainer: Jason Wessel <jason.wessel@windriver.com>
 *
 * Copyright (C) 2000-2001 VERITAS Software Corporation.
 * Copyright (C) 2002-2004 Timesys Corporation
 * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
 * Copyright (C) 2004 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
 * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
 * Copyright (C) 2005-2009 Wind River Systems, Inc.
 * Copyright (C) 2007 MontaVista Software, Inc.
 * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Contributors at various stages not listed above:
 *  Jason Wessel ( jason.wessel@windriver.com )
 *  George Anzinger <george@mvista.com>
 *  Anurekh Saxena (anurekh.saxena@timesys.com)
 *  Lake Stevens Instrument Division (Glenn Engel)
 *  Jim Kingdon, Cygnus Support.
 *
 * Original KGDB stub: David Grothe <dave@gcom.com>,
 * Tigran Aivazian <tigran@sco.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */
30 | #include <linux/pid_namespace.h> | |
7c3078b6 | 31 | #include <linux/clocksource.h> |
dc7d5527 JW |
32 | #include <linux/interrupt.h> |
33 | #include <linux/spinlock.h> | |
34 | #include <linux/console.h> | |
35 | #include <linux/threads.h> | |
36 | #include <linux/uaccess.h> | |
37 | #include <linux/kernel.h> | |
38 | #include <linux/module.h> | |
39 | #include <linux/ptrace.h> | |
dc7d5527 JW |
40 | #include <linux/string.h> |
41 | #include <linux/delay.h> | |
42 | #include <linux/sched.h> | |
43 | #include <linux/sysrq.h> | |
44 | #include <linux/init.h> | |
45 | #include <linux/kgdb.h> | |
dcc78711 | 46 | #include <linux/kdb.h> |
dc7d5527 JW |
47 | #include <linux/pid.h> |
48 | #include <linux/smp.h> | |
49 | #include <linux/mm.h> | |
fb70b588 | 50 | #include <linux/rcupdate.h> |
dc7d5527 JW |
51 | |
52 | #include <asm/cacheflush.h> | |
53 | #include <asm/byteorder.h> | |
54 | #include <asm/atomic.h> | |
55 | #include <asm/system.h> | |
56 | ||
53197fc4 | 57 | #include "debug_core.h" |
dc7d5527 | 58 | |
53197fc4 | 59 | static int kgdb_break_asap; |
62fae312 | 60 | |
53197fc4 | 61 | struct debuggerinfo_struct kgdb_info[NR_CPUS]; |
dc7d5527 JW |
62 | |
63 | /** | |
64 | * kgdb_connected - Is a host GDB connected to us? | |
65 | */ | |
66 | int kgdb_connected; | |
67 | EXPORT_SYMBOL_GPL(kgdb_connected); | |
68 | ||
69 | /* All the KGDB handlers are installed */ | |
f503b5ae | 70 | int kgdb_io_module_registered; |
dc7d5527 JW |
71 | |
72 | /* Guard for recursive entry */ | |
73 | static int exception_level; | |
74 | ||
53197fc4 | 75 | struct kgdb_io *dbg_io_ops; |
dc7d5527 JW |
76 | static DEFINE_SPINLOCK(kgdb_registration_lock); |
77 | ||
78 | /* kgdb console driver is loaded */ | |
79 | static int kgdb_con_registered; | |
80 | /* determine if kgdb console output should be used */ | |
81 | static int kgdb_use_con; | |
0b4b3827 JW |
82 | /* Flag for alternate operations for early debugging */ |
83 | bool dbg_is_early = true; | |
dcc78711 JW |
84 | /* Next cpu to become the master debug core */ |
85 | int dbg_switch_cpu; | |
86 | ||
87 | /* Use kdb or gdbserver mode */ | |
a0de055c | 88 | int dbg_kdb_mode = 1; |
dc7d5527 JW |
89 | |
90 | static int __init opt_kgdb_con(char *str) | |
91 | { | |
92 | kgdb_use_con = 1; | |
93 | return 0; | |
94 | } | |
95 | ||
96 | early_param("kgdbcon", opt_kgdb_con); | |
97 | ||
98 | module_param(kgdb_use_con, int, 0644); | |
99 | ||
100 | /* | |
101 | * Holds information about breakpoints in a kernel. These breakpoints are | |
102 | * added and removed by gdb. | |
103 | */ | |
104 | static struct kgdb_bkpt kgdb_break[KGDB_MAX_BREAKPOINTS] = { | |
105 | [0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED } | |
106 | }; | |
107 | ||
108 | /* | |
109 | * The CPU# of the active CPU, or -1 if none: | |
110 | */ | |
111 | atomic_t kgdb_active = ATOMIC_INIT(-1); | |
dcc78711 | 112 | EXPORT_SYMBOL_GPL(kgdb_active); |
dc7d5527 JW |
113 | |
114 | /* | |
115 | * We use NR_CPUs not PERCPU, in case kgdb is used to debug early | |
116 | * bootup code (which might not have percpu set up yet): | |
117 | */ | |
118 | static atomic_t passive_cpu_wait[NR_CPUS]; | |
119 | static atomic_t cpu_in_kgdb[NR_CPUS]; | |
1cee5e35 | 120 | static atomic_t kgdb_break_tasklet_var; |
dc7d5527 JW |
121 | atomic_t kgdb_setting_breakpoint; |
122 | ||
123 | struct task_struct *kgdb_usethread; | |
124 | struct task_struct *kgdb_contthread; | |
125 | ||
126 | int kgdb_single_step; | |
53197fc4 | 127 | static pid_t kgdb_sstep_pid; |
dc7d5527 JW |
128 | |
129 | /* to keep track of the CPU which is doing the single stepping*/ | |
130 | atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1); | |
131 | ||
132 | /* | |
133 | * If you are debugging a problem where roundup (the collection of | |
134 | * all other CPUs) is a problem [this should be extremely rare], | |
135 | * then use the nokgdbroundup option to avoid roundup. In that case | |
136 | * the other CPUs might interfere with your debugging context, so | |
137 | * use this with care: | |
138 | */ | |
688b744d | 139 | static int kgdb_do_roundup = 1; |
dc7d5527 JW |
140 | |
141 | static int __init opt_nokgdbroundup(char *str) | |
142 | { | |
143 | kgdb_do_roundup = 0; | |
144 | ||
145 | return 0; | |
146 | } | |
147 | ||
148 | early_param("nokgdbroundup", opt_nokgdbroundup); | |
149 | ||
150 | /* | |
151 | * Finally, some KGDB code :-) | |
152 | */ | |
153 | ||
154 | /* | |
155 | * Weak aliases for breakpoint management, | |
156 | * can be overriden by architectures when needed: | |
157 | */ | |
dc7d5527 JW |
158 | int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr) |
159 | { | |
160 | int err; | |
161 | ||
162 | err = probe_kernel_read(saved_instr, (char *)addr, BREAK_INSTR_SIZE); | |
163 | if (err) | |
164 | return err; | |
165 | ||
166 | return probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr, | |
167 | BREAK_INSTR_SIZE); | |
168 | } | |
169 | ||
170 | int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle) | |
171 | { | |
172 | return probe_kernel_write((char *)addr, | |
173 | (char *)bundle, BREAK_INSTR_SIZE); | |
174 | } | |
175 | ||
a9b60bf4 JW |
176 | int __weak kgdb_validate_break_address(unsigned long addr) |
177 | { | |
178 | char tmp_variable[BREAK_INSTR_SIZE]; | |
179 | int err; | |
180 | /* Validate setting the breakpoint and then removing it. In the | |
181 | * remove fails, the kernel needs to emit a bad message because we | |
182 | * are deep trouble not being able to put things back the way we | |
183 | * found them. | |
184 | */ | |
185 | err = kgdb_arch_set_breakpoint(addr, tmp_variable); | |
186 | if (err) | |
187 | return err; | |
188 | err = kgdb_arch_remove_breakpoint(addr, tmp_variable); | |
189 | if (err) | |
190 | printk(KERN_ERR "KGDB: Critical breakpoint error, kernel " | |
191 | "memory destroyed at: %lx", addr); | |
192 | return err; | |
193 | } | |
194 | ||
dc7d5527 JW |
195 | unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs) |
196 | { | |
197 | return instruction_pointer(regs); | |
198 | } | |
199 | ||
200 | int __weak kgdb_arch_init(void) | |
201 | { | |
202 | return 0; | |
203 | } | |
204 | ||
b4b8ac52 JW |
205 | int __weak kgdb_skipexception(int exception, struct pt_regs *regs) |
206 | { | |
207 | return 0; | |
208 | } | |
209 | ||
dc7d5527 JW |
210 | /** |
211 | * kgdb_disable_hw_debug - Disable hardware debugging while we in kgdb. | |
212 | * @regs: Current &struct pt_regs. | |
213 | * | |
214 | * This function will be called if the particular architecture must | |
215 | * disable hardware debugging while it is processing gdb packets or | |
216 | * handling exception. | |
217 | */ | |
218 | void __weak kgdb_disable_hw_debug(struct pt_regs *regs) | |
219 | { | |
220 | } | |
221 | ||
dc7d5527 JW |
222 | /* |
223 | * Some architectures need cache flushes when we set/clear a | |
224 | * breakpoint: | |
225 | */ | |
226 | static void kgdb_flush_swbreak_addr(unsigned long addr) | |
227 | { | |
228 | if (!CACHE_FLUSH_IS_SAFE) | |
229 | return; | |
230 | ||
737a460f | 231 | if (current->mm && current->mm->mmap_cache) { |
dc7d5527 JW |
232 | flush_cache_range(current->mm->mmap_cache, |
233 | addr, addr + BREAK_INSTR_SIZE); | |
dc7d5527 | 234 | } |
1a9a3e76 JW |
235 | /* Force flush instruction cache if it was outside the mm */ |
236 | flush_icache_range(addr, addr + BREAK_INSTR_SIZE); | |
dc7d5527 JW |
237 | } |
238 | ||
239 | /* | |
240 | * SW breakpoint management: | |
241 | */ | |
53197fc4 | 242 | int dbg_activate_sw_breakpoints(void) |
dc7d5527 JW |
243 | { |
244 | unsigned long addr; | |
7f8b7ed6 JW |
245 | int error; |
246 | int ret = 0; | |
dc7d5527 JW |
247 | int i; |
248 | ||
249 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { | |
250 | if (kgdb_break[i].state != BP_SET) | |
251 | continue; | |
252 | ||
253 | addr = kgdb_break[i].bpt_addr; | |
254 | error = kgdb_arch_set_breakpoint(addr, | |
255 | kgdb_break[i].saved_instr); | |
7f8b7ed6 JW |
256 | if (error) { |
257 | ret = error; | |
258 | printk(KERN_INFO "KGDB: BP install failed: %lx", addr); | |
259 | continue; | |
260 | } | |
dc7d5527 JW |
261 | |
262 | kgdb_flush_swbreak_addr(addr); | |
263 | kgdb_break[i].state = BP_ACTIVE; | |
264 | } | |
7f8b7ed6 | 265 | return ret; |
dc7d5527 JW |
266 | } |
267 | ||
53197fc4 | 268 | int dbg_set_sw_break(unsigned long addr) |
dc7d5527 JW |
269 | { |
270 | int err = kgdb_validate_break_address(addr); | |
271 | int breakno = -1; | |
272 | int i; | |
273 | ||
274 | if (err) | |
275 | return err; | |
276 | ||
277 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { | |
278 | if ((kgdb_break[i].state == BP_SET) && | |
279 | (kgdb_break[i].bpt_addr == addr)) | |
280 | return -EEXIST; | |
281 | } | |
282 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { | |
283 | if (kgdb_break[i].state == BP_REMOVED && | |
284 | kgdb_break[i].bpt_addr == addr) { | |
285 | breakno = i; | |
286 | break; | |
287 | } | |
288 | } | |
289 | ||
290 | if (breakno == -1) { | |
291 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { | |
292 | if (kgdb_break[i].state == BP_UNDEFINED) { | |
293 | breakno = i; | |
294 | break; | |
295 | } | |
296 | } | |
297 | } | |
298 | ||
299 | if (breakno == -1) | |
300 | return -E2BIG; | |
301 | ||
302 | kgdb_break[breakno].state = BP_SET; | |
303 | kgdb_break[breakno].type = BP_BREAKPOINT; | |
304 | kgdb_break[breakno].bpt_addr = addr; | |
305 | ||
306 | return 0; | |
307 | } | |
308 | ||
dcc78711 | 309 | int dbg_deactivate_sw_breakpoints(void) |
dc7d5527 JW |
310 | { |
311 | unsigned long addr; | |
7f8b7ed6 JW |
312 | int error; |
313 | int ret = 0; | |
dc7d5527 JW |
314 | int i; |
315 | ||
316 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { | |
317 | if (kgdb_break[i].state != BP_ACTIVE) | |
318 | continue; | |
319 | addr = kgdb_break[i].bpt_addr; | |
320 | error = kgdb_arch_remove_breakpoint(addr, | |
321 | kgdb_break[i].saved_instr); | |
7f8b7ed6 JW |
322 | if (error) { |
323 | printk(KERN_INFO "KGDB: BP remove failed: %lx\n", addr); | |
324 | ret = error; | |
325 | } | |
dc7d5527 JW |
326 | |
327 | kgdb_flush_swbreak_addr(addr); | |
328 | kgdb_break[i].state = BP_SET; | |
329 | } | |
7f8b7ed6 | 330 | return ret; |
dc7d5527 JW |
331 | } |
332 | ||
53197fc4 | 333 | int dbg_remove_sw_break(unsigned long addr) |
dc7d5527 JW |
334 | { |
335 | int i; | |
336 | ||
337 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { | |
338 | if ((kgdb_break[i].state == BP_SET) && | |
339 | (kgdb_break[i].bpt_addr == addr)) { | |
340 | kgdb_break[i].state = BP_REMOVED; | |
341 | return 0; | |
342 | } | |
343 | } | |
344 | return -ENOENT; | |
345 | } | |
346 | ||
347 | int kgdb_isremovedbreak(unsigned long addr) | |
348 | { | |
349 | int i; | |
350 | ||
351 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { | |
352 | if ((kgdb_break[i].state == BP_REMOVED) && | |
353 | (kgdb_break[i].bpt_addr == addr)) | |
354 | return 1; | |
355 | } | |
356 | return 0; | |
357 | } | |
358 | ||
53197fc4 | 359 | int dbg_remove_all_break(void) |
dc7d5527 JW |
360 | { |
361 | unsigned long addr; | |
362 | int error; | |
363 | int i; | |
364 | ||
365 | /* Clear memory breakpoints. */ | |
366 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { | |
737a460f JW |
367 | if (kgdb_break[i].state != BP_ACTIVE) |
368 | goto setundefined; | |
dc7d5527 JW |
369 | addr = kgdb_break[i].bpt_addr; |
370 | error = kgdb_arch_remove_breakpoint(addr, | |
371 | kgdb_break[i].saved_instr); | |
372 | if (error) | |
737a460f JW |
373 | printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n", |
374 | addr); | |
375 | setundefined: | |
376 | kgdb_break[i].state = BP_UNDEFINED; | |
dc7d5527 JW |
377 | } |
378 | ||
379 | /* Clear hardware breakpoints. */ | |
380 | if (arch_kgdb_ops.remove_all_hw_break) | |
381 | arch_kgdb_ops.remove_all_hw_break(); | |
382 | ||
383 | return 0; | |
384 | } | |
385 | ||
dc7d5527 JW |
386 | /* |
387 | * Return true if there is a valid kgdb I/O module. Also if no | |
388 | * debugger is attached a message can be printed to the console about | |
389 | * waiting for the debugger to attach. | |
390 | * | |
391 | * The print_wait argument is only to be true when called from inside | |
392 | * the core kgdb_handle_exception, because it will wait for the | |
393 | * debugger to attach. | |
394 | */ | |
395 | static int kgdb_io_ready(int print_wait) | |
396 | { | |
53197fc4 | 397 | if (!dbg_io_ops) |
dc7d5527 JW |
398 | return 0; |
399 | if (kgdb_connected) | |
400 | return 1; | |
401 | if (atomic_read(&kgdb_setting_breakpoint)) | |
402 | return 1; | |
dcc78711 JW |
403 | if (print_wait) { |
404 | #ifdef CONFIG_KGDB_KDB | |
405 | if (!dbg_kdb_mode) | |
406 | printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n"); | |
407 | #else | |
dc7d5527 | 408 | printk(KERN_CRIT "KGDB: Waiting for remote debugger\n"); |
dcc78711 JW |
409 | #endif |
410 | } | |
dc7d5527 JW |
411 | return 1; |
412 | } | |
413 | ||
dc7d5527 JW |
414 | static int kgdb_reenter_check(struct kgdb_state *ks) |
415 | { | |
416 | unsigned long addr; | |
417 | ||
418 | if (atomic_read(&kgdb_active) != raw_smp_processor_id()) | |
419 | return 0; | |
420 | ||
421 | /* Panic on recursive debugger calls: */ | |
422 | exception_level++; | |
423 | addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs); | |
dcc78711 | 424 | dbg_deactivate_sw_breakpoints(); |
dc7d5527 JW |
425 | |
426 | /* | |
427 | * If the break point removed ok at the place exception | |
428 | * occurred, try to recover and print a warning to the end | |
429 | * user because the user planted a breakpoint in a place that | |
430 | * KGDB needs in order to function. | |
431 | */ | |
53197fc4 | 432 | if (dbg_remove_sw_break(addr) == 0) { |
dc7d5527 JW |
433 | exception_level = 0; |
434 | kgdb_skipexception(ks->ex_vector, ks->linux_regs); | |
53197fc4 | 435 | dbg_activate_sw_breakpoints(); |
67baf94c JW |
436 | printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n", |
437 | addr); | |
dc7d5527 JW |
438 | WARN_ON_ONCE(1); |
439 | ||
440 | return 1; | |
441 | } | |
53197fc4 | 442 | dbg_remove_all_break(); |
dc7d5527 JW |
443 | kgdb_skipexception(ks->ex_vector, ks->linux_regs); |
444 | ||
445 | if (exception_level > 1) { | |
446 | dump_stack(); | |
447 | panic("Recursive entry to debugger"); | |
448 | } | |
449 | ||
450 | printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n"); | |
6d906340 JW |
451 | #ifdef CONFIG_KGDB_KDB |
452 | /* Allow kdb to debug itself one level */ | |
453 | return 0; | |
454 | #endif | |
dc7d5527 JW |
455 | dump_stack(); |
456 | panic("Recursive entry to debugger"); | |
457 | ||
458 | return 1; | |
459 | } | |
460 | ||
dcc78711 JW |
461 | static void dbg_cpu_switch(int cpu, int next_cpu) |
462 | { | |
463 | /* Mark the cpu we are switching away from as a slave when it | |
464 | * holds the kgdb_active token. This must be done so that the | |
465 | * that all the cpus wait in for the debug core will not enter | |
466 | * again as the master. */ | |
467 | if (cpu == atomic_read(&kgdb_active)) { | |
468 | kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE; | |
469 | kgdb_info[cpu].exception_state &= ~DCPU_WANT_MASTER; | |
470 | } | |
471 | kgdb_info[next_cpu].exception_state |= DCPU_NEXT_MASTER; | |
472 | } | |
473 | ||
/* Keep the various watchdogs from firing after time stopped in kgdb. */
static void dbg_touch_watchdogs(void)
{
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	rcu_cpu_stall_reset();
}
480 | ||
62fae312 | 481 | static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs) |
dc7d5527 | 482 | { |
dc7d5527 | 483 | unsigned long flags; |
028e7b17 | 484 | int sstep_tries = 100; |
dcc78711 | 485 | int error; |
dc7d5527 | 486 | int i, cpu; |
4da75b9c | 487 | int trace_on = 0; |
dc7d5527 JW |
488 | acquirelock: |
489 | /* | |
490 | * Interrupts will be restored by the 'trap return' code, except when | |
491 | * single stepping. | |
492 | */ | |
493 | local_irq_save(flags); | |
494 | ||
62fae312 JW |
495 | cpu = ks->cpu; |
496 | kgdb_info[cpu].debuggerinfo = regs; | |
497 | kgdb_info[cpu].task = current; | |
dcc78711 JW |
498 | kgdb_info[cpu].ret_state = 0; |
499 | kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT; | |
62fae312 JW |
500 | /* |
501 | * Make sure the above info reaches the primary CPU before | |
502 | * our cpu_in_kgdb[] flag setting does: | |
503 | */ | |
ae6bf53e | 504 | atomic_inc(&cpu_in_kgdb[cpu]); |
dc7d5527 | 505 | |
6d906340 JW |
506 | if (exception_level == 1) |
507 | goto cpu_master_loop; | |
508 | ||
dc7d5527 | 509 | /* |
62fae312 JW |
510 | * CPU will loop if it is a slave or request to become a kgdb |
511 | * master cpu and acquire the kgdb_active lock: | |
dc7d5527 | 512 | */ |
62fae312 | 513 | while (1) { |
dcc78711 JW |
514 | cpu_loop: |
515 | if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) { | |
516 | kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER; | |
517 | goto cpu_master_loop; | |
518 | } else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) { | |
62fae312 JW |
519 | if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu) |
520 | break; | |
521 | } else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) { | |
522 | if (!atomic_read(&passive_cpu_wait[cpu])) | |
523 | goto return_normal; | |
524 | } else { | |
525 | return_normal: | |
526 | /* Return to normal operation by executing any | |
527 | * hw breakpoint fixup. | |
528 | */ | |
529 | if (arch_kgdb_ops.correct_hw_break) | |
530 | arch_kgdb_ops.correct_hw_break(); | |
4da75b9c JW |
531 | if (trace_on) |
532 | tracing_on(); | |
ae6bf53e | 533 | atomic_dec(&cpu_in_kgdb[cpu]); |
16cdc628 | 534 | dbg_touch_watchdogs(); |
62fae312 JW |
535 | local_irq_restore(flags); |
536 | return 0; | |
537 | } | |
dc7d5527 | 538 | cpu_relax(); |
62fae312 | 539 | } |
dc7d5527 JW |
540 | |
541 | /* | |
028e7b17 JW |
542 | * For single stepping, try to only enter on the processor |
543 | * that was single stepping. To gaurd against a deadlock, the | |
544 | * kernel will only try for the value of sstep_tries before | |
545 | * giving up and continuing on. | |
dc7d5527 JW |
546 | */ |
547 | if (atomic_read(&kgdb_cpu_doing_single_step) != -1 && | |
028e7b17 JW |
548 | (kgdb_info[cpu].task && |
549 | kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) { | |
dc7d5527 | 550 | atomic_set(&kgdb_active, -1); |
16cdc628 | 551 | dbg_touch_watchdogs(); |
dc7d5527 JW |
552 | local_irq_restore(flags); |
553 | ||
554 | goto acquirelock; | |
555 | } | |
556 | ||
557 | if (!kgdb_io_ready(1)) { | |
dcc78711 | 558 | kgdb_info[cpu].ret_state = 1; |
53197fc4 | 559 | goto kgdb_restore; /* No I/O connection, resume the system */ |
dc7d5527 JW |
560 | } |
561 | ||
562 | /* | |
563 | * Don't enter if we have hit a removed breakpoint. | |
564 | */ | |
565 | if (kgdb_skipexception(ks->ex_vector, ks->linux_regs)) | |
566 | goto kgdb_restore; | |
567 | ||
568 | /* Call the I/O driver's pre_exception routine */ | |
53197fc4 JW |
569 | if (dbg_io_ops->pre_exception) |
570 | dbg_io_ops->pre_exception(); | |
dc7d5527 | 571 | |
dc7d5527 JW |
572 | kgdb_disable_hw_debug(ks->linux_regs); |
573 | ||
574 | /* | |
575 | * Get the passive CPU lock which will hold all the non-primary | |
576 | * CPU in a spin state while the debugger is active | |
577 | */ | |
d7161a65 | 578 | if (!kgdb_single_step) { |
dc7d5527 | 579 | for (i = 0; i < NR_CPUS; i++) |
ae6bf53e | 580 | atomic_inc(&passive_cpu_wait[i]); |
dc7d5527 JW |
581 | } |
582 | ||
56fb7093 JW |
583 | #ifdef CONFIG_SMP |
584 | /* Signal the other CPUs to enter kgdb_wait() */ | |
d7161a65 | 585 | if ((!kgdb_single_step) && kgdb_do_roundup) |
56fb7093 JW |
586 | kgdb_roundup_cpus(flags); |
587 | #endif | |
588 | ||
dc7d5527 JW |
589 | /* |
590 | * Wait for the other CPUs to be notified and be waiting for us: | |
591 | */ | |
592 | for_each_online_cpu(i) { | |
dcc78711 | 593 | while (kgdb_do_roundup && !atomic_read(&cpu_in_kgdb[i])) |
dc7d5527 JW |
594 | cpu_relax(); |
595 | } | |
596 | ||
597 | /* | |
598 | * At this point the primary processor is completely | |
599 | * in the debugger and all secondary CPUs are quiescent | |
600 | */ | |
dcc78711 | 601 | dbg_deactivate_sw_breakpoints(); |
dc7d5527 | 602 | kgdb_single_step = 0; |
d7161a65 | 603 | kgdb_contthread = current; |
dc7d5527 | 604 | exception_level = 0; |
4da75b9c JW |
605 | trace_on = tracing_is_on(); |
606 | if (trace_on) | |
607 | tracing_off(); | |
dc7d5527 | 608 | |
dcc78711 JW |
609 | while (1) { |
610 | cpu_master_loop: | |
611 | if (dbg_kdb_mode) { | |
612 | kgdb_connected = 1; | |
613 | error = kdb_stub(ks); | |
3fa43aba JW |
614 | if (error == -1) |
615 | continue; | |
b0679c63 | 616 | kgdb_connected = 0; |
dcc78711 JW |
617 | } else { |
618 | error = gdb_serial_stub(ks); | |
619 | } | |
620 | ||
621 | if (error == DBG_PASS_EVENT) { | |
622 | dbg_kdb_mode = !dbg_kdb_mode; | |
dcc78711 JW |
623 | } else if (error == DBG_SWITCH_CPU_EVENT) { |
624 | dbg_cpu_switch(cpu, dbg_switch_cpu); | |
625 | goto cpu_loop; | |
626 | } else { | |
627 | kgdb_info[cpu].ret_state = error; | |
628 | break; | |
629 | } | |
630 | } | |
dc7d5527 JW |
631 | |
632 | /* Call the I/O driver's post_exception routine */ | |
53197fc4 JW |
633 | if (dbg_io_ops->post_exception) |
634 | dbg_io_ops->post_exception(); | |
dc7d5527 | 635 | |
ae6bf53e | 636 | atomic_dec(&cpu_in_kgdb[ks->cpu]); |
dc7d5527 | 637 | |
d7161a65 | 638 | if (!kgdb_single_step) { |
dc7d5527 | 639 | for (i = NR_CPUS-1; i >= 0; i--) |
ae6bf53e | 640 | atomic_dec(&passive_cpu_wait[i]); |
dc7d5527 | 641 | /* |
dcc78711 JW |
642 | * Wait till all the CPUs have quit from the debugger, |
643 | * but allow a CPU that hit an exception and is | |
644 | * waiting to become the master to remain in the debug | |
645 | * core. | |
dc7d5527 JW |
646 | */ |
647 | for_each_online_cpu(i) { | |
dcc78711 JW |
648 | while (kgdb_do_roundup && |
649 | atomic_read(&cpu_in_kgdb[i]) && | |
650 | !(kgdb_info[i].exception_state & | |
651 | DCPU_WANT_MASTER)) | |
dc7d5527 JW |
652 | cpu_relax(); |
653 | } | |
654 | } | |
655 | ||
656 | kgdb_restore: | |
028e7b17 JW |
657 | if (atomic_read(&kgdb_cpu_doing_single_step) != -1) { |
658 | int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step); | |
659 | if (kgdb_info[sstep_cpu].task) | |
660 | kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid; | |
661 | else | |
662 | kgdb_sstep_pid = 0; | |
663 | } | |
4da75b9c JW |
664 | if (trace_on) |
665 | tracing_on(); | |
dc7d5527 JW |
666 | /* Free kgdb_active */ |
667 | atomic_set(&kgdb_active, -1); | |
16cdc628 | 668 | dbg_touch_watchdogs(); |
dc7d5527 JW |
669 | local_irq_restore(flags); |
670 | ||
dcc78711 | 671 | return kgdb_info[cpu].ret_state; |
dc7d5527 JW |
672 | } |
673 | ||
62fae312 JW |
674 | /* |
675 | * kgdb_handle_exception() - main entry point from a kernel exception | |
676 | * | |
677 | * Locking hierarchy: | |
678 | * interface locks, if any (begin_session) | |
679 | * kgdb lock (kgdb_active) | |
680 | */ | |
681 | int | |
682 | kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs) | |
683 | { | |
684 | struct kgdb_state kgdb_var; | |
685 | struct kgdb_state *ks = &kgdb_var; | |
686 | int ret; | |
687 | ||
688 | ks->cpu = raw_smp_processor_id(); | |
689 | ks->ex_vector = evector; | |
690 | ks->signo = signo; | |
62fae312 JW |
691 | ks->err_code = ecode; |
692 | ks->kgdb_usethreadid = 0; | |
693 | ks->linux_regs = regs; | |
694 | ||
695 | if (kgdb_reenter_check(ks)) | |
696 | return 0; /* Ouch, double exception ! */ | |
697 | kgdb_info[ks->cpu].exception_state |= DCPU_WANT_MASTER; | |
698 | ret = kgdb_cpu_enter(ks, regs); | |
dcc78711 JW |
699 | kgdb_info[ks->cpu].exception_state &= ~(DCPU_WANT_MASTER | |
700 | DCPU_IS_SLAVE); | |
62fae312 JW |
701 | return ret; |
702 | } | |
703 | ||
int kgdb_nmicallback(int cpu, void *regs)
{
#ifdef CONFIG_SMP
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;

	memset(ks, 0, sizeof(struct kgdb_state));
	ks->cpu			= cpu;
	ks->linux_regs		= regs;

	/* Enter as a slave only if another cpu is already the master. */
	if (!atomic_read(&cpu_in_kgdb[cpu]) &&
	    atomic_read(&kgdb_active) != -1 &&
	    atomic_read(&kgdb_active) != cpu) {
		kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
		kgdb_cpu_enter(ks, regs);
		kgdb_info[cpu].exception_state &= ~DCPU_IS_SLAVE;
		return 0;
	}
#endif
	return 1;
}
725 | ||
aabdc3b8 JW |
726 | static void kgdb_console_write(struct console *co, const char *s, |
727 | unsigned count) | |
dc7d5527 JW |
728 | { |
729 | unsigned long flags; | |
730 | ||
731 | /* If we're debugging, or KGDB has not connected, don't try | |
732 | * and print. */ | |
dcc78711 | 733 | if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode) |
dc7d5527 JW |
734 | return; |
735 | ||
736 | local_irq_save(flags); | |
53197fc4 | 737 | gdbstub_msg_write(s, count); |
dc7d5527 JW |
738 | local_irq_restore(flags); |
739 | } | |
740 | ||
741 | static struct console kgdbcons = { | |
742 | .name = "kgdb", | |
743 | .write = kgdb_console_write, | |
744 | .flags = CON_PRINTBUFFER | CON_ENABLED, | |
745 | .index = -1, | |
746 | }; | |
747 | ||
#ifdef CONFIG_MAGIC_SYSRQ
/* SysRq-g handler: drop into the debugger if an I/O module exists. */
static void sysrq_handle_dbg(int key)
{
	if (!dbg_io_ops) {
		printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
		return;
	}
	if (!kgdb_connected) {
#ifdef CONFIG_KGDB_KDB
		if (!dbg_kdb_mode)
			printk(KERN_CRIT "KGDB or $3#33 for KDB\n");
#else
		printk(KERN_CRIT "Entering KGDB\n");
#endif
	}

	kgdb_breakpoint();
}

static struct sysrq_key_op sysrq_dbg_op = {
	.handler	= sysrq_handle_dbg,
	.help_msg	= "debug(G)",
	.action_msg	= "DEBUG",
};
#endif
773 | ||
4402c153 JW |
774 | static int kgdb_panic_event(struct notifier_block *self, |
775 | unsigned long val, | |
776 | void *data) | |
777 | { | |
778 | if (dbg_kdb_mode) | |
779 | kdb_printf("PANIC: %s\n", (char *)data); | |
780 | kgdb_breakpoint(); | |
781 | return NOTIFY_DONE; | |
782 | } | |
783 | ||
784 | static struct notifier_block kgdb_panic_event_nb = { | |
785 | .notifier_call = kgdb_panic_event, | |
786 | .priority = INT_MAX, | |
787 | }; | |
788 | ||
0b4b3827 JW |
789 | void __weak kgdb_arch_late(void) |
790 | { | |
791 | } | |
792 | ||
793 | void __init dbg_late_init(void) | |
794 | { | |
795 | dbg_is_early = false; | |
796 | if (kgdb_io_module_registered) | |
797 | kgdb_arch_late(); | |
798 | kdb_init(KDB_INIT_FULL); | |
799 | } | |
800 | ||
dc7d5527 JW |
801 | static void kgdb_register_callbacks(void) |
802 | { | |
803 | if (!kgdb_io_module_registered) { | |
804 | kgdb_io_module_registered = 1; | |
805 | kgdb_arch_init(); | |
0b4b3827 JW |
806 | if (!dbg_is_early) |
807 | kgdb_arch_late(); | |
4402c153 JW |
808 | atomic_notifier_chain_register(&panic_notifier_list, |
809 | &kgdb_panic_event_nb); | |
dc7d5527 | 810 | #ifdef CONFIG_MAGIC_SYSRQ |
53197fc4 | 811 | register_sysrq_key('g', &sysrq_dbg_op); |
dc7d5527 JW |
812 | #endif |
813 | if (kgdb_use_con && !kgdb_con_registered) { | |
814 | register_console(&kgdbcons); | |
815 | kgdb_con_registered = 1; | |
816 | } | |
817 | } | |
818 | } | |
819 | ||
820 | static void kgdb_unregister_callbacks(void) | |
821 | { | |
822 | /* | |
823 | * When this routine is called KGDB should unregister from the | |
824 | * panic handler and clean up, making sure it is not handling any | |
825 | * break exceptions at the time. | |
826 | */ | |
827 | if (kgdb_io_module_registered) { | |
828 | kgdb_io_module_registered = 0; | |
4402c153 JW |
829 | atomic_notifier_chain_unregister(&panic_notifier_list, |
830 | &kgdb_panic_event_nb); | |
dc7d5527 JW |
831 | kgdb_arch_exit(); |
832 | #ifdef CONFIG_MAGIC_SYSRQ | |
53197fc4 | 833 | unregister_sysrq_key('g', &sysrq_dbg_op); |
dc7d5527 JW |
834 | #endif |
835 | if (kgdb_con_registered) { | |
836 | unregister_console(&kgdbcons); | |
837 | kgdb_con_registered = 0; | |
838 | } | |
839 | } | |
840 | } | |
841 | ||
1cee5e35 JW |
842 | /* |
843 | * There are times a tasklet needs to be used vs a compiled in | |
844 | * break point so as to cause an exception outside a kgdb I/O module, | |
845 | * such as is the case with kgdboe, where calling a breakpoint in the | |
846 | * I/O driver itself would be fatal. | |
847 | */ | |
848 | static void kgdb_tasklet_bpt(unsigned long ing) | |
849 | { | |
850 | kgdb_breakpoint(); | |
851 | atomic_set(&kgdb_break_tasklet_var, 0); | |
852 | } | |
853 | ||
854 | static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0); | |
855 | ||
856 | void kgdb_schedule_breakpoint(void) | |
857 | { | |
858 | if (atomic_read(&kgdb_break_tasklet_var) || | |
859 | atomic_read(&kgdb_active) != -1 || | |
860 | atomic_read(&kgdb_setting_breakpoint)) | |
861 | return; | |
862 | atomic_inc(&kgdb_break_tasklet_var); | |
863 | tasklet_schedule(&kgdb_tasklet_breakpoint); | |
864 | } | |
865 | EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint); | |
866 | ||
dc7d5527 JW |
867 | static void kgdb_initial_breakpoint(void) |
868 | { | |
869 | kgdb_break_asap = 0; | |
870 | ||
871 | printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n"); | |
872 | kgdb_breakpoint(); | |
873 | } | |
874 | ||
875 | /** | |
737a460f | 876 | * kgdb_register_io_module - register KGDB IO module |
53197fc4 | 877 | * @new_dbg_io_ops: the io ops vector |
dc7d5527 JW |
878 | * |
879 | * Register it with the KGDB core. | |
880 | */ | |
53197fc4 | 881 | int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops) |
dc7d5527 JW |
882 | { |
883 | int err; | |
884 | ||
885 | spin_lock(&kgdb_registration_lock); | |
886 | ||
53197fc4 | 887 | if (dbg_io_ops) { |
dc7d5527 JW |
888 | spin_unlock(&kgdb_registration_lock); |
889 | ||
890 | printk(KERN_ERR "kgdb: Another I/O driver is already " | |
891 | "registered with KGDB.\n"); | |
892 | return -EBUSY; | |
893 | } | |
894 | ||
53197fc4 JW |
895 | if (new_dbg_io_ops->init) { |
896 | err = new_dbg_io_ops->init(); | |
dc7d5527 JW |
897 | if (err) { |
898 | spin_unlock(&kgdb_registration_lock); | |
899 | return err; | |
900 | } | |
901 | } | |
902 | ||
53197fc4 | 903 | dbg_io_ops = new_dbg_io_ops; |
dc7d5527 JW |
904 | |
905 | spin_unlock(&kgdb_registration_lock); | |
906 | ||
907 | printk(KERN_INFO "kgdb: Registered I/O driver %s.\n", | |
53197fc4 | 908 | new_dbg_io_ops->name); |
dc7d5527 JW |
909 | |
910 | /* Arm KGDB now. */ | |
911 | kgdb_register_callbacks(); | |
912 | ||
913 | if (kgdb_break_asap) | |
914 | kgdb_initial_breakpoint(); | |
915 | ||
916 | return 0; | |
917 | } | |
918 | EXPORT_SYMBOL_GPL(kgdb_register_io_module); | |
919 | ||
920 | /** | |
921 | * kkgdb_unregister_io_module - unregister KGDB IO module | |
53197fc4 | 922 | * @old_dbg_io_ops: the io ops vector |
dc7d5527 JW |
923 | * |
924 | * Unregister it with the KGDB core. | |
925 | */ | |
53197fc4 | 926 | void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops) |
dc7d5527 JW |
927 | { |
928 | BUG_ON(kgdb_connected); | |
929 | ||
930 | /* | |
931 | * KGDB is no longer able to communicate out, so | |
932 | * unregister our callbacks and reset state. | |
933 | */ | |
934 | kgdb_unregister_callbacks(); | |
935 | ||
936 | spin_lock(&kgdb_registration_lock); | |
937 | ||
53197fc4 JW |
938 | WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops); |
939 | dbg_io_ops = NULL; | |
dc7d5527 JW |
940 | |
941 | spin_unlock(&kgdb_registration_lock); | |
942 | ||
943 | printk(KERN_INFO | |
944 | "kgdb: Unregistered I/O driver %s, debugger disabled.\n", | |
53197fc4 | 945 | old_dbg_io_ops->name); |
dc7d5527 JW |
946 | } |
947 | EXPORT_SYMBOL_GPL(kgdb_unregister_io_module); | |
948 | ||
dcc78711 JW |
949 | int dbg_io_get_char(void) |
950 | { | |
951 | int ret = dbg_io_ops->read_char(); | |
f5316b4a JW |
952 | if (ret == NO_POLL_CHAR) |
953 | return -1; | |
dcc78711 JW |
954 | if (!dbg_kdb_mode) |
955 | return ret; | |
956 | if (ret == 127) | |
957 | return 8; | |
958 | return ret; | |
959 | } | |
960 | ||
dc7d5527 JW |
961 | /** |
962 | * kgdb_breakpoint - generate breakpoint exception | |
963 | * | |
964 | * This function will generate a breakpoint exception. It is used at the | |
965 | * beginning of a program to sync up with a debugger and can be used | |
966 | * otherwise as a quick means to stop program execution and "break" into | |
967 | * the debugger. | |
968 | */ | |
969 | void kgdb_breakpoint(void) | |
970 | { | |
ae6bf53e | 971 | atomic_inc(&kgdb_setting_breakpoint); |
dc7d5527 JW |
972 | wmb(); /* Sync point before breakpoint */ |
973 | arch_kgdb_breakpoint(); | |
974 | wmb(); /* Sync point after breakpoint */ | |
ae6bf53e | 975 | atomic_dec(&kgdb_setting_breakpoint); |
dc7d5527 JW |
976 | } |
977 | EXPORT_SYMBOL_GPL(kgdb_breakpoint); | |
978 | ||
979 | static int __init opt_kgdb_wait(char *str) | |
980 | { | |
981 | kgdb_break_asap = 1; | |
982 | ||
dcc78711 | 983 | kdb_init(KDB_INIT_EARLY); |
dc7d5527 JW |
984 | if (kgdb_io_module_registered) |
985 | kgdb_initial_breakpoint(); | |
986 | ||
987 | return 0; | |
988 | } | |
989 | ||
990 | early_param("kgdbwait", opt_kgdb_wait); |