/*
 * arch/sh/kernel/hw_breakpoint.c
 *
 * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
 *
 * Copyright (C) 2009 - 2010  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/clk.h>
#include <asm/hw_breakpoint.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for each CPU.
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);

/*
 * A dummy placeholder for early accesses until the CPUs get a chance to
 * register their UBCs later in the boot process.
 */
static struct sh_ubc ubc_dummy = { .num_events = 0 };

static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy;
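
/*
 * For reference, the sh_ubc operations this file relies on are declared
 * in <asm/hw_breakpoint.h>; the sketch below is illustrative of the
 * expected shape (every member shown is used somewhere in this file)
 * rather than a definitive copy of that header:
 *
 *	struct sh_ubc {
 *		const char	*name;
 *		unsigned int	num_events;
 *		unsigned int	trap_nr;
 *		void		(*enable)(struct arch_hw_breakpoint *, int);
 *		void		(*disable)(struct arch_hw_breakpoint *, int);
 *		void		(*enable_all)(unsigned long);
 *		void		(*disable_all)(void);
 *		unsigned long	(*active_mask)(void);
 *		unsigned long	(*triggered_mask)(void);
 *		void		(*clear_triggered_mask)(unsigned long);
 *		struct clk	*clk;
 *	};
 */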

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free UBC channel and use it for this breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int i;

	/* Claim the first free channel slot on this CPU. */
	for (i = 0; i < sh_ubc->num_events; i++) {
		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);

		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
		return -EBUSY;

	clk_enable(sh_ubc->clk);
	sh_ubc->enable(info, i);

	return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int i;

	for (i = 0; i < sh_ubc->num_events; i++) {
		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
		return;

	sh_ubc->disable(info, i);
	clk_disable(sh_ubc->clk);
}

static int get_hbp_len(u16 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case SH_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case SH_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case SH_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case SH_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}

	return len_in_bytes;
}

/*
 * Check for virtual address in user space.
 */
int arch_check_va_in_userspace(unsigned long va, u16 hbp_len)
{
	unsigned int len;

	len = get_hbp_len(hbp_len);

	return (va <= TASK_SIZE - len);
}

/*
 * Check for virtual address in kernel space.
 */
static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
{
	unsigned int len;

	len = get_hbp_len(hbp_len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
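
/*
 * Worked example (assuming the usual 32-bit SH TASK_SIZE of 0x7c000000):
 * an 8-byte breakpoint at va 0x7bfffff8 passes the user-space check
 * (0x7bfffff8 <= 0x7c000000 - 8), while one at va 0x7c000000 fails it
 * and passes the kernel-space check instead.
 */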

/*
 * Store a breakpoint's encoded address, length, and type.
 */
static int arch_store_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	/*
	 * User-space requests will always have the address field populated.
	 * For kernel addresses, either the address or the symbol name can
	 * be specified.
	 */
	if (info->name)
		info->address = (unsigned long)kallsyms_lookup_name(info->name);
	if (info->address)
		return 0;

	return -EINVAL;
}

int arch_bp_generic_fields(int sh_len, int sh_type,
			   int *gen_len, int *gen_type)
{
	/* Len */
	switch (sh_len) {
	case SH_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case SH_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case SH_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case SH_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/* Type */
	switch (sh_type) {
	case SH_BREAKPOINT_READ:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case SH_BREAKPOINT_WRITE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case SH_BREAKPOINT_RW:
		*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
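
/*
 * For illustration only (not part of this file): a caller such as the
 * ptrace layer converts the arch-specific encoding back to the generic
 * one along these lines:
 *
 *	int gen_len, gen_type;
 *
 *	if (arch_bp_generic_fields(info->len, info->type,
 *				   &gen_len, &gen_type) < 0)
 *		return -EINVAL;
 */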

static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	info->address = bp->attr.bp_addr;

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->len = SH_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->len = SH_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->len = SH_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->len = SH_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_R:
		info->type = SH_BREAKPOINT_READ;
		break;
	case HW_BREAKPOINT_W:
		info->type = SH_BREAKPOINT_WRITE;
		break;
	case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
		info->type = SH_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp,
				  struct task_struct *tsk)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned int align;
	int ret;

	ret = arch_build_bp_info(bp);
	if (ret)
		return ret;

	ret = -EINVAL;

	switch (info->len) {
	case SH_BREAKPOINT_LEN_1:
		align = 0;
		break;
	case SH_BREAKPOINT_LEN_2:
		align = 1;
		break;
	case SH_BREAKPOINT_LEN_4:
		align = 3;
		break;
	case SH_BREAKPOINT_LEN_8:
		align = 7;
		break;
	default:
		return ret;
	}

	ret = arch_store_info(bp);
	if (ret < 0)
		return ret;

	/*
	 * Check that the low-order bits of the address are appropriate
	 * for the alignment implied by len.
	 */
	if (info->address & align)
		return -EINVAL;

	/* Check that the virtual address is in the proper range */
	if (tsk) {
		if (!arch_check_va_in_userspace(info->address, info->len))
			return -EFAULT;
	} else {
		if (!arch_check_va_in_kernelspace(info->address, info->len))
			return -EFAULT;
	}

	return 0;
}
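
/*
 * For illustration only (not part of this file): a kernel-side data
 * breakpoint that ends up being validated by the routine above can be
 * set up through the generic hw_breakpoint API roughly along these
 * lines, with "symbol_name" and "my_hbp_handler" being hypothetical:
 *
 *	struct perf_event_attr attr;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = kallsyms_lookup_name("symbol_name");
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
 *
 *	register_wide_hw_breakpoint(&attr, my_hbp_handler);
 */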

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < sh_ubc->num_events; i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}
}

static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int cpu, i, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned int cmf, resume_mask;

	/*
	 * Do an early return if none of the channels triggered.
	 */
	cmf = sh_ubc->triggered_mask();
	if (unlikely(!cmf))
		return NOTIFY_DONE;

	/*
	 * By default, resume all of the active channels.
	 */
	resume_mask = sh_ubc->active_mask();

	/*
	 * Disable breakpoints during exception handling.
	 */
	sh_ubc->disable_all();

	cpu = get_cpu();
	for (i = 0; i < sh_ubc->num_events; i++) {
		unsigned long event_mask = (1 << i);

		if (likely(!(cmf & event_mask)))
			continue;

		/*
		 * The counter may be concurrently released but that can only
		 * occur from a call_rcu() path. We can then safely fetch
		 * the breakpoint, use its callback, touch its counter
		 * while we are in an rcu_read_lock() path.
		 */
		rcu_read_lock();

		bp = per_cpu(bp_per_reg[i], cpu);
		if (bp)
			rc = NOTIFY_DONE;

		/*
		 * Reset the condition match flag to denote completion of
		 * exception handling.
		 */
		sh_ubc->clear_triggered_mask(event_mask);

		/*
		 * bp can be NULL due to concurrent perf counter
		 * removing.
		 */
		if (!bp) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Don't restore the channel if the breakpoint is from
		 * ptrace, as it always operates in one-shot mode.
		 */
		if (bp->overflow_handler == ptrace_triggered)
			resume_mask &= ~(1 << i);

		perf_bp_event(bp, args->regs);

		/* Deliver the signal to userspace */
		if (arch_check_va_in_userspace(bp->attr.bp_addr,
					       bp->attr.bp_len)) {
			siginfo_t info;

			info.si_signo = args->signr;
			info.si_errno = notifier_to_errno(rc);
			info.si_code = TRAP_HWBKPT;

			force_sig_info(args->signr, &info, current);
		}

		rcu_read_unlock();
	}

	if (cmf == 0)
		rc = NOTIFY_DONE;

	sh_ubc->enable_all(resume_mask);

	put_cpu();

	return rc;
}
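
/*
 * Worked example of the mask handling above: with two channels and only
 * channel 0 having fired, triggered_mask() returns 0x1. If channel 0
 * belongs to a ptrace (one-shot) breakpoint, it is dropped from
 * resume_mask, so enable_all() re-arms only the surviving channels once
 * handling completes.
 */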

BUILD_TRAP_HANDLER(breakpoint)
{
	unsigned long ex = lookup_exception_vector();
	TRAP_HANDLER_DECL;

	notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
}

/*
 * Handle debug exception notifications.
 */
int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
					      unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val != DIE_BREAKPOINT)
		return NOTIFY_DONE;

	/*
	 * If the breakpoint hasn't been triggered by the UBC, it's
	 * probably from a debugger, so don't do anything more here.
	 *
	 * This also permits the UBC interface clock to remain off for
	 * non-UBC breakpoints, as we don't need to check the triggered
	 * or active channel masks.
	 */
	if (args->trapnr != sh_ubc->trap_nr)
		return NOTIFY_DONE;

	return hw_breakpoint_handler(data);
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}

void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
{
	/* TODO */
}

int register_sh_ubc(struct sh_ubc *ubc)
{
	/* Bail if it's already assigned */
	if (sh_ubc != &ubc_dummy)
		return -EBUSY;
	sh_ubc = ubc;

	pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name);

	WARN_ON(ubc->num_events > HBP_NUM);

	return 0;
}
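
/*
 * For illustration only (not part of this file): a CPU-specific UBC
 * driver is expected to register itself early in boot roughly along
 * these lines, with the "example_ubc" descriptor and its helpers being
 * hypothetical:
 *
 *	static struct sh_ubc example_ubc = {
 *		.name			= "example",
 *		.num_events		= 2,
 *		.trap_nr		= 0x1e0,
 *		.enable			= example_ubc_enable,
 *		.disable		= example_ubc_disable,
 *		.enable_all		= example_ubc_enable_all,
 *		.disable_all		= example_ubc_disable_all,
 *		.active_mask		= example_ubc_active_mask,
 *		.triggered_mask		= example_ubc_triggered_mask,
 *		.clear_triggered_mask	= example_ubc_clear_triggered_mask,
 *	};
 *
 *	static int __init example_ubc_init(void)
 *	{
 *		return register_sh_ubc(&example_ubc);
 *	}
 *	arch_initcall(example_ubc_init);
 */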