/*
 * arch/sh/kernel/hw_breakpoint.c
 *
 * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
 *
 * Copyright (C) 2009 - 2010 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <asm/hw_breakpoint.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for each CPU
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);

/*
 * A dummy placeholder for early accesses until the CPUs get a chance to
 * register their UBCs later in the boot process.
 */
static struct sh_ubc ubc_dummy = { .num_events = 0 };

static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy;

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free UBC channel and use it for this breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int i;

	for (i = 0; i < sh_ubc->num_events; i++) {
		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);

		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
		return -EBUSY;

	clk_enable(sh_ubc->clk);
	sh_ubc->enable(info, i);

	return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int i;

	for (i = 0; i < sh_ubc->num_events; i++) {
		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
		return;

	sh_ubc->disable(info, i);
	clk_disable(sh_ubc->clk);
}

static int get_hbp_len(u16 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case SH_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case SH_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case SH_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case SH_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}
	return len_in_bytes;
}

/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
	unsigned int len;
	unsigned long va;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	va = info->address;
	len = get_hbp_len(info->len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

int arch_bp_generic_fields(int sh_len, int sh_type,
			   int *gen_len, int *gen_type)
{
	/* Len */
	switch (sh_len) {
	case SH_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case SH_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case SH_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case SH_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/* Type */
	switch (sh_type) {
	case SH_BREAKPOINT_READ:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case SH_BREAKPOINT_WRITE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case SH_BREAKPOINT_RW:
		*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	info->address = bp->attr.bp_addr;

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->len = SH_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->len = SH_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->len = SH_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->len = SH_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_R:
		info->type = SH_BREAKPOINT_READ;
		break;
	case HW_BREAKPOINT_W:
		info->type = SH_BREAKPOINT_WRITE;
		break;
	case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
		info->type = SH_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

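/*
 * Illustrative sketch of the kernel-side usage that arch_build_bp_info()
 * above translates: a caller fills a perf_event_attr with the generic
 * HW_BREAKPOINT_* encodings and registers it through the hw_breakpoint
 * core. hw_breakpoint_init() and the bp_* fields come from
 * <linux/hw_breakpoint.h>; the watched symbol, the handler name and the
 * exact registration call (register_wide_hw_breakpoint(), whose signature
 * varies between kernel versions) are placeholders, not part of this file.
 *
 *	struct perf_event_attr attr;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = kallsyms_lookup_name("jiffies");
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	register_wide_hw_breakpoint(&attr, my_bp_handler);
 */
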
/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned int align;
	int ret;

	ret = arch_build_bp_info(bp);
	if (ret)
		return ret;

	ret = -EINVAL;

	switch (info->len) {
	case SH_BREAKPOINT_LEN_1:
		align = 0;
		break;
	case SH_BREAKPOINT_LEN_2:
		align = 1;
		break;
	case SH_BREAKPOINT_LEN_4:
		align = 3;
		break;
	case SH_BREAKPOINT_LEN_8:
		align = 7;
		break;
	default:
		return ret;
	}

	/*
	 * For kernel-addresses, either the address or symbol name can be
	 * specified.
	 */
	if (info->name)
		info->address = (unsigned long)kallsyms_lookup_name(info->name);

	/*
	 * Check that the low-order bits of the address are appropriate
	 * for the alignment implied by len.
	 */
	if (info->address & align)
		return -EINVAL;

	return 0;
}

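/*
 * Worked example of the alignment rule above (addresses are illustrative
 * only): a SH_BREAKPOINT_LEN_4 breakpoint uses align = 3, so bp_addr =
 * 0x8c001002 is rejected with -EINVAL (0x8c001002 & 3 == 2), while
 * bp_addr = 0x8c001004 passes the check (0x8c001004 & 3 == 0).
 */
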
/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < sh_ubc->num_events; i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}
}

static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int cpu, i, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned int cmf, resume_mask;

	/*
	 * Do an early return if none of the channels triggered.
	 */
	cmf = sh_ubc->triggered_mask();
	if (unlikely(!cmf))
		return NOTIFY_DONE;

	/*
	 * By default, resume all of the active channels.
	 */
	resume_mask = sh_ubc->active_mask();

	/*
	 * Disable breakpoints during exception handling.
	 */
	sh_ubc->disable_all();

	cpu = get_cpu();
	for (i = 0; i < sh_ubc->num_events; i++) {
		unsigned long event_mask = (1 << i);

		if (likely(!(cmf & event_mask)))
			continue;

		/*
		 * The counter may be concurrently released, but that can only
		 * occur from a call_rcu() path. We can therefore safely fetch
		 * the breakpoint, use its callback and touch its counter
		 * while we are in an rcu_read_lock() path.
		 */
		rcu_read_lock();

		bp = per_cpu(bp_per_reg[i], cpu);
		if (bp)
			rc = NOTIFY_DONE;

		/*
		 * Reset the condition match flag to denote completion of
		 * exception handling.
		 */
		sh_ubc->clear_triggered_mask(event_mask);

		/*
		 * bp can be NULL due to a concurrent perf counter
		 * removal.
		 */
		if (!bp) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Don't restore the channel if the breakpoint is from
		 * ptrace, as it always operates in one-shot mode.
		 */
		if (bp->overflow_handler == ptrace_triggered)
			resume_mask &= ~(1 << i);

		perf_bp_event(bp, args->regs);

		/* Deliver the signal to userspace */
		if (!arch_check_bp_in_kernelspace(bp)) {
			siginfo_t info;

			info.si_signo = args->signr;
			info.si_errno = notifier_to_errno(rc);
			info.si_code = TRAP_HWBKPT;

			force_sig_info(args->signr, &info, current);
		}

		rcu_read_unlock();
	}

	if (cmf == 0)
		rc = NOTIFY_DONE;

	sh_ubc->enable_all(resume_mask);

	put_cpu();

	return rc;
}

BUILD_TRAP_HANDLER(breakpoint)
{
	unsigned long ex = lookup_exception_vector();
	TRAP_HANDLER_DECL;

	notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
}

/*
 * Handle debug exception notifications.
 */
int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
					      unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val != DIE_BREAKPOINT)
		return NOTIFY_DONE;

	/*
	 * If the breakpoint hasn't been triggered by the UBC, it's
	 * probably from a debugger, so don't do anything more here.
	 *
	 * This also permits the UBC interface clock to remain off for
	 * non-UBC breakpoints, as we don't need to check the triggered
	 * or active channel masks.
	 */
	if (args->trapnr != sh_ubc->trap_nr)
		return NOTIFY_DONE;

	return hw_breakpoint_handler(data);
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}

int register_sh_ubc(struct sh_ubc *ubc)
{
	/* Bail if it's already assigned */
	if (sh_ubc != &ubc_dummy)
		return -EBUSY;
	sh_ubc = ubc;

	pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name);

	WARN_ON(ubc->num_events > HBP_NUM);

	return 0;
}
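
/*
 * A minimal sketch of how a CPU-specific UBC driver is expected to hook
 * into this facility: it fills out a struct sh_ubc (defined in
 * <asm/hw_breakpoint.h>) with its channel count, trap number, interface
 * clock and register-poking callbacks, then hands it to register_sh_ubc()
 * during boot. All names, the trap vector, the clock lookup string and the
 * initcall pattern below are placeholders, not taken from this file; see
 * the per-CPU support code under arch/sh/kernel/cpu/ for the real
 * implementations.
 *
 *	static struct sh_ubc my_ubc = {
 *		.name			= "my-ubc",
 *		.num_events		= 2,
 *		.trap_nr		= 0x1e0,
 *		.enable			= my_ubc_enable,
 *		.disable		= my_ubc_disable,
 *		.enable_all		= my_ubc_enable_all,
 *		.disable_all		= my_ubc_disable_all,
 *		.active_mask		= my_ubc_active_mask,
 *		.triggered_mask		= my_ubc_triggered_mask,
 *		.clear_triggered_mask	= my_ubc_clear_triggered_mask,
 *	};
 *
 *	static int __init my_ubc_init(void)
 *	{
 *		my_ubc.clk = clk_get(NULL, "ubc");
 *		return register_sh_ubc(&my_ubc);
 *	}
 *	arch_initcall(my_ubc_init);
 */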