Merge branch 'master' into sh/hw-breakpoints
[linux-2.6-block.git] / arch / sh / kernel / hw_breakpoint.c
CommitLineData
09a07294
PM
1/*
2 * arch/sh/kernel/hw_breakpoint.c
3 *
4 * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
5 *
6 * Copyright (C) 2009 Paul Mundt
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/init.h>
13#include <linux/perf_event.h>
14#include <linux/hw_breakpoint.h>
15#include <linux/percpu.h>
16#include <linux/kallsyms.h>
17#include <linux/notifier.h>
18#include <linux/kprobes.h>
19#include <linux/kdebug.h>
20#include <linux/io.h>
21#include <asm/hw_breakpoint.h>
22#include <asm/mmu_context.h>
23
/*
 * Software shadow of one UBC channel's programmed state, kept so the
 * exception path and uninstall can reconstruct what was written to the
 * hardware registers.
 */
struct ubc_context {
	unsigned long pc;	/* break address written to UBC_CAR0 */
	unsigned long state;	/* encoded len | type bits written to UBC_CBR0 */
};

/* Per cpu ubc channel state */
static DEFINE_PER_CPU(struct ubc_context, ubc_ctx[HBP_NUM]);

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for each cpus
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);
37
/*
 * Bring the UBC into a known state at boot: clear the channel 0 match
 * address/control registers and the common break control register, then
 * program the resume register with UBC_CRR_BIE | UBC_CRR_PCB.
 * (NOTE(review): assumes BIE enables break exceptions and PCB selects
 * the PC-break timing mode -- confirm against the CPU's UBC manual.)
 *
 * The writes are order-sensitive hardware accesses; do not reorder.
 */
static int __init ubc_init(void)
{
	__raw_writel(0, UBC_CAMR0);
	__raw_writel(0, UBC_CBR0);
	__raw_writel(0, UBC_CBCR);

	__raw_writel(UBC_CRR_BIE | UBC_CRR_PCB, UBC_CRR0);

	/* dummy read for write posting */
	(void)__raw_readl(UBC_CRR0);

	return 0;
}
arch_initcall(ubc_init);
52
53/*
54 * Install a perf counter breakpoint.
55 *
56 * We seek a free UBC channel and use it for this breakpoint.
57 *
58 * Atomic: we hold the counter->ctx->lock and we only handle variables
59 * and registers local to this cpu.
60 */
61int arch_install_hw_breakpoint(struct perf_event *bp)
62{
63 struct arch_hw_breakpoint *info = counter_arch_bp(bp);
64 struct ubc_context *ubc_ctx;
65 int i;
66
67 for (i = 0; i < HBP_NUM; i++) {
68 struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
69
70 if (!*slot) {
71 *slot = bp;
72 break;
73 }
74 }
75
76 if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
77 return -EBUSY;
78
79 ubc_ctx = &__get_cpu_var(ubc_ctx[i]);
80
81 ubc_ctx->pc = info->address;
82 ubc_ctx->state = info->len | info->type;
83
84 __raw_writel(UBC_CBR_CE | ubc_ctx->state, UBC_CBR0);
85 __raw_writel(ubc_ctx->pc, UBC_CAR0);
86
87 return 0;
88}
89
90/*
91 * Uninstall the breakpoint contained in the given counter.
92 *
93 * First we search the debug address register it uses and then we disable
94 * it.
95 *
96 * Atomic: we hold the counter->ctx->lock and we only handle variables
97 * and registers local to this cpu.
98 */
99void arch_uninstall_hw_breakpoint(struct perf_event *bp)
100{
101 struct arch_hw_breakpoint *info = counter_arch_bp(bp);
102 struct ubc_context *ubc_ctx;
103 int i;
104
105 for (i = 0; i < HBP_NUM; i++) {
106 struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
107
108 if (*slot == bp) {
109 *slot = NULL;
110 break;
111 }
112 }
113
114 if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
115 return;
116
117 ubc_ctx = &__get_cpu_var(ubc_ctx[i]);
118 ubc_ctx->pc = 0;
119 ubc_ctx->state &= ~(info->len | info->type);
120
121 __raw_writel(ubc_ctx->pc, UBC_CBR0);
122 __raw_writel(ubc_ctx->state, UBC_CAR0);
123}
124
125static int get_hbp_len(u16 hbp_len)
126{
127 unsigned int len_in_bytes = 0;
128
129 switch (hbp_len) {
130 case SH_BREAKPOINT_LEN_1:
131 len_in_bytes = 1;
132 break;
133 case SH_BREAKPOINT_LEN_2:
134 len_in_bytes = 2;
135 break;
136 case SH_BREAKPOINT_LEN_4:
137 len_in_bytes = 4;
138 break;
139 case SH_BREAKPOINT_LEN_8:
140 len_in_bytes = 8;
141 break;
142 }
143 return len_in_bytes;
144}
145
146/*
147 * Check for virtual address in user space.
148 */
149int arch_check_va_in_userspace(unsigned long va, u16 hbp_len)
150{
151 unsigned int len;
152
153 len = get_hbp_len(hbp_len);
154
155 return (va <= TASK_SIZE - len);
156}
157
158/*
159 * Check for virtual address in kernel space.
160 */
161static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
162{
163 unsigned int len;
164
165 len = get_hbp_len(hbp_len);
166
167 return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
168}
169
170/*
171 * Store a breakpoint's encoded address, length, and type.
172 */
173static int arch_store_info(struct perf_event *bp)
174{
175 struct arch_hw_breakpoint *info = counter_arch_bp(bp);
176
177 /*
178 * User-space requests will always have the address field populated
179 * For kernel-addresses, either the address or symbol name can be
180 * specified.
181 */
182 if (info->name)
183 info->address = (unsigned long)kallsyms_lookup_name(info->name);
184 if (info->address) {
185 info->asid = get_asid();
186 return 0;
187 }
188
189 return -EINVAL;
190}
191
192int arch_bp_generic_fields(int sh_len, int sh_type,
193 int *gen_len, int *gen_type)
194{
195 /* Len */
196 switch (sh_len) {
197 case SH_BREAKPOINT_LEN_1:
198 *gen_len = HW_BREAKPOINT_LEN_1;
199 break;
200 case SH_BREAKPOINT_LEN_2:
201 *gen_len = HW_BREAKPOINT_LEN_2;
202 break;
203 case SH_BREAKPOINT_LEN_4:
204 *gen_len = HW_BREAKPOINT_LEN_4;
205 break;
206 case SH_BREAKPOINT_LEN_8:
207 *gen_len = HW_BREAKPOINT_LEN_8;
208 break;
209 default:
210 return -EINVAL;
211 }
212
213 /* Type */
214 switch (sh_type) {
215 case SH_BREAKPOINT_READ:
216 *gen_type = HW_BREAKPOINT_R;
217 case SH_BREAKPOINT_WRITE:
218 *gen_type = HW_BREAKPOINT_W;
219 break;
220 case SH_BREAKPOINT_RW:
221 *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
222 break;
223 default:
224 return -EINVAL;
225 }
226
227 return 0;
228}
229
230static int arch_build_bp_info(struct perf_event *bp)
231{
232 struct arch_hw_breakpoint *info = counter_arch_bp(bp);
233
234 info->address = bp->attr.bp_addr;
235
236 /* Len */
237 switch (bp->attr.bp_len) {
238 case HW_BREAKPOINT_LEN_1:
239 info->len = SH_BREAKPOINT_LEN_1;
240 break;
241 case HW_BREAKPOINT_LEN_2:
242 info->len = SH_BREAKPOINT_LEN_2;
243 break;
244 case HW_BREAKPOINT_LEN_4:
245 info->len = SH_BREAKPOINT_LEN_4;
246 break;
247 case HW_BREAKPOINT_LEN_8:
248 info->len = SH_BREAKPOINT_LEN_8;
249 break;
250 default:
251 return -EINVAL;
252 }
253
254 /* Type */
255 switch (bp->attr.bp_type) {
256 case HW_BREAKPOINT_R:
257 info->type = SH_BREAKPOINT_READ;
258 break;
259 case HW_BREAKPOINT_W:
260 info->type = SH_BREAKPOINT_WRITE;
261 break;
262 case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
263 info->type = SH_BREAKPOINT_RW;
264 break;
265 default:
266 return -EINVAL;
267 }
268
269 return 0;
270}
271
272/*
273 * Validate the arch-specific HW Breakpoint register settings
274 */
/*
 * Validate the arch-specific HW Breakpoint register settings
 *
 * Builds the arch encoding from the perf attr, checks that the address
 * is aligned for the requested length, and verifies the address lies in
 * user space (when tsk is set) or kernel space (when it is NULL).
 *
 * Returns 0 on success, -EINVAL for a bad length/alignment, -EFAULT for
 * an out-of-range address, or the error from arch_build_bp_info() /
 * arch_store_info().
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp,
				  struct task_struct *tsk)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned int align;
	int ret;

	ret = arch_build_bp_info(bp);
	if (ret)
		return ret;

	ret = -EINVAL;

	/* Derive the low-order alignment mask from the encoded length. */
	switch (info->len) {
	case SH_BREAKPOINT_LEN_1:
		align = 0;
		break;
	case SH_BREAKPOINT_LEN_2:
		align = 1;
		break;
	case SH_BREAKPOINT_LEN_4:
		align = 3;
		break;
	case SH_BREAKPOINT_LEN_8:
		align = 7;
		break;
	default:
		return ret;
	}

	/*
	 * NOTE(review): when bp->callback is NULL, ret keeps its -EINVAL
	 * value from above and the check below rejects the breakpoint --
	 * confirm that a callback really is mandatory here, or whether
	 * arch_store_info() should run unconditionally.
	 */
	if (bp->callback)
		ret = arch_store_info(bp);

	if (ret < 0)
		return ret;

	/*
	 * Check that the low-order bits of the address are appropriate
	 * for the alignment implied by len.
	 */
	if (info->address & align)
		return -EINVAL;

	/* Check that the virtual address is in the proper range */
	if (tsk) {
		if (!arch_check_va_in_userspace(info->address, info->len))
			return -EFAULT;
	} else {
		if (!arch_check_va_in_kernelspace(info->address, info->len))
			return -EFAULT;
	}

	return 0;
}
329
330/*
331 * Release the user breakpoints used by ptrace
332 */
333void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
334{
335 int i;
336 struct thread_struct *t = &tsk->thread;
337
338 for (i = 0; i < HBP_NUM; i++) {
339 unregister_hw_breakpoint(t->ptrace_bps[i]);
340 t->ptrace_bps[i] = NULL;
341 }
342}
343
/*
 * Core UBC exception handler: disable the channel, dispatch the
 * callbacks of the per-cpu counters, then re-arm the hardware for the
 * last counter seen.
 *
 * NOTE(review): rc starts as NOTIFY_STOP and is set to NOTIFY_DONE when
 * a breakpoint is found -- that looks inverted relative to the usual
 * "handled => NOTIFY_STOP" convention; confirm the intended semantics.
 * Also note only the channel 0 registers (UBC_CBR0/UBC_CAR0) are
 * touched regardless of which slot matched.
 */
static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int cpu, i, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned long val;

	/* Disable the channel before running callbacks (clear CE). */
	val = __raw_readl(UBC_CBR0);
	__raw_writel(val & ~UBC_CBR_CE, UBC_CBR0);

	cpu = get_cpu();
	for (i = 0; i < HBP_NUM; i++) {
		/*
		 * The counter may be concurrently released but that can only
		 * occur from a call_rcu() path. We can then safely fetch
		 * the breakpoint, use its callback, touch its counter
		 * while we are in an rcu_read_lock() path.
		 */
		rcu_read_lock();

		bp = per_cpu(bp_per_reg[i], cpu);
		if (bp) {
			rc = NOTIFY_DONE;
		} else {
			/* Empty slot: no further slots are inspected. */
			rcu_read_unlock();
			break;
		}

		(bp->callback)(bp, args->regs);

		rcu_read_unlock();
	}

	/* Re-arm the channel for the last breakpoint dispatched above. */
	if (bp) {
		struct arch_hw_breakpoint *info = counter_arch_bp(bp);

		__raw_writel(UBC_CBR_CE | info->len | info->type, UBC_CBR0);
		__raw_writel(info->address, UBC_CAR0);
	}

	put_cpu();

	return rc;
}
387
/*
 * Low-level trap entry for the UBC breakpoint vector: forward the event
 * to the die notifier chain as DIE_BREAKPOINT so that
 * hw_breakpoint_exceptions_notify() picks it up.
 */
BUILD_TRAP_HANDLER(breakpoint)
{
	unsigned long ex = lookup_exception_vector();
	TRAP_HANDLER_DECL;

	notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
}
395
396/*
397 * Handle debug exception notifications.
398 */
399int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
400 unsigned long val, void *data)
401{
402 if (val != DIE_BREAKPOINT)
403 return NOTIFY_DONE;
404
405 return hw_breakpoint_handler(data);
406}
407
/* Required perf pmu hook; not yet implemented for SH. */
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}
412
/* Required perf pmu hook; not yet implemented for SH. */
void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
{
	/* TODO */
}