Commit | Line | Data |
---|---|---|
09a07294 PM |
1 | /* |
2 | * arch/sh/kernel/hw_breakpoint.c | |
3 | * | |
4 | * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC. | |
5 | * | |
4352fc1b | 6 | * Copyright (C) 2009 - 2010 Paul Mundt |
09a07294 PM |
7 | * |
8 | * This file is subject to the terms and conditions of the GNU General Public | |
9 | * License. See the file "COPYING" in the main directory of this archive | |
10 | * for more details. | |
11 | */ | |
12 | #include <linux/init.h> | |
13 | #include <linux/perf_event.h> | |
14 | #include <linux/hw_breakpoint.h> | |
15 | #include <linux/percpu.h> | |
16 | #include <linux/kallsyms.h> | |
17 | #include <linux/notifier.h> | |
18 | #include <linux/kprobes.h> | |
19 | #include <linux/kdebug.h> | |
20 | #include <linux/io.h> | |
4352fc1b | 21 | #include <linux/clk.h> |
09a07294 PM |
22 | #include <asm/hw_breakpoint.h> |
23 | #include <asm/mmu_context.h> | |
34d0b5af | 24 | #include <asm/ptrace.h> |
09a07294 | 25 | |
09a07294 PM |
/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for each cpus
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);

/*
 * A dummy placeholder for early accesses until the CPUs get a chance to
 * register their UBCs later in the boot process.
 *
 * num_events == 0 makes every slot-scan loop below a no-op, so the
 * breakpoint API degrades gracefully before a real UBC is registered.
 */
static struct sh_ubc ubc_dummy = { .num_events = 0 };

/* Active UBC backend; swapped exactly once by register_sh_ubc(). */
static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy;
09a07294 PM |
39 | |
40 | /* | |
41 | * Install a perf counter breakpoint. | |
42 | * | |
43 | * We seek a free UBC channel and use it for this breakpoint. | |
44 | * | |
45 | * Atomic: we hold the counter->ctx->lock and we only handle variables | |
46 | * and registers local to this cpu. | |
47 | */ | |
48 | int arch_install_hw_breakpoint(struct perf_event *bp) | |
49 | { | |
50 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | |
09a07294 PM |
51 | int i; |
52 | ||
4352fc1b | 53 | for (i = 0; i < sh_ubc->num_events; i++) { |
09a07294 PM |
54 | struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]); |
55 | ||
56 | if (!*slot) { | |
57 | *slot = bp; | |
58 | break; | |
59 | } | |
60 | } | |
61 | ||
4352fc1b | 62 | if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot")) |
09a07294 PM |
63 | return -EBUSY; |
64 | ||
4352fc1b PM |
65 | clk_enable(sh_ubc->clk); |
66 | sh_ubc->enable(info, i); | |
09a07294 PM |
67 | |
68 | return 0; | |
69 | } | |
70 | ||
71 | /* | |
72 | * Uninstall the breakpoint contained in the given counter. | |
73 | * | |
74 | * First we search the debug address register it uses and then we disable | |
75 | * it. | |
76 | * | |
77 | * Atomic: we hold the counter->ctx->lock and we only handle variables | |
78 | * and registers local to this cpu. | |
79 | */ | |
80 | void arch_uninstall_hw_breakpoint(struct perf_event *bp) | |
81 | { | |
82 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | |
09a07294 PM |
83 | int i; |
84 | ||
4352fc1b | 85 | for (i = 0; i < sh_ubc->num_events; i++) { |
09a07294 PM |
86 | struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]); |
87 | ||
88 | if (*slot == bp) { | |
89 | *slot = NULL; | |
90 | break; | |
91 | } | |
92 | } | |
93 | ||
4352fc1b | 94 | if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot")) |
09a07294 PM |
95 | return; |
96 | ||
4352fc1b PM |
97 | sh_ubc->disable(info, i); |
98 | clk_disable(sh_ubc->clk); | |
09a07294 PM |
99 | } |
100 | ||
101 | static int get_hbp_len(u16 hbp_len) | |
102 | { | |
103 | unsigned int len_in_bytes = 0; | |
104 | ||
105 | switch (hbp_len) { | |
106 | case SH_BREAKPOINT_LEN_1: | |
107 | len_in_bytes = 1; | |
108 | break; | |
109 | case SH_BREAKPOINT_LEN_2: | |
110 | len_in_bytes = 2; | |
111 | break; | |
112 | case SH_BREAKPOINT_LEN_4: | |
113 | len_in_bytes = 4; | |
114 | break; | |
115 | case SH_BREAKPOINT_LEN_8: | |
116 | len_in_bytes = 8; | |
117 | break; | |
118 | } | |
119 | return len_in_bytes; | |
120 | } | |
121 | ||
122 | /* | |
123 | * Check for virtual address in user space. | |
124 | */ | |
125 | int arch_check_va_in_userspace(unsigned long va, u16 hbp_len) | |
126 | { | |
127 | unsigned int len; | |
128 | ||
129 | len = get_hbp_len(hbp_len); | |
130 | ||
131 | return (va <= TASK_SIZE - len); | |
132 | } | |
133 | ||
134 | /* | |
135 | * Check for virtual address in kernel space. | |
136 | */ | |
137 | static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len) | |
138 | { | |
139 | unsigned int len; | |
140 | ||
141 | len = get_hbp_len(hbp_len); | |
142 | ||
143 | return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); | |
144 | } | |
145 | ||
09a07294 PM |
146 | int arch_bp_generic_fields(int sh_len, int sh_type, |
147 | int *gen_len, int *gen_type) | |
148 | { | |
149 | /* Len */ | |
150 | switch (sh_len) { | |
151 | case SH_BREAKPOINT_LEN_1: | |
152 | *gen_len = HW_BREAKPOINT_LEN_1; | |
153 | break; | |
154 | case SH_BREAKPOINT_LEN_2: | |
155 | *gen_len = HW_BREAKPOINT_LEN_2; | |
156 | break; | |
157 | case SH_BREAKPOINT_LEN_4: | |
158 | *gen_len = HW_BREAKPOINT_LEN_4; | |
159 | break; | |
160 | case SH_BREAKPOINT_LEN_8: | |
161 | *gen_len = HW_BREAKPOINT_LEN_8; | |
162 | break; | |
163 | default: | |
164 | return -EINVAL; | |
165 | } | |
166 | ||
167 | /* Type */ | |
168 | switch (sh_type) { | |
169 | case SH_BREAKPOINT_READ: | |
170 | *gen_type = HW_BREAKPOINT_R; | |
171 | case SH_BREAKPOINT_WRITE: | |
172 | *gen_type = HW_BREAKPOINT_W; | |
173 | break; | |
174 | case SH_BREAKPOINT_RW: | |
175 | *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R; | |
176 | break; | |
177 | default: | |
178 | return -EINVAL; | |
179 | } | |
180 | ||
181 | return 0; | |
182 | } | |
183 | ||
184 | static int arch_build_bp_info(struct perf_event *bp) | |
185 | { | |
186 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | |
187 | ||
188 | info->address = bp->attr.bp_addr; | |
189 | ||
190 | /* Len */ | |
191 | switch (bp->attr.bp_len) { | |
192 | case HW_BREAKPOINT_LEN_1: | |
193 | info->len = SH_BREAKPOINT_LEN_1; | |
194 | break; | |
195 | case HW_BREAKPOINT_LEN_2: | |
196 | info->len = SH_BREAKPOINT_LEN_2; | |
197 | break; | |
198 | case HW_BREAKPOINT_LEN_4: | |
199 | info->len = SH_BREAKPOINT_LEN_4; | |
200 | break; | |
201 | case HW_BREAKPOINT_LEN_8: | |
202 | info->len = SH_BREAKPOINT_LEN_8; | |
203 | break; | |
204 | default: | |
205 | return -EINVAL; | |
206 | } | |
207 | ||
208 | /* Type */ | |
209 | switch (bp->attr.bp_type) { | |
210 | case HW_BREAKPOINT_R: | |
211 | info->type = SH_BREAKPOINT_READ; | |
212 | break; | |
213 | case HW_BREAKPOINT_W: | |
214 | info->type = SH_BREAKPOINT_WRITE; | |
215 | break; | |
216 | case HW_BREAKPOINT_W | HW_BREAKPOINT_R: | |
217 | info->type = SH_BREAKPOINT_RW; | |
218 | break; | |
219 | default: | |
220 | return -EINVAL; | |
221 | } | |
222 | ||
223 | return 0; | |
224 | } | |
225 | ||
226 | /* | |
227 | * Validate the arch-specific HW Breakpoint register settings | |
228 | */ | |
229 | int arch_validate_hwbkpt_settings(struct perf_event *bp, | |
230 | struct task_struct *tsk) | |
231 | { | |
232 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | |
233 | unsigned int align; | |
234 | int ret; | |
235 | ||
236 | ret = arch_build_bp_info(bp); | |
237 | if (ret) | |
238 | return ret; | |
239 | ||
240 | ret = -EINVAL; | |
241 | ||
242 | switch (info->len) { | |
243 | case SH_BREAKPOINT_LEN_1: | |
244 | align = 0; | |
245 | break; | |
246 | case SH_BREAKPOINT_LEN_2: | |
247 | align = 1; | |
248 | break; | |
249 | case SH_BREAKPOINT_LEN_4: | |
250 | align = 3; | |
251 | break; | |
252 | case SH_BREAKPOINT_LEN_8: | |
253 | align = 7; | |
254 | break; | |
255 | default: | |
256 | return ret; | |
257 | } | |
258 | ||
105244ec PM |
259 | /* |
260 | * For kernel-addresses, either the address or symbol name can be | |
261 | * specified. | |
262 | */ | |
263 | if (info->name) | |
264 | info->address = (unsigned long)kallsyms_lookup_name(info->name); | |
09a07294 PM |
265 | |
266 | /* | |
267 | * Check that the low-order bits of the address are appropriate | |
268 | * for the alignment implied by len. | |
269 | */ | |
270 | if (info->address & align) | |
271 | return -EINVAL; | |
272 | ||
273 | /* Check that the virtual address is in the proper range */ | |
274 | if (tsk) { | |
275 | if (!arch_check_va_in_userspace(info->address, info->len)) | |
276 | return -EFAULT; | |
277 | } else { | |
278 | if (!arch_check_va_in_kernelspace(info->address, info->len)) | |
279 | return -EFAULT; | |
280 | } | |
281 | ||
282 | return 0; | |
283 | } | |
284 | ||
285 | /* | |
286 | * Release the user breakpoints used by ptrace | |
287 | */ | |
288 | void flush_ptrace_hw_breakpoint(struct task_struct *tsk) | |
289 | { | |
290 | int i; | |
291 | struct thread_struct *t = &tsk->thread; | |
292 | ||
4352fc1b | 293 | for (i = 0; i < sh_ubc->num_events; i++) { |
09a07294 PM |
294 | unregister_hw_breakpoint(t->ptrace_bps[i]); |
295 | t->ptrace_bps[i] = NULL; | |
296 | } | |
297 | } | |
298 | ||
/*
 * Core UBC exception handler, invoked from the die-notifier path.
 *
 * Snapshots which channels triggered, disables the UBC while servicing
 * them, delivers perf events (and SIGTRAP to userspace hits), then
 * re-enables all still-active channels except one-shot ptrace ones.
 *
 * Returns NOTIFY_STOP when the event was consumed here, NOTIFY_DONE
 * to let other handlers (e.g. a debugger) see it.
 */
static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int cpu, i, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned int cmf, resume_mask;

	/*
	 * Do an early return if none of the channels triggered.
	 */
	cmf = sh_ubc->triggered_mask();
	if (unlikely(!cmf))
		return NOTIFY_DONE;

	/*
	 * By default, resume all of the active channels.
	 */
	resume_mask = sh_ubc->active_mask();

	/*
	 * Disable breakpoints during exception handling.
	 */
	sh_ubc->disable_all();

	cpu = get_cpu();
	for (i = 0; i < sh_ubc->num_events; i++) {
		unsigned long event_mask = (1 << i);

		/* Skip channels that did not fire. */
		if (likely(!(cmf & event_mask)))
			continue;

		/*
		 * The counter may be concurrently released but that can only
		 * occur from a call_rcu() path. We can then safely fetch
		 * the breakpoint, use its callback, touch its counter
		 * while we are in an rcu_read_lock() path.
		 */
		rcu_read_lock();

		bp = per_cpu(bp_per_reg[i], cpu);
		/*
		 * NOTE(review): a hit on a registered counter downgrades rc
		 * to NOTIFY_DONE while an orphaned hit leaves NOTIFY_STOP —
		 * this looks inverted relative to the usual notifier idiom;
		 * confirm the intended propagation before changing it.
		 */
		if (bp)
			rc = NOTIFY_DONE;

		/*
		 * Reset the condition match flag to denote completion of
		 * exception handling.
		 */
		sh_ubc->clear_triggered_mask(event_mask);

		/*
		 * bp can be NULL due to concurrent perf counter
		 * removing.
		 */
		if (!bp) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Don't restore the channel if the breakpoint is from
		 * ptrace, as it always operates in one-shot mode.
		 */
		if (bp->overflow_handler == ptrace_triggered)
			resume_mask &= ~(1 << i);

		perf_bp_event(bp, args->regs);

		/* Deliver the signal to userspace */
		if (arch_check_va_in_userspace(bp->attr.bp_addr,
					       bp->attr.bp_len)) {
			siginfo_t info;

			info.si_signo = args->signr;
			info.si_errno = notifier_to_errno(rc);
			info.si_code = TRAP_HWBKPT;

			force_sig_info(args->signr, &info, current);
		}

		rcu_read_unlock();
	}

	/*
	 * NOTE(review): cmf is the snapshot taken above and is never
	 * cleared in the loop, and the early return guarantees it is
	 * non-zero here — this branch appears unreachable; verify against
	 * the UBC backend before relying on it.
	 */
	if (cmf == 0)
		rc = NOTIFY_DONE;

	/* Re-arm every channel we did not retire above. */
	sh_ubc->enable_all(resume_mask);

	put_cpu();

	return rc;
}
389 | ||
/*
 * Trap entry for the UBC breakpoint vector: forwards the exception to
 * the die-notifier chain as DIE_BREAKPOINT, where
 * hw_breakpoint_exceptions_notify() picks it up.
 */
BUILD_TRAP_HANDLER(breakpoint)
{
	unsigned long ex = lookup_exception_vector();
	TRAP_HANDLER_DECL;

	notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
}
397 | ||
398 | /* | |
399 | * Handle debug exception notifications. | |
400 | */ | |
401 | int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused, | |
402 | unsigned long val, void *data) | |
403 | { | |
b74ab703 PM |
404 | struct die_args *args = data; |
405 | ||
09a07294 PM |
406 | if (val != DIE_BREAKPOINT) |
407 | return NOTIFY_DONE; | |
408 | ||
b74ab703 PM |
409 | /* |
410 | * If the breakpoint hasn't been triggered by the UBC, it's | |
411 | * probably from a debugger, so don't do anything more here. | |
4352fc1b PM |
412 | * |
413 | * This also permits the UBC interface clock to remain off for | |
414 | * non-UBC breakpoints, as we don't need to check the triggered | |
415 | * or active channel masks. | |
b74ab703 | 416 | */ |
4352fc1b | 417 | if (args->trapnr != sh_ubc->trap_nr) |
b74ab703 PM |
418 | return NOTIFY_DONE; |
419 | ||
09a07294 PM |
420 | return hw_breakpoint_handler(data); |
421 | } | |
422 | ||
/*
 * Read/update hook required by the perf PMU interface; the UBC has no
 * counter state to fold back, so this is an intentional no-op stub.
 */
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}
427 | ||
4352fc1b PM |
428 | int register_sh_ubc(struct sh_ubc *ubc) |
429 | { | |
430 | /* Bail if it's already assigned */ | |
431 | if (sh_ubc != &ubc_dummy) | |
432 | return -EBUSY; | |
433 | sh_ubc = ubc; | |
434 | ||
435 | pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name); | |
436 | ||
437 | WARN_ON(ubc->num_events > HBP_NUM); | |
438 | ||
439 | return 0; | |
440 | } |