/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "hw-breakpoint: " fmt

#include <linux/compat.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/smp.h>

#include <asm/compat.h>
#include <asm/current.h>
#include <asm/debug-monitors.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>

/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/* Currently stepping a per-CPU kernel breakpoint. */
static DEFINE_PER_CPU(int, stepping_kernel_bp);

/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps;
static int core_num_wrps;

int hw_breakpoint_slots(int type)
{
	/*
	 * We can be called early, so don't rely on
	 * our static variables being initialised.
	 */
	switch (type) {
	case TYPE_INST:
		return get_num_brps();
	case TYPE_DATA:
		return get_num_wrps();
	default:
		pr_warning("unknown slot type: %d\n", type);
		return 0;
	}
}

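/*
 * The breakpoint/watchpoint value and control registers are banked as
 * individually named system registers (e.g. DBGBVRn_EL1), so they cannot
 * be indexed at run time. The switch tables generated by the macros below
 * dispatch register number N to a dedicated MRS/MSR accessor per case.
 */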
#define READ_WB_REG_CASE(OFF, N, REG, VAL)	\
	case (OFF + N):				\
		AARCH64_DBG_READ(N, REG, VAL);	\
		break

#define WRITE_WB_REG_CASE(OFF, N, REG, VAL)	\
	case (OFF + N):				\
		AARCH64_DBG_WRITE(N, REG, VAL);	\
		break

#define GEN_READ_WB_REG_CASES(OFF, REG, VAL)	\
	READ_WB_REG_CASE(OFF,  0, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  1, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  2, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  3, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  4, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  5, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  6, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  7, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  8, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  9, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 10, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 11, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 12, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 13, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 14, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 15, REG, VAL)

#define GEN_WRITE_WB_REG_CASES(OFF, REG, VAL)	\
	WRITE_WB_REG_CASE(OFF,  0, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  1, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  2, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  3, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  4, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  5, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  6, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  7, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  8, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  9, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 10, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 11, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 12, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 13, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 14, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 15, REG, VAL)

static u64 read_wb_reg(int reg, int n)
{
	u64 val = 0;

	switch (reg + n) {
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
	default:
		pr_warning("attempt to read from unknown breakpoint register %d\n", n);
	}

	return val;
}

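/*
 * Debug register updates only take effect once the instruction stream is
 * synchronised, hence the ISB after each write.
 */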
static void write_wb_reg(int reg, int n, u64 val)
{
	switch (reg + n) {
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
	default:
		pr_warning("attempt to write to unknown breakpoint register %d\n", n);
	}
	isb();
}

/*
 * Convert a breakpoint privilege level to the corresponding exception
 * level.
 */
static enum dbg_active_el debug_exception_level(int privilege)
{
	switch (privilege) {
	case AARCH64_BREAKPOINT_EL0:
		return DBG_ACTIVE_EL0;
	case AARCH64_BREAKPOINT_EL1:
		return DBG_ACTIVE_EL1;
	default:
		pr_warning("invalid breakpoint privilege level %d\n", privilege);
		return -EINVAL;
	}
}

enum hw_breakpoint_ops {
	HW_BREAKPOINT_INSTALL,
	HW_BREAKPOINT_UNINSTALL,
	HW_BREAKPOINT_RESTORE
};

static int is_compat_bp(struct perf_event *bp)
{
	struct task_struct *tsk = bp->hw.target;

	/*
	 * tsk can be NULL for per-cpu (non-ptrace) breakpoints.
	 * In this case, use the native interface, since we don't have
	 * the notion of a "compat CPU" and could end up relying on
	 * deprecated behaviour if we use unaligned watchpoints in
	 * AArch64 state.
	 */
	return tsk && is_compat_thread(task_thread_info(tsk));
}

/**
 * hw_breakpoint_slot_setup - Find and setup a perf slot according to
 *			      operations
 *
 * @slots: pointer to array of slots
 * @max_slots: max number of slots
 * @bp: perf_event to setup
 * @ops: operation to be carried out on the slot
 *
 * Return:
 *	slot index on success
 *	-ENOSPC if no slot is available/matches
 *	-EINVAL on wrong operations parameter
 */
static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots,
				    struct perf_event *bp,
				    enum hw_breakpoint_ops ops)
{
	int i;
	struct perf_event **slot;

	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];
		switch (ops) {
		case HW_BREAKPOINT_INSTALL:
			if (!*slot) {
				*slot = bp;
				return i;
			}
			break;
		case HW_BREAKPOINT_UNINSTALL:
			if (*slot == bp) {
				*slot = NULL;
				return i;
			}
			break;
		case HW_BREAKPOINT_RESTORE:
			if (*slot == bp)
				return i;
			break;
		default:
			pr_warn_once("Unhandled hw breakpoint ops %d\n", ops);
			return -EINVAL;
		}
	}
	return -ENOSPC;
}

static int hw_breakpoint_control(struct perf_event *bp,
				 enum hw_breakpoint_ops ops)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slots;
	struct debug_info *debug_info = &current->thread.debug;
	int i, max_slots, ctrl_reg, val_reg, reg_enable;
	enum dbg_active_el dbg_el = debug_exception_level(info->ctrl.privilege);
	u32 ctrl;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		ctrl_reg = AARCH64_DBG_REG_BCR;
		val_reg = AARCH64_DBG_REG_BVR;
		slots = this_cpu_ptr(bp_on_reg);
		max_slots = core_num_brps;
		reg_enable = !debug_info->bps_disabled;
	} else {
		/* Watchpoint */
		ctrl_reg = AARCH64_DBG_REG_WCR;
		val_reg = AARCH64_DBG_REG_WVR;
		slots = this_cpu_ptr(wp_on_reg);
		max_slots = core_num_wrps;
		reg_enable = !debug_info->wps_disabled;
	}

	i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops);

	if (WARN_ONCE(i < 0, "Can't find any breakpoint slot"))
		return i;

	switch (ops) {
	case HW_BREAKPOINT_INSTALL:
		/*
		 * Ensure debug monitors are enabled at the correct exception
		 * level.
		 */
		enable_debug_monitors(dbg_el);
		/* Fall through */
	case HW_BREAKPOINT_RESTORE:
		/* Setup the address register. */
		write_wb_reg(val_reg, i, info->address);

		/* Setup the control register. */
		ctrl = encode_ctrl_reg(info->ctrl);
		write_wb_reg(ctrl_reg, i,
			     reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
		break;
	case HW_BREAKPOINT_UNINSTALL:
		/* Reset the control register. */
		write_wb_reg(ctrl_reg, i, 0);

		/*
		 * Release the debug monitors for the correct exception
		 * level.
		 */
		disable_debug_monitors(dbg_el);
		break;
	}

	return 0;
}

/*
 * Install a perf counter breakpoint.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
}

void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
}

304
305static int get_hbp_len(u8 hbp_len)
306{
307 unsigned int len_in_bytes = 0;
308
309 switch (hbp_len) {
310 case ARM_BREAKPOINT_LEN_1:
311 len_in_bytes = 1;
312 break;
313 case ARM_BREAKPOINT_LEN_2:
314 len_in_bytes = 2;
315 break;
316 case ARM_BREAKPOINT_LEN_4:
317 len_in_bytes = 4;
318 break;
319 case ARM_BREAKPOINT_LEN_8:
320 len_in_bytes = 8;
321 break;
322 }
323
324 return len_in_bytes;
325}
326
/*
 * Check whether bp virtual address is in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
	unsigned int len;
	unsigned long va;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	va = info->address;
	len = get_hbp_len(info->ctrl.len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

/*
 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 * Hopefully this will disappear when ptrace can bypass the conversion
 * to generic breakpoint descriptions.
 */
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
			   int *gen_len, int *gen_type)
{
	/* Type */
	switch (ctrl.type) {
	case ARM_BREAKPOINT_EXECUTE:
		*gen_type = HW_BREAKPOINT_X;
		break;
	case ARM_BREAKPOINT_LOAD:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (ctrl.len) {
	case ARM_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_X:
		info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
		break;
	case HW_BREAKPOINT_R:
		info->ctrl.type = ARM_BREAKPOINT_LOAD;
		break;
	case HW_BREAKPOINT_W:
		info->ctrl.type = ARM_BREAKPOINT_STORE;
		break;
	case HW_BREAKPOINT_RW:
		info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->ctrl.len = ARM_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->ctrl.len = ARM_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->ctrl.len = ARM_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->ctrl.len = ARM_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * On AArch64, we only permit breakpoints of length 4, whereas
	 * AArch32 also requires breakpoints of length 2 for Thumb.
	 * Watchpoints can be of length 1, 2, 4 or 8 bytes.
	 */
	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		if (is_compat_bp(bp)) {
			if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
			    info->ctrl.len != ARM_BREAKPOINT_LEN_4)
				return -EINVAL;
		} else if (info->ctrl.len != ARM_BREAKPOINT_LEN_4) {
			/*
			 * FIXME: Some tools (I'm looking at you perf) assume
			 * that breakpoints should be sizeof(long). This
			 * is nonsense. For now, we fix up the parameter
			 * but we should probably return -EINVAL instead.
			 */
			info->ctrl.len = ARM_BREAKPOINT_LEN_4;
		}
	}

	/* Address */
	info->address = bp->attr.bp_addr;

	/*
	 * Privilege
	 * Note that we disallow combined EL0/EL1 breakpoints because
	 * that would complicate the stepping code.
	 */
	if (arch_check_bp_in_kernelspace(bp))
		info->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
	else
		info->ctrl.privilege = AARCH64_BREAKPOINT_EL0;

	/* Enabled? */
	info->ctrl.enabled = !bp->attr.disabled;

	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int ret;
	u64 alignment_mask, offset;

	/* Build the arch_hw_breakpoint. */
	ret = arch_build_bp_info(bp);
	if (ret)
		return ret;

	/*
	 * Check address alignment.
	 * We don't do any clever alignment correction for watchpoints
	 * because using 64-bit unaligned addresses is deprecated for
	 * AArch64.
	 *
	 * AArch32 tasks expect some simple alignment fixups, so emulate
	 * that here.
	 */
	if (is_compat_bp(bp)) {
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
			alignment_mask = 0x7;
		else
			alignment_mask = 0x3;
		offset = info->address & alignment_mask;
		switch (offset) {
		case 0:
			/* Aligned */
			break;
		case 1:
			/* Allow single byte watchpoint. */
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
				break;
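			/* Fall through */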
		case 2:
			/* Allow halfword watchpoints and breakpoints. */
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
				break;
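			/* Fall through */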
		default:
			return -EINVAL;
		}

		info->address &= ~alignment_mask;
		info->ctrl.len <<= offset;
	} else {
		if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE)
			alignment_mask = 0x3;
		else
			alignment_mask = 0x7;
		if (info->address & alignment_mask)
			return -EINVAL;
	}

	/*
	 * Disallow per-task kernel breakpoints since these would
	 * complicate the stepping code.
	 */
	if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
		return -EINVAL;

	return 0;
}

/*
 * Enable/disable all of the breakpoints active at the specified
 * exception level at the register level.
 * This is used when single-stepping after a breakpoint exception.
 */
static void toggle_bp_registers(int reg, enum dbg_active_el el, int enable)
{
	int i, max_slots, privilege;
	u32 ctrl;
	struct perf_event **slots;

	switch (reg) {
	case AARCH64_DBG_REG_BCR:
		slots = this_cpu_ptr(bp_on_reg);
		max_slots = core_num_brps;
		break;
	case AARCH64_DBG_REG_WCR:
		slots = this_cpu_ptr(wp_on_reg);
		max_slots = core_num_wrps;
		break;
	default:
		return;
	}

	for (i = 0; i < max_slots; ++i) {
		if (!slots[i])
			continue;

		privilege = counter_arch_bp(slots[i])->ctrl.privilege;
		if (debug_exception_level(privilege) != el)
			continue;

		ctrl = read_wb_reg(reg, i);
		if (enable)
			ctrl |= 0x1;
		else
			ctrl &= ~0x1;
		write_wb_reg(reg, i, ctrl);
	}
}

/*
 * Debug exception handlers.
 */
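/*
 * Called on a hardware breakpoint exception. If a matching breakpoint uses
 * the default perf overflow handler, breakpoints at the triggering exception
 * level are disabled and single-step is enabled so that the faulting
 * instruction can execute; the step handler then re-arms the breakpoints.
 */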
static int breakpoint_handler(unsigned long unused, unsigned int esr,
			      struct pt_regs *regs)
{
	int i, step = 0, *kernel_step;
	u32 ctrl_reg;
	u64 addr, val;
	struct perf_event *bp, **slots;
	struct debug_info *debug_info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = this_cpu_ptr(bp_on_reg);
	addr = instruction_pointer(regs);
	debug_info = &current->thread.debug;

	for (i = 0; i < core_num_brps; ++i) {
		rcu_read_lock();

		bp = slots[i];

		if (bp == NULL)
			goto unlock;

		/* Check if the breakpoint value matches. */
		val = read_wb_reg(AARCH64_DBG_REG_BVR, i);
		if (val != (addr & ~0x3))
			goto unlock;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(AARCH64_DBG_REG_BCR, i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if (!((1 << (addr & 0x3)) & ctrl.len))
			goto unlock;

		counter_arch_bp(bp)->trigger = addr;
		perf_bp_event(bp, regs);

		/* Do we need to handle the stepping? */
		if (is_default_overflow_handler(bp))
			step = 1;
unlock:
		rcu_read_unlock();
	}

	if (!step)
		return 0;

	if (user_mode(regs)) {
		debug_info->bps_disabled = 1;
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 0);

		/* If we're already stepping a watchpoint, just return. */
		if (debug_info->wps_disabled)
			return 0;

		if (test_thread_flag(TIF_SINGLESTEP))
			debug_info->suspended_step = 1;
		else
			user_enable_single_step(current);
	} else {
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
		kernel_step = this_cpu_ptr(&stepping_kernel_bp);

		if (*kernel_step != ARM_KERNEL_STEP_NONE)
			return 0;

		if (kernel_active_single_step()) {
			*kernel_step = ARM_KERNEL_STEP_SUSPEND;
		} else {
			*kernel_step = ARM_KERNEL_STEP_ACTIVE;
			kernel_enable_single_step(regs);
		}
	}

	return 0;
}

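/*
 * Called on a watchpoint exception. The faulting address is compared
 * against each watchpoint value register, allowing for the AArch32
 * alignment fixups applied at validation time, and the ESR access bit
 * (WnR) tells us whether the access was a load or a store.
 */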
static int watchpoint_handler(unsigned long addr, unsigned int esr,
			      struct pt_regs *regs)
{
	int i, step = 0, *kernel_step, access;
	u32 ctrl_reg;
	u64 val, alignment_mask;
	struct perf_event *wp, **slots;
	struct debug_info *debug_info;
	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = this_cpu_ptr(wp_on_reg);
	debug_info = &current->thread.debug;

	for (i = 0; i < core_num_wrps; ++i) {
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL)
			goto unlock;

		info = counter_arch_bp(wp);
		/* AArch32 watchpoints are either 4 or 8 bytes aligned. */
		if (is_compat_task()) {
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
				alignment_mask = 0x7;
			else
				alignment_mask = 0x3;
		} else {
			alignment_mask = 0x7;
		}

		/* Check if the watchpoint value matches. */
		val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
		if (val != (addr & ~alignment_mask))
			goto unlock;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if (!((1 << (addr & alignment_mask)) & ctrl.len))
			goto unlock;

		/*
		 * Check that the access type matches.
		 * 0 => load, otherwise => store
		 */
		access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
			 HW_BREAKPOINT_R;
		if (!(access & hw_breakpoint_type(wp)))
			goto unlock;

		info->trigger = addr;
		perf_bp_event(wp, regs);

		/* Do we need to handle the stepping? */
		if (is_default_overflow_handler(wp))
			step = 1;

unlock:
		rcu_read_unlock();
	}

	if (!step)
		return 0;

	/*
	 * We always disable EL0 watchpoints because the kernel can
	 * cause these to fire via an unprivileged access.
	 */
	toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 0);

	if (user_mode(regs)) {
		debug_info->wps_disabled = 1;

		/* If we're already stepping a breakpoint, just return. */
		if (debug_info->bps_disabled)
			return 0;

		if (test_thread_flag(TIF_SINGLESTEP))
			debug_info->suspended_step = 1;
		else
			user_enable_single_step(current);
	} else {
		toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
		kernel_step = this_cpu_ptr(&stepping_kernel_bp);

		if (*kernel_step != ARM_KERNEL_STEP_NONE)
			return 0;

		if (kernel_active_single_step()) {
			*kernel_step = ARM_KERNEL_STEP_SUSPEND;
		} else {
			*kernel_step = ARM_KERNEL_STEP_ACTIVE;
			kernel_enable_single_step(regs);
		}
	}

	return 0;
}

/*
 * Handle single-step exception.
 */
int reinstall_suspended_bps(struct pt_regs *regs)
{
	struct debug_info *debug_info = &current->thread.debug;
	int handled_exception = 0, *kernel_step;

	kernel_step = this_cpu_ptr(&stepping_kernel_bp);

	/*
	 * Called from single-step exception handler.
	 * Return 0 if execution can resume, 1 if a SIGTRAP should be
	 * reported.
	 */
	if (user_mode(regs)) {
		if (debug_info->bps_disabled) {
			debug_info->bps_disabled = 0;
			toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 1);
			handled_exception = 1;
		}

		if (debug_info->wps_disabled) {
			debug_info->wps_disabled = 0;
			toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);
			handled_exception = 1;
		}

		if (handled_exception) {
			if (debug_info->suspended_step) {
				debug_info->suspended_step = 0;
				/* Allow exception handling to fall-through. */
				handled_exception = 0;
			} else {
				user_disable_single_step(current);
			}
		}
	} else if (*kernel_step != ARM_KERNEL_STEP_NONE) {
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 1);
		toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 1);

		if (!debug_info->wps_disabled)
			toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);

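		/*
		 * If single-step was already active when the breakpoint
		 * fired (ARM_KERNEL_STEP_SUSPEND), leave it enabled for
		 * its original user and report the exception as unhandled.
		 */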
		if (*kernel_step != ARM_KERNEL_STEP_SUSPEND) {
			kernel_disable_single_step();
			handled_exception = 1;
		} else {
			handled_exception = 0;
		}

		*kernel_step = ARM_KERNEL_STEP_NONE;
	}

	return !handled_exception;
}

/*
 * Context-switcher for restoring suspended breakpoints.
 */
void hw_breakpoint_thread_switch(struct task_struct *next)
{
	/*
	 *           current        next
	 * disabled: 0              0     => The usual case, NOTIFY_DONE
	 *           0              1     => Disable the registers
	 *           1              0     => Enable the registers
	 *           1              1     => NOTIFY_DONE. per-task bps will
	 *                                   get taken care of by perf.
	 */

	struct debug_info *current_debug_info, *next_debug_info;

	current_debug_info = &current->thread.debug;
	next_debug_info = &next->thread.debug;

	/* Update breakpoints. */
	if (current_debug_info->bps_disabled != next_debug_info->bps_disabled)
		toggle_bp_registers(AARCH64_DBG_REG_BCR,
				    DBG_ACTIVE_EL0,
				    !next_debug_info->bps_disabled);

	/* Update watchpoints. */
	if (current_debug_info->wps_disabled != next_debug_info->wps_disabled)
		toggle_bp_registers(AARCH64_DBG_REG_WCR,
				    DBG_ACTIVE_EL0,
				    !next_debug_info->wps_disabled);
}

/*
 * CPU initialisation.
 */
static void hw_breakpoint_reset(void *unused)
{
	int i;
	struct perf_event **slots;
	/*
	 * When a CPU goes through cold-boot, it does not have any installed
	 * slot, so it is safe to share the same function for restoring and
	 * resetting breakpoints; when a CPU is hotplugged in, it goes
	 * through the slots, which are all empty, hence it just resets control
	 * and value for debug registers.
	 * When this function is triggered on warm-boot through a CPU PM
	 * notifier some slots might be initialized; if so they are
	 * reprogrammed according to the debug slots content.
	 */
	for (slots = this_cpu_ptr(bp_on_reg), i = 0; i < core_num_brps; ++i) {
		if (slots[i]) {
			hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
		} else {
			write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
			write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
		}
	}

	for (slots = this_cpu_ptr(wp_on_reg), i = 0; i < core_num_wrps; ++i) {
		if (slots[i]) {
			hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
		} else {
			write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
			write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
		}
	}
}

static int hw_breakpoint_reset_notify(struct notifier_block *self,
				      unsigned long action,
				      void *hcpu)
{
	if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE) {
		local_irq_disable();
		hw_breakpoint_reset(NULL);
		local_irq_enable();
	}
	return NOTIFY_OK;
}

static struct notifier_block hw_breakpoint_reset_nb = {
	.notifier_call = hw_breakpoint_reset_notify,
};

#ifdef CONFIG_CPU_PM
extern void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *));
#else
static inline void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
{
}
#endif

/*
 * One-time initialisation.
 */
static int __init arch_hw_breakpoint_init(void)
{
	core_num_brps = get_num_brps();
	core_num_wrps = get_num_wrps();

	pr_info("found %d breakpoint and %d watchpoint registers.\n",
		core_num_brps, core_num_wrps);

	cpu_notifier_register_begin();

	/*
	 * Reset the breakpoint resources. We assume that a halting
	 * debugger will leave the world in a nice state for us.
	 */
	smp_call_function(hw_breakpoint_reset, NULL, 1);
	hw_breakpoint_reset(NULL);

	/* Register debug fault handlers. */
	hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
			      TRAP_HWBKPT, "hw-breakpoint handler");
	hook_debug_fault_code(DBG_ESR_EVT_HWWP, watchpoint_handler, SIGTRAP,
			      TRAP_HWBKPT, "hw-watchpoint handler");

	/* Register hotplug notifier. */
	__register_cpu_notifier(&hw_breakpoint_reset_nb);

	cpu_notifier_register_done();

	/* Register cpu_suspend hw breakpoint restore hook */
	cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);

	return 0;
}
arch_initcall(arch_hw_breakpoint_init);

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

/*
 * Dummy function to register with die_notifier.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
				    unsigned long val, void *data)
{
	return NOTIFY_DONE;
}