perf/hw_breakpoint: Pass arch breakpoint struct to arch_check_bp_in_kernelspace()
[linux-2.6-block.git] arch/xtensa/kernel/hw_breakpoint.c
/*
 * Xtensa hardware breakpoints/watchpoints handling functions
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2016 Cadence Design Systems Inc.
 */

#include <linux/hw_breakpoint.h>
#include <linux/log2.h>
#include <linux/percpu.h>
#include <linux/perf_event.h>
#include <variant/core.h>

/* Breakpoint currently in use for each IBREAKA. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[XCHAL_NUM_IBREAK]);

/* Watchpoint currently in use for each DBREAKA. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[XCHAL_NUM_DBREAK]);

int hw_breakpoint_slots(int type)
{
	switch (type) {
	case TYPE_INST:
		return XCHAL_NUM_IBREAK;
	case TYPE_DATA:
		return XCHAL_NUM_DBREAK;
	default:
		pr_warn("unknown slot type: %d\n", type);
		return 0;
	}
}

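/*
 * Return true if the breakpoint range lies entirely in kernel space,
 * i.e. both the first and the last byte of the range are at or above
 * TASK_SIZE.
 */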
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
	unsigned int len;
	unsigned long va;

	va = hw->address;
	len = hw->len;

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_X:
		info->type = XTENSA_BREAKPOINT_EXECUTE;
		break;
	case HW_BREAKPOINT_R:
		info->type = XTENSA_BREAKPOINT_LOAD;
		break;
	case HW_BREAKPOINT_W:
		info->type = XTENSA_BREAKPOINT_STORE;
		break;
	case HW_BREAKPOINT_RW:
		info->type = XTENSA_BREAKPOINT_LOAD | XTENSA_BREAKPOINT_STORE;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	info->len = bp->attr.bp_len;
	if (info->len < 1 || info->len > 64 || !is_power_of_2(info->len))
		return -EINVAL;

	/* Address */
	info->address = bp->attr.bp_addr;
	if (info->address & (info->len - 1))
		return -EINVAL;

	return 0;
}

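/*
 * Validate the settings of a newly created breakpoint by building its
 * arch_hw_breakpoint description; any error from arch_build_bp_info()
 * is returned unchanged.
 */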
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	int ret;

	/* Build the arch_hw_breakpoint. */
	ret = arch_build_bp_info(bp);
	return ret;
}

int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
				    unsigned long val, void *data)
{
	return NOTIFY_DONE;
}

static void xtensa_wsr(unsigned long v, u8 sr)
{
	/* We don't have an indexed wsr and generating the instruction
	 * dynamically doesn't seem worth it given how small XCHAL_NUM_IBREAK
	 * and XCHAL_NUM_DBREAK are. Thus the switch. If the build breaks
	 * here, the switch below needs to be extended.
	 */
	BUILD_BUG_ON(XCHAL_NUM_IBREAK > 2);
	BUILD_BUG_ON(XCHAL_NUM_DBREAK > 2);

	switch (sr) {
#if XCHAL_NUM_IBREAK > 0
	case SREG_IBREAKA + 0:
		WSR(v, SREG_IBREAKA + 0);
		break;
#endif
#if XCHAL_NUM_IBREAK > 1
	case SREG_IBREAKA + 1:
		WSR(v, SREG_IBREAKA + 1);
		break;
#endif

#if XCHAL_NUM_DBREAK > 0
	case SREG_DBREAKA + 0:
		WSR(v, SREG_DBREAKA + 0);
		break;
	case SREG_DBREAKC + 0:
		WSR(v, SREG_DBREAKC + 0);
		break;
#endif
#if XCHAL_NUM_DBREAK > 1
	case SREG_DBREAKA + 1:
		WSR(v, SREG_DBREAKA + 1);
		break;
	case SREG_DBREAKC + 1:
		WSR(v, SREG_DBREAKC + 1);
		break;
#endif
	}
}

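/*
 * Claim the first free entry of an n-element slot array for @bp.
 * Returns the slot index, or -EBUSY if all slots are taken.
 */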
static int alloc_slot(struct perf_event **slot, size_t n,
		      struct perf_event *bp)
{
	size_t i;

	for (i = 0; i < n; ++i) {
		if (!slot[i]) {
			slot[i] = bp;
			return i;
		}
	}
	return -EBUSY;
}

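/*
 * Program instruction breakpoint @reg: write the target address to
 * IBREAKA[reg] and set the corresponding enable bit in IBREAKENABLE.
 */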
static void set_ibreak_regs(int reg, struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned long ibreakenable;

	xtensa_wsr(info->address, SREG_IBREAKA + reg);
	RSR(ibreakenable, SREG_IBREAKENABLE);
	WSR(ibreakenable | (1 << reg), SREG_IBREAKENABLE);
}

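/*
 * Program data breakpoint @reg: DBREAKA[reg] gets the watched address,
 * DBREAKC[reg] gets the byte mask derived from the length together with
 * the load/store enable bits.
 */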
static void set_dbreak_regs(int reg, struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned long dbreakc = DBREAKC_MASK_MASK & -info->len;

	if (info->type & XTENSA_BREAKPOINT_LOAD)
		dbreakc |= DBREAKC_LOAD_MASK;
	if (info->type & XTENSA_BREAKPOINT_STORE)
		dbreakc |= DBREAKC_STOR_MASK;

	xtensa_wsr(info->address, SREG_DBREAKA + reg);
	xtensa_wsr(dbreakc, SREG_DBREAKC + reg);
}

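/*
 * Install @bp on the current CPU: execute breakpoints take a free
 * IBREAK slot, everything else takes a free DBREAK (watchpoint) slot.
 * Returns -EBUSY when no slot of the required kind is available.
 */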
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	int i;

	if (counter_arch_bp(bp)->type == XTENSA_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		i = alloc_slot(this_cpu_ptr(bp_on_reg), XCHAL_NUM_IBREAK, bp);
		if (i < 0)
			return i;
		set_ibreak_regs(i, bp);

	} else {
		/* Watchpoint */
		i = alloc_slot(this_cpu_ptr(wp_on_reg), XCHAL_NUM_DBREAK, bp);
		if (i < 0)
			return i;
		set_dbreak_regs(i, bp);
	}
	return 0;
}

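/*
 * Release the slot currently holding @bp. Returns the freed slot index,
 * or -EBUSY if @bp is not found in the array.
 */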
static int free_slot(struct perf_event **slot, size_t n,
		     struct perf_event *bp)
{
	size_t i;

	for (i = 0; i < n; ++i) {
		if (slot[i] == bp) {
			slot[i] = NULL;
			return i;
		}
	}
	return -EBUSY;
}

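/*
 * Remove @bp from the current CPU: clear its IBREAKENABLE bit for
 * execute breakpoints, or zero its DBREAKC register for watchpoints.
 */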
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int i;

	if (info->type == XTENSA_BREAKPOINT_EXECUTE) {
		unsigned long ibreakenable;

		/* Breakpoint */
		i = free_slot(this_cpu_ptr(bp_on_reg), XCHAL_NUM_IBREAK, bp);
		if (i >= 0) {
			RSR(ibreakenable, SREG_IBREAKENABLE);
			WSR(ibreakenable & ~(1 << i), SREG_IBREAKENABLE);
		}
	} else {
		/* Watchpoint */
		i = free_slot(this_cpu_ptr(wp_on_reg), XCHAL_NUM_DBREAK, bp);
		if (i >= 0)
			xtensa_wsr(0, SREG_DBREAKC + i);
	}
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

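/*
 * Unregister and drop all ptrace-installed breakpoints and watchpoints
 * still attached to @tsk.
 */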
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < XCHAL_NUM_IBREAK; ++i) {
		if (t->ptrace_bp[i]) {
			unregister_hw_breakpoint(t->ptrace_bp[i]);
			t->ptrace_bp[i] = NULL;
		}
	}
	for (i = 0; i < XCHAL_NUM_DBREAK; ++i) {
		if (t->ptrace_wp[i]) {
			unregister_hw_breakpoint(t->ptrace_wp[i]);
			t->ptrace_wp[i] = NULL;
		}
	}
}

/*
 * Set ptrace breakpoint pointers to zero for this task.
 * This is required in order to prevent child processes from unregistering
 * breakpoints held by their parent.
 */
void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	memset(tsk->thread.ptrace_bp, 0, sizeof(tsk->thread.ptrace_bp));
	memset(tsk->thread.ptrace_wp, 0, sizeof(tsk->thread.ptrace_wp));
}

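/*
 * Reprogram all data breakpoint registers from the per-CPU wp_on_reg
 * table and clear TIF_DB_DISABLED, undoing the temporary disable done
 * in check_hw_breakpoint() for kernel-mode hits.
 */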
void restore_dbreak(void)
{
	int i;

	for (i = 0; i < XCHAL_NUM_DBREAK; ++i) {
		struct perf_event *bp = this_cpu_ptr(wp_on_reg)[i];

		if (bp)
			set_dbreak_regs(i, bp);
	}
	clear_thread_flag(TIF_DB_DISABLED);
}

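/*
 * Handle a debug exception described by DEBUGCAUSE: instruction
 * breakpoint hits matching a registered event are reported to perf;
 * data breakpoint hits are reported to perf in user mode, or the
 * triggering DBREAK register is temporarily disabled in kernel mode
 * until restore_dbreak(). Returns 0 if the exception was caused by a
 * breakpoint/watchpoint, -ENOENT otherwise.
 */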
int check_hw_breakpoint(struct pt_regs *regs)
{
	if (regs->debugcause & BIT(DEBUGCAUSE_IBREAK_BIT)) {
		int i;
		struct perf_event **bp = this_cpu_ptr(bp_on_reg);

		for (i = 0; i < XCHAL_NUM_IBREAK; ++i) {
			if (bp[i] && !bp[i]->attr.disabled &&
			    regs->pc == bp[i]->attr.bp_addr)
				perf_bp_event(bp[i], regs);
		}
		return 0;
	} else if (regs->debugcause & BIT(DEBUGCAUSE_DBREAK_BIT)) {
		struct perf_event **bp = this_cpu_ptr(wp_on_reg);
		int dbnum = (regs->debugcause & DEBUGCAUSE_DBNUM_MASK) >>
			DEBUGCAUSE_DBNUM_SHIFT;

		if (dbnum < XCHAL_NUM_DBREAK && bp[dbnum]) {
			if (user_mode(regs)) {
				perf_bp_event(bp[dbnum], regs);
			} else {
				set_thread_flag(TIF_DB_DISABLED);
				xtensa_wsr(0, SREG_DBREAKC + dbnum);
			}
		} else {
			WARN_ONCE(1,
				  "Wrong/unconfigured DBNUM reported in DEBUGCAUSE: %d\n",
				  dbnum);
		}
		return 0;
	}
	return -ENOENT;
}