/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>

enum bp_type_idx {
	TYPE_INST = 0,
#ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
	TYPE_DATA = 0,
#else
	TYPE_DATA = 1,
#endif
	TYPE_MAX
};
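
/*
 * Note: on architectures that select CONFIG_HAVE_MIXED_BREAKPOINTS_REGS,
 * instruction and data breakpoints share a single pool of debug registers,
 * so both types map to slot index 0 above and TYPE_MAX is 1; otherwise the
 * two types are accounted in separate pools.
 */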

/*
 * Constraints data
 */

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_task_bp_pinned[TYPE_MAX][HBP_NUM]);

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
{
	if (bp->attr.bp_type & HW_BREAKPOINT_RW)
		return TYPE_DATA;

	return TYPE_INST;
}

/*
 * Report the maximum number of pinned breakpoints a task
 * can have in this cpu
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
	int i;
	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);

	for (i = HBP_NUM - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}
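
/*
 * Note on the accounting above: nr_task_bp_pinned[type] is a per-cpu
 * histogram in which slot n counts the tasks that currently have n + 1
 * breakpoints of this type pinned on the cpu.  Scanning it from the top
 * therefore yields the worst-case number of registers a single task may
 * claim, which is what the slot reservation below has to plan for.
 */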

static int task_bp_pinned(struct task_struct *tsk, enum bp_type_idx type)
{
	struct perf_event_context *ctx = tsk->perf_event_ctxp;
	struct list_head *list;
	struct perf_event *bp;
	unsigned long flags;
	int count = 0;

	if (WARN_ONCE(!ctx, "No perf context for this task"))
		return 0;

	list = &ctx->event_list;

	raw_spin_lock_irqsave(&ctx->lock, flags);

	/*
	 * The current breakpoint counter is not included in the list
	 * at the open() callback time
	 */
	list_for_each_entry(bp, list, event_entry) {
		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
			if (find_slot_idx(bp) == type)
				count++;
	}

	raw_spin_unlock_irqrestore(&ctx->lock, flags);

	return count;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
		    enum bp_type_idx type)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	if (cpu >= 0) {
		slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
		if (!tsk)
			slots->pinned += max_task_bp_pinned(cpu, type);
		else
			slots->pinned += task_bp_pinned(tsk, type);
		slots->flexible = per_cpu(nr_bp_flexible[type], cpu);

		return;
	}

	for_each_online_cpu(cpu) {
		unsigned int nr;

		nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
		if (!tsk)
			nr += max_task_bp_pinned(cpu, type);
		else
			nr += task_bp_pinned(tsk, type);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = per_cpu(nr_bp_flexible[type], cpu);

		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}

/*
 * Add or remove a pinned breakpoint for the given task in our
 * constraint table
 */
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable,
				enum bp_type_idx type)
{
	unsigned int *tsk_pinned;
	int count = 0;

	count = task_bp_pinned(tsk, type);

	tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
	if (enable) {
		tsk_pinned[count]++;
		if (count > 0)
			tsk_pinned[count-1]--;
	} else {
		tsk_pinned[count]--;
		if (count > 0)
			tsk_pinned[count-1]++;
	}
}
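
/*
 * Example of the histogram update above (illustrative numbers): a task that
 * already has two pinned breakpoints of this type on the cpu gets a third
 * one.  task_bp_pinned() returns 2 (the new counter is not in the context
 * list yet), so tsk_pinned[2] is incremented and tsk_pinned[1] decremented:
 * the task moves from the "2 breakpoints" bucket to the "3 breakpoints"
 * bucket.  Removal walks the same path in reverse.
 */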

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	/* Pinned counter task profiling */
	if (tsk) {
		if (cpu >= 0) {
			toggle_bp_task_slot(tsk, cpu, enable, type);
			return;
		}

		for_each_online_cpu(cpu)
			toggle_bp_task_slot(tsk, cpu, enable, type);
		return;
	}

	/* Pinned counter cpu profiling */
	if (enable)
		per_cpu(nr_cpu_bp_pinned[type], bp->cpu)++;
	else
		per_cpu(nr_cpu_bp_pinned[type], bp->cpu)--;
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't use up all the registers.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *
 *  == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before.  But now the flexible counters, if any,
 *          must keep at least one register to themselves (or they will
 *          never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
 */
static int __reserve_bp_slot(struct perf_event *bp)
{
	struct bp_busy_slots slots = {0};
	enum bp_type_idx type;

	/* Basic checks */
	if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
	    bp->attr.bp_type == HW_BREAKPOINT_INVALID)
		return -EINVAL;

	type = find_slot_idx(bp);
	fetch_bp_busy_slots(&slots, bp, type);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) == HBP_NUM)
		return -ENOSPC;

	toggle_bp_slot(bp, true, type);

	return 0;
}

int reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	mutex_lock(&nr_bp_mutex);

	ret = __reserve_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);

	return ret;
}
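
/*
 * Worked example of the check in __reserve_bp_slot() (illustrative numbers,
 * assuming HBP_NUM == 4 as on x86): the cpu targeted by a new task-bound
 * breakpoint already has two pinned cpu-wide breakpoints, the requesting
 * task has one pinned breakpoint of the same type there, and a flexible
 * counter is also present on that cpu.  Then slots.pinned == 3 and
 * !!slots.flexible == 1, so 3 + 1 == HBP_NUM and the new pinned breakpoint
 * is refused with -ENOSPC: granting it would leave no register for the
 * flexible counter to ever be scheduled on.
 */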

static void __release_bp_slot(struct perf_event *bp)
{
	enum bp_type_idx type;

	type = find_slot_idx(bp);
	toggle_bp_slot(bp, false, type);
}

void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	__release_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);
}

/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release breakpoint slot functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	return __reserve_bp_slot(bp);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	__release_bp_slot(bp);

	return 0;
}

static int validate_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = arch_validate_hwbkpt_settings(bp);
	if (ret)
		return ret;

	if (arch_check_bp_in_kernelspace(bp)) {
		if (bp->attr.exclude_kernel)
			return -EINVAL;
		/*
		 * Don't let unprivileged users set a breakpoint in the trap
		 * path to avoid trap recursion attacks.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return 0;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = reserve_bp_slot(bp);
	if (ret)
		return ret;

	ret = validate_hw_breakpoint(bp);

	/* if validate_hw_breakpoint() fails then release bp slot */
	if (ret)
		release_bp_slot(bp);

	return ret;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    struct task_struct *tsk)
{
	return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
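
/*
 * Usage sketch (illustrative only, not part of this file; the variable and
 * handler names are made up): a typical caller, such as the ptrace debug
 * register emulation, fills a perf_event_attr and registers it against the
 * traced task:
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = user_watch_addr;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	bp = register_user_hw_breakpoint(&attr, my_bp_handler, child);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 */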

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	u64 old_addr = bp->attr.bp_addr;
	u64 old_len = bp->attr.bp_len;
	int old_type = bp->attr.bp_type;
	int err = 0;

	perf_event_disable(bp);

	bp->attr.bp_addr = attr->bp_addr;
	bp->attr.bp_type = attr->bp_type;
	bp->attr.bp_len = attr->bp_len;

	if (attr->disabled)
		goto end;

	err = validate_hw_breakpoint(bp);
	if (!err)
		perf_event_enable(bp);

	if (err) {
		bp->attr.bp_addr = old_addr;
		bp->attr.bp_type = old_type;
		bp->attr.bp_len = old_len;
		if (!bp->attr.disabled)
			perf_event_enable(bp);

		return err;
	}

end:
	bp->attr.disabled = attr->disabled;

	return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered)
{
	struct perf_event * __percpu *cpu_events, **pevent, *bp;
	long err;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);

		*pevent = bp;

		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			goto fail;
		}
	}
	put_online_cpus();

	return cpu_events;

fail:
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		if (IS_ERR(*pevent))
			break;
		unregister_hw_breakpoint(*pevent);
	}
	put_online_cpus();

	free_percpu(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
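
/*
 * Usage sketch (illustrative only, loosely modelled on the in-tree
 * samples/hw_breakpoint example; the symbol and handler names here are
 * made up): watch writes and reads to a kernel data symbol on every cpu:
 *
 *	struct perf_event_attr attr;
 *	struct perf_event * __percpu *wide_bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = kallsyms_lookup_name("some_kernel_symbol");
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
 *
 *	wide_bp = register_wide_hw_breakpoint(&attr, my_wide_handler);
 *	if (IS_ERR((void __force *)wide_bp))
 *		return PTR_ERR((void __force *)wide_bp);
 */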

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
	int cpu;
	struct perf_event **pevent;

	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		unregister_hw_breakpoint(*pevent);
	}
	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static int __init init_hw_breakpoint(void)
{
	return register_die_notifier(&hw_breakpoint_exceptions_nb);
}
core_initcall(init_hw_breakpoint);


struct pmu perf_ops_bp = {
	.enable		= arch_install_hw_breakpoint,
	.disable	= arch_uninstall_hw_breakpoint,
	.read		= hw_breakpoint_pmu_read,
};