/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>

/*
 * Constraints data
 */

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]);

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);

static int nr_slots[TYPE_MAX];

/* Keep track of the breakpoints attached to tasks */
static LIST_HEAD(bp_task_head);

static int constraints_initialized;

/* Gather the total number of pinned and un-pinned breakpoints in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

__weak int hw_breakpoint_weight(struct perf_event *bp)
{
	return 1;
}

static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
{
	if (bp->attr.bp_type & HW_BREAKPOINT_RW)
		return TYPE_DATA;

	return TYPE_INST;
}

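/*
 * For example, HW_BREAKPOINT_R, HW_BREAKPOINT_W and HW_BREAKPOINT_RW
 * all select TYPE_DATA (each carries a bit of HW_BREAKPOINT_RW), while
 * a pure HW_BREAKPOINT_X execution breakpoint selects TYPE_INST.
 */
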
/*
 * Report the maximum number of pinned breakpoints a task
 * has on this cpu.
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
	int i;
	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);

	for (i = nr_slots[type] - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}

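/*
 * Illustration (a sketch, not from the original file): with
 * nr_slots[type] == 4 and tsk_pinned == {0, 1, 0, 0}, exactly one task
 * owns 2 pinned breakpoints of this type on this cpu and no task owns
 * more, so the loop above stops at i == 1 and reports 2.
 */
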
/*
 * Count the number of breakpoints of the same type and same task.
 * The given event must not be on the list.
 */
static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
{
	struct task_struct *tsk = bp->hw.bp_target;
	struct perf_event *iter;
	int count = 0;

	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
		if (iter->hw.bp_target == tsk &&
		    find_slot_idx(iter) == type &&
		    (iter->cpu < 0 || cpu == iter->cpu))
			count += hw_breakpoint_weight(iter);
	}

	return count;
}

static const struct cpumask *cpumask_of_bp(struct perf_event *bp)
{
	if (bp->cpu >= 0)
		return cpumask_of(bp->cpu);
	return cpu_possible_mask;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
		    enum bp_type_idx type)
{
	const struct cpumask *cpumask = cpumask_of_bp(bp);
	int cpu;

	for_each_cpu(cpu, cpumask) {
		unsigned int nr = per_cpu(nr_cpu_bp_pinned[type], cpu);

		if (!bp->hw.bp_target)
			nr += max_task_bp_pinned(cpu, type);
		else
			nr += task_bp_pinned(cpu, bp, type);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = per_cpu(nr_bp_flexible[type], cpu);

		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}

/*
 * For now, continue to consider flexible as pinned, until we can
 * ensure no flexible event can ever be scheduled before a pinned event
 * on the same cpu.
 */
static void
fetch_this_slot(struct bp_busy_slots *slots, int weight)
{
	slots->pinned += weight;
}

/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct perf_event *bp, int cpu,
				enum bp_type_idx type, int weight)
{
	/* tsk_pinned[n-1] is the number of tasks having n breakpoints */
	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
	int old_idx, new_idx;

	old_idx = task_bp_pinned(cpu, bp, type) - 1;
	new_idx = old_idx + weight;

	if (old_idx >= 0)
		tsk_pinned[old_idx]--;
	if (new_idx >= 0)
		tsk_pinned[new_idx]++;
}

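/*
 * Sketch of the bucket shift above (illustrative): a task that already
 * owns two pinned breakpoints of this type on @cpu gains one of weight
 * 1. The new event is not yet on bp_task_head, so task_bp_pinned()
 * returns 2: old_idx = 1, new_idx = 2, and the task moves from the
 * "2 breakpoints" bucket to the "3 breakpoints" bucket of tsk_pinned.
 */
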
/*
 * Add/remove the given breakpoint in our constraint table
 */
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
	       int weight)
{
	const struct cpumask *cpumask = cpumask_of_bp(bp);
	int cpu;

	if (!enable)
		weight = -weight;

	/* Pinned counter cpu profiling */
	if (!bp->hw.bp_target) {
		per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
		return;
	}

	/* Pinned counter task profiling */
	for_each_cpu(cpu, cpumask)
		toggle_bp_task_slot(bp, cpu, type, weight);

	if (enable)
		list_add_tail(&bp->hw.bp_list, &bp_task_head);
	else
		list_del(&bp->hw.bp_list);
}

/*
 * Function to perform processor-specific cleanup during unregistration
 */
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * A weak stub function here for those archs that don't define
	 * it inside arch/.../kernel/hw_breakpoint.c
	 */
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 * == Non-pinned counter == (Considered as pinned for now)
 *
 *  - If attached to a single cpu, check:
 *
 *      (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *          + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
 *
 *      -> If there are already non-pinned counters in this cpu, it means
 *         there is already a free slot for them.
 *         Otherwise, we check that the maximum number of per task
 *         breakpoints (for this cpu) plus the number of per cpu breakpoints
 *         (for this cpu) doesn't cover every register.
 *
 *  - If attached to every cpu, check:
 *
 *      (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *          + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
 *
 *      -> This is roughly the same, except we check the number of per cpu
 *         bp for every cpu and we keep the max one. Same for the per task
 *         breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *  - If attached to a single cpu, check:
 *
 *      ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *          + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
 *
 *      -> Same checks as before. But now the nr_bp_flexible, if any, must keep
 *         at least one register (or they will never be fed).
 *
 *  - If attached to every cpu, check:
 *
 *      ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *          + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
 */
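/*
 * Worked example (illustrative, assuming the x86 case where
 * nr_slots[TYPE_DATA] == HBP_NUM == 4): if a cpu already has 2 pinned
 * cpu-wide breakpoints, its busiest task pins 1 more, and 1 flexible
 * counter is around, then a new pinned request of weight 1 yields
 * slots.pinned = 2 + 1 + 1 = 4. The check below then sees
 * 4 + !!1 = 5 > 4 slots, and __reserve_bp_slot() returns -ENOSPC.
 */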
static int __reserve_bp_slot(struct perf_event *bp)
{
	struct bp_busy_slots slots = {0};
	enum bp_type_idx type;
	int weight;

	/* We couldn't initialize breakpoint constraints on boot */
	if (!constraints_initialized)
		return -ENOMEM;

	/* Basic checks */
	if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
	    bp->attr.bp_type == HW_BREAKPOINT_INVALID)
		return -EINVAL;

	type = find_slot_idx(bp);
	weight = hw_breakpoint_weight(bp);

	fetch_bp_busy_slots(&slots, bp, type);
	/*
	 * Simulate the addition of this breakpoint to the constraints
	 * and see the result.
	 */
	fetch_this_slot(&slots, weight);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) > nr_slots[type])
		return -ENOSPC;

	toggle_bp_slot(bp, true, type, weight);

	return 0;
}

int reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	mutex_lock(&nr_bp_mutex);

	ret = __reserve_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);

	return ret;
}

static void __release_bp_slot(struct perf_event *bp)
{
	enum bp_type_idx type;
	int weight;

	type = find_slot_idx(bp);
	weight = hw_breakpoint_weight(bp);
	toggle_bp_slot(bp, false, type, weight);
}

void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	arch_unregister_hw_breakpoint(bp);
	__release_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);
}

/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release breakpoint slot functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	return __reserve_bp_slot(bp);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	__release_bp_slot(bp);

	return 0;
}

static int validate_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = arch_validate_hwbkpt_settings(bp);
	if (ret)
		return ret;

	if (arch_check_bp_in_kernelspace(bp)) {
		if (bp->attr.exclude_kernel)
			return -EINVAL;
		/*
		 * Don't let unprivileged users set a breakpoint in the trap
		 * path to avoid trap recursion attacks.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return 0;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = reserve_bp_slot(bp);
	if (ret)
		return ret;

	ret = validate_hw_breakpoint(bp);

	/* if validate_hw_breakpoint() fails then release bp slot */
	if (ret)
		release_bp_slot(bp);

	return ret;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data passed to the @triggered callback
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context,
			    struct task_struct *tsk)
{
	return perf_event_create_kernel_counter(attr, -1, tsk, triggered,
						context);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);

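/*
 * Sketch of a typical call (illustrative; the attr values are arbitrary
 * and 'task_triggered' stands for any perf_overflow_handler_t):
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = addr;
 *	attr.bp_len = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	bp = register_user_hw_breakpoint(&attr, task_triggered, NULL, tsk);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 */
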
/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	u64 old_addr = bp->attr.bp_addr;
	u64 old_len = bp->attr.bp_len;
	int old_type = bp->attr.bp_type;
	int err = 0;

	/*
	 * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
	 * will not be possible to raise IPIs that invoke __perf_event_disable.
	 * So call the function directly after making sure we are targeting the
	 * current task.
	 */
	if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
		__perf_event_disable(bp);
	else
		perf_event_disable(bp);

	bp->attr.bp_addr = attr->bp_addr;
	bp->attr.bp_type = attr->bp_type;
	bp->attr.bp_len = attr->bp_len;

	if (attr->disabled)
		goto end;

	err = validate_hw_breakpoint(bp);
	if (!err)
		perf_event_enable(bp);

	if (err) {
		bp->attr.bp_addr = old_addr;
		bp->attr.bp_type = old_type;
		bp->attr.bp_len = old_len;
		if (!bp->attr.disabled)
			perf_event_enable(bp);

		return err;
	}

end:
	bp->attr.disabled = attr->disabled;

	return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

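/*
 * Sketch (illustrative): move an existing breakpoint to a new address,
 * keeping its type and length:
 *
 *	struct perf_event_attr attr = bp->attr;
 *
 *	attr.bp_addr = new_addr;
 *	err = modify_user_hw_breakpoint(bp, &attr);
 */
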
/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data passed to the @triggered callback
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context)
{
	struct perf_event * __percpu *cpu_events, *bp;
	long err = 0;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		bp = perf_event_create_kernel_counter(attr, cpu, NULL,
						      triggered, context);
		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			break;
		}

		per_cpu(*cpu_events, cpu) = bp;
	}
	put_online_cpus();

	if (likely(!err))
		return cpu_events;

	unregister_wide_hw_breakpoint(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
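
/*
 * Sketch, loosely following samples/hw_breakpoint/data_breakpoint.c
 * ('wide_triggered' is a stand-in handler): watch kernel-wide accesses
 * to a symbol on every online cpu:
 *
 *	struct perf_event_attr attr;
 *	struct perf_event * __percpu *hbp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = kallsyms_lookup_name("pid_max");
 *	attr.bp_len = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
 *
 *	hbp = register_wide_hw_breakpoint(&attr, wide_triggered, NULL);
 *	if (IS_ERR((void __force *)hbp))
 *		return PTR_ERR((void __force *)hbp);
 */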

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
	int cpu;

	for_each_possible_cpu(cpu)
		unregister_hw_breakpoint(per_cpu(*cpu_events, cpu));

	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static void bp_perf_event_destroy(struct perf_event *event)
{
	release_bp_slot(event);
}

static int hw_breakpoint_event_init(struct perf_event *bp)
{
	int err;

	if (bp->attr.type != PERF_TYPE_BREAKPOINT)
		return -ENOENT;

	/*
	 * no branch sampling for breakpoint events
	 */
	if (has_branch_stack(bp))
		return -EOPNOTSUPP;

	err = register_perf_hw_breakpoint(bp);
	if (err)
		return err;

	bp->destroy = bp_perf_event_destroy;

	return 0;
}

static int hw_breakpoint_add(struct perf_event *bp, int flags)
{
	if (!(flags & PERF_EF_START))
		bp->hw.state = PERF_HES_STOPPED;

	if (is_sampling_event(bp)) {
		bp->hw.last_period = bp->hw.sample_period;
		perf_swevent_set_period(bp);
	}

	return arch_install_hw_breakpoint(bp);
}

static void hw_breakpoint_del(struct perf_event *bp, int flags)
{
	arch_uninstall_hw_breakpoint(bp);
}

static void hw_breakpoint_start(struct perf_event *bp, int flags)
{
	bp->hw.state = 0;
}

static void hw_breakpoint_stop(struct perf_event *bp, int flags)
{
	bp->hw.state = PERF_HES_STOPPED;
}

static int hw_breakpoint_event_idx(struct perf_event *bp)
{
	return 0;
}

static struct pmu perf_breakpoint = {
	.task_ctx_nr	= perf_sw_context, /* could eventually get its own */

	.event_init	= hw_breakpoint_event_init,
	.add		= hw_breakpoint_add,
	.del		= hw_breakpoint_del,
	.start		= hw_breakpoint_start,
	.stop		= hw_breakpoint_stop,
	.read		= hw_breakpoint_pmu_read,

	.event_idx	= hw_breakpoint_event_idx,
};

int __init init_hw_breakpoint(void)
{
	unsigned int **task_bp_pinned;
	int cpu, err_cpu;
	int i;

	for (i = 0; i < TYPE_MAX; i++)
		nr_slots[i] = hw_breakpoint_slots(i);

	for_each_possible_cpu(cpu) {
		for (i = 0; i < TYPE_MAX; i++) {
			task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu);
			*task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i],
						  GFP_KERNEL);
			if (!*task_bp_pinned)
				goto err_alloc;
		}
	}

	constraints_initialized = 1;

	perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);

	return register_die_notifier(&hw_breakpoint_exceptions_nb);

err_alloc:
	for_each_possible_cpu(err_cpu) {
		for (i = 0; i < TYPE_MAX; i++)
			kfree(per_cpu(nr_task_bp_pinned[i], err_cpu));
		if (err_cpu == cpu)
			break;
	}

	return -ENOMEM;
}