// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/bug.h>

#include <linux/hw_breakpoint.h>
/*
 * Constraints data
 */
struct bp_cpuinfo {
	/* Number of pinned cpu breakpoints in a cpu */
	unsigned int	cpu_pinned;
	/* tsk_pinned[n] is the number of tasks having n+1 breakpoints */
	unsigned int	*tsk_pinned;
	/* Number of non-pinned cpu/task breakpoints in a cpu */
	unsigned int	flexible; /* XXX: placeholder, see fetch_this_slot() */
};

static DEFINE_PER_CPU(struct bp_cpuinfo, bp_cpuinfo[TYPE_MAX]);
static int nr_slots[TYPE_MAX];

static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)
{
	return per_cpu_ptr(bp_cpuinfo + type, cpu);
}

/* Keep track of the breakpoints attached to tasks */
static LIST_HEAD(bp_task_head);

static int constraints_initialized;

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

__weak int hw_breakpoint_weight(struct perf_event *bp)
{
	return 1;
}
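
/*
 * Note: the weight of a breakpoint is the number of debug-register slots
 * it consumes in the constraint accounting. The weak default above charges
 * one slot per event; an architecture where a single event can occupy
 * several slots may override it in its arch/.../kernel/hw_breakpoint.c.
 */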

static inline enum bp_type_idx find_slot_idx(u64 bp_type)
{
	if (bp_type & HW_BREAKPOINT_RW)
		return TYPE_DATA;

	return TYPE_INST;
}

/*
 * Report the maximum number of pinned breakpoints a task
 * has on this cpu.
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
	unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
	int i;

	for (i = nr_slots[type] - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}
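
/*
 * Worked example (illustrative numbers, not from the source): if, on this
 * cpu, two tasks own one breakpoint each and a third task owns three, then
 * tsk_pinned[] = { 2, 0, 1, 0, ... } and max_task_bp_pinned() returns 3,
 * i.e. the worst case any single task contributes on this cpu.
 */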

/*
 * Count the number of breakpoints of the same type and same task.
 * The given event must not be on the list.
 */
static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
{
	struct task_struct *tsk = bp->hw.target;
	struct perf_event *iter;
	int count = 0;

	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
		if (iter->hw.target == tsk &&
		    find_slot_idx(iter->attr.bp_type) == type &&
		    (iter->cpu < 0 || cpu == iter->cpu))
			count += hw_breakpoint_weight(iter);
	}

	return count;
}

static const struct cpumask *cpumask_of_bp(struct perf_event *bp)
{
	if (bp->cpu >= 0)
		return cpumask_of(bp->cpu);
	return cpu_possible_mask;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
		    enum bp_type_idx type)
{
	const struct cpumask *cpumask = cpumask_of_bp(bp);
	int cpu;

	for_each_cpu(cpu, cpumask) {
		struct bp_cpuinfo *info = get_bp_info(cpu, type);
		int nr;

		nr = info->cpu_pinned;
		if (!bp->hw.target)
			nr += max_task_bp_pinned(cpu, type);
		else
			nr += task_bp_pinned(cpu, bp, type);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = info->flexible;
		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}

/*
 * For now, continue to consider flexible as pinned, until we can
 * ensure no flexible event can ever be scheduled before a pinned event
 * on the same cpu.
 */
static void
fetch_this_slot(struct bp_busy_slots *slots, int weight)
{
	slots->pinned += weight;
}

/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct perf_event *bp, int cpu,
				enum bp_type_idx type, int weight)
{
	unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
	int old_idx, new_idx;

	old_idx = task_bp_pinned(cpu, bp, type) - 1;
	new_idx = old_idx + weight;

	if (old_idx >= 0)
		tsk_pinned[old_idx]--;
	if (new_idx >= 0)
		tsk_pinned[new_idx]++;
}
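
/*
 * Example walk-through (hypothetical numbers): the task already holds two
 * breakpoints of this type on @cpu, so task_bp_pinned() returns 2 and
 * old_idx = 1. Adding one more event of weight 1 gives new_idx = 2:
 * tsk_pinned[1] is decremented (the task no longer has exactly two) and
 * tsk_pinned[2] is incremented (it now has exactly three). On removal the
 * caller negates the weight and the same code moves the task back down
 * the histogram.
 */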

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
	       int weight)
{
	const struct cpumask *cpumask = cpumask_of_bp(bp);
	int cpu;

	if (!enable)
		weight = -weight;

	/* Pinned counter cpu profiling */
	if (!bp->hw.target) {
		get_bp_info(bp->cpu, type)->cpu_pinned += weight;
		return;
	}

	/* Pinned counter task profiling */
	for_each_cpu(cpu, cpumask)
		toggle_bp_task_slot(bp, cpu, type, weight);

	if (enable)
		list_add_tail(&bp->hw.bp_list, &bp_task_head);
	else
		list_del(&bp->hw.bp_list);
}

/*
 * Function to perform processor-specific cleanup during unregistration
 */
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * A weak stub function here for those archs that don't define
	 * it inside arch/.../kernel/hw_breakpoint.c
	 */
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(info->flexible, cpu) || (per_cpu(info->cpu_pinned, cpu)
 *           + max(per_cpu(info->tsk_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't use up every register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(info->flexible, *) || (max(per_cpu(info->cpu_pinned, *))
 *           + max(per_cpu(info->tsk_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(info->flexible, cpu) > 1) + per_cpu(info->cpu_pinned, cpu)
 *            + max(per_cpu(info->tsk_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the info->flexible, if any, must
 *          keep at least one register (or flexible events will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(info->flexible, *) > 1) + max(per_cpu(info->cpu_pinned, *))
 *            + max(per_cpu(info->tsk_pinned, *))) < HBP_NUM
 */
static int __reserve_bp_slot(struct perf_event *bp, u64 bp_type)
{
	struct bp_busy_slots slots = {0};
	enum bp_type_idx type;
	int weight;

	/* We couldn't initialize breakpoint constraints on boot */
	if (!constraints_initialized)
		return -ENOMEM;

	/* Basic checks */
	if (bp_type == HW_BREAKPOINT_EMPTY ||
	    bp_type == HW_BREAKPOINT_INVALID)
		return -EINVAL;

	type = find_slot_idx(bp_type);
	weight = hw_breakpoint_weight(bp);

	fetch_bp_busy_slots(&slots, bp, type);
	/*
	 * Simulate the addition of this breakpoint to the constraints
	 * and see the result.
	 */
	fetch_this_slot(&slots, weight);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) > nr_slots[type])
		return -ENOSPC;

	toggle_bp_slot(bp, true, type, weight);

	return 0;
}
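
/*
 * Illustration of the final check with made-up numbers: with
 * nr_slots[TYPE_DATA] == 4 (e.g. HBP_NUM on x86), a simulated total of
 * slots.pinned == 4 is accepted only while slots.flexible == 0; as soon
 * as one flexible counter exists on any inspected cpu, 4 + 1 > 4 and the
 * reservation fails with -ENOSPC.
 */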

int reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	mutex_lock(&nr_bp_mutex);

	ret = __reserve_bp_slot(bp, bp->attr.bp_type);

	mutex_unlock(&nr_bp_mutex);

	return ret;
}

static void __release_bp_slot(struct perf_event *bp, u64 bp_type)
{
	enum bp_type_idx type;
	int weight;

	type = find_slot_idx(bp_type);
	weight = hw_breakpoint_weight(bp);
	toggle_bp_slot(bp, false, type, weight);
}

void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	arch_unregister_hw_breakpoint(bp);
	__release_bp_slot(bp, bp->attr.bp_type);

	mutex_unlock(&nr_bp_mutex);
}

static int __modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
{
	int err;

	__release_bp_slot(bp, old_type);

	err = __reserve_bp_slot(bp, new_type);
	if (err) {
		/*
		 * Reserve the old_type slot back in case
		 * there's no space for the new type.
		 *
		 * This must succeed, because we just released
		 * the old_type slot in the __release_bp_slot
		 * call above. If not, something is broken.
		 */
		WARN_ON(__reserve_bp_slot(bp, old_type));
	}

	return err;
}

static int modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
{
	int ret;

	mutex_lock(&nr_bp_mutex);
	ret = __modify_bp_slot(bp, old_type, new_type);
	mutex_unlock(&nr_bp_mutex);
	return ret;
}

/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release breakpoint slot functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	return __reserve_bp_slot(bp, bp->attr.bp_type);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	__release_bp_slot(bp, bp->attr.bp_type);

	return 0;
}

static int hw_breakpoint_parse(struct perf_event *bp,
			       const struct perf_event_attr *attr,
			       struct arch_hw_breakpoint *hw)
{
	int err;

	err = hw_breakpoint_arch_parse(bp, attr, hw);
	if (err)
		return err;

	if (arch_check_bp_in_kernelspace(hw)) {
		if (attr->exclude_kernel)
			return -EINVAL;
		/*
		 * Don't let unprivileged users set a breakpoint in the trap
		 * path to avoid trap recursion attacks.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return 0;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint hw = { };
	int err;

	err = reserve_bp_slot(bp);
	if (err)
		return err;

	err = hw_breakpoint_parse(bp, &bp->attr, &hw);
	if (err) {
		release_bp_slot(bp);
		return err;
	}

	bp->hw.info = hw;

	return 0;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data could be used in the overflow handler
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context,
			    struct task_struct *tsk)
{
	return perf_event_create_kernel_counter(attr, -1, tsk, triggered,
						context);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
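
/*
 * Minimal usage sketch (illustrative only; "my_handler" and the looked-up
 * symbol are hypothetical caller-side names, modelled on
 * samples/hw_breakpoint/data_breakpoint.c):
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = kallsyms_lookup_name("some_symbol");
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
 *
 *	bp = register_user_hw_breakpoint(&attr, my_handler, NULL, tsk);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 *
 * where my_handler has the perf_overflow_handler_t signature:
 *
 *	void my_handler(struct perf_event *bp,
 *			struct perf_sample_data *data,
 *			struct pt_regs *regs);
 */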

static void hw_breakpoint_copy_attr(struct perf_event_attr *to,
				    struct perf_event_attr *from)
{
	to->bp_addr = from->bp_addr;
	to->bp_type = from->bp_type;
	to->bp_len  = from->bp_len;
	to->disabled = from->disabled;
}

int
modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
				bool check)
{
	struct arch_hw_breakpoint hw = { };
	int err;

	err = hw_breakpoint_parse(bp, attr, &hw);
	if (err)
		return err;

	if (check) {
		struct perf_event_attr old_attr;

		old_attr = bp->attr;
		hw_breakpoint_copy_attr(&old_attr, attr);
		if (memcmp(&old_attr, attr, sizeof(*attr)))
			return -EINVAL;
	}

	if (bp->attr.bp_type != attr->bp_type) {
		err = modify_bp_slot(bp, bp->attr.bp_type, attr->bp_type);
		if (err)
			return err;
	}

	hw_breakpoint_copy_attr(&bp->attr, attr);
	bp->hw.info = hw;

	return 0;
}
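
/*
 * Note on the @check path above: old_attr is a copy of the current
 * attributes with only the bp_addr/bp_type/bp_len/disabled fields taken
 * from the new @attr, so the memcmp() rejects (-EINVAL) any request that
 * tries to change a field other than those four.
 */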

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	int err;

	/*
	 * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
	 * will not be possible to raise IPIs that invoke __perf_event_disable.
	 * So call the function directly after making sure we are targeting the
	 * current task.
	 */
	if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
		perf_event_disable_local(bp);
	else
		perf_event_disable(bp);

	err = modify_user_hw_breakpoint_check(bp, attr, false);

	if (!bp->attr.disabled)
		perf_event_enable(bp);

	return err;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data could be used in the overflow handler
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context)
{
	struct perf_event * __percpu *cpu_events, *bp;
	long err = 0;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		bp = perf_event_create_kernel_counter(attr, cpu, NULL,
						      triggered, context);
		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			break;
		}

		per_cpu(*cpu_events, cpu) = bp;
	}
	put_online_cpus();

	if (likely(!err))
		return cpu_events;

	unregister_wide_hw_breakpoint(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
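
/*
 * Minimal caller-side sketch (illustrative; "attr" and "my_handler" are
 * hypothetical and would be set up as for register_user_hw_breakpoint):
 *
 *	struct perf_event * __percpu *cpu_events;
 *
 *	cpu_events = register_wide_hw_breakpoint(&attr, my_handler, NULL);
 *	if (IS_ERR((void __force *)cpu_events))
 *		return PTR_ERR((void __force *)cpu_events);
 *	...
 *	unregister_wide_hw_breakpoint(cpu_events);
 *
 * The IS_ERR()/PTR_ERR() casts mirror the ERR_PTR() casts in the function
 * above: on failure the returned percpu pointer encodes the error code.
 */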

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
	int cpu;

	for_each_possible_cpu(cpu)
		unregister_hw_breakpoint(per_cpu(*cpu_events, cpu));

	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static void bp_perf_event_destroy(struct perf_event *event)
{
	release_bp_slot(event);
}

static int hw_breakpoint_event_init(struct perf_event *bp)
{
	int err;

	if (bp->attr.type != PERF_TYPE_BREAKPOINT)
		return -ENOENT;

	/*
	 * no branch sampling for breakpoint events
	 */
	if (has_branch_stack(bp))
		return -EOPNOTSUPP;

	err = register_perf_hw_breakpoint(bp);
	if (err)
		return err;

	bp->destroy = bp_perf_event_destroy;

	return 0;
}

static int hw_breakpoint_add(struct perf_event *bp, int flags)
{
	if (!(flags & PERF_EF_START))
		bp->hw.state = PERF_HES_STOPPED;

	if (is_sampling_event(bp)) {
		bp->hw.last_period = bp->hw.sample_period;
		perf_swevent_set_period(bp);
	}

	return arch_install_hw_breakpoint(bp);
}

static void hw_breakpoint_del(struct perf_event *bp, int flags)
{
	arch_uninstall_hw_breakpoint(bp);
}

static void hw_breakpoint_start(struct perf_event *bp, int flags)
{
	bp->hw.state = 0;
}

static void hw_breakpoint_stop(struct perf_event *bp, int flags)
{
	bp->hw.state = PERF_HES_STOPPED;
}

static struct pmu perf_breakpoint = {
	.task_ctx_nr	= perf_sw_context, /* could eventually get its own */

	.event_init	= hw_breakpoint_event_init,
	.add		= hw_breakpoint_add,
	.del		= hw_breakpoint_del,
	.start		= hw_breakpoint_start,
	.stop		= hw_breakpoint_stop,
	.read		= hw_breakpoint_pmu_read,
};

int __init init_hw_breakpoint(void)
{
	int cpu, err_cpu;
	int i;

	for (i = 0; i < TYPE_MAX; i++)
		nr_slots[i] = hw_breakpoint_slots(i);

	for_each_possible_cpu(cpu) {
		for (i = 0; i < TYPE_MAX; i++) {
			struct bp_cpuinfo *info = get_bp_info(cpu, i);

			info->tsk_pinned = kcalloc(nr_slots[i], sizeof(int),
						   GFP_KERNEL);
			if (!info->tsk_pinned)
				goto err_alloc;
		}
	}

	constraints_initialized = 1;

	perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);

	return register_die_notifier(&hw_breakpoint_exceptions_nb);

 err_alloc:
	for_each_possible_cpu(err_cpu) {
		for (i = 0; i < TYPE_MAX; i++)
			kfree(get_bp_info(err_cpu, i)->tsk_pinned);
		if (err_cpu == cpu)
			break;
	}

	return -ENOMEM;
}