drivers/perf/arm_pmu.c
#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/perf/arm_pmu.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/irq_regs.h>

static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
static DEFINE_PER_CPU(int, cpu_irq);

static inline u64 arm_pmu_event_max_period(struct perf_event *event)
{
	if (event->hw.flags & ARMPMU_EVT_64BIT)
		return GENMASK_ULL(63, 0);
	else
		return GENMASK_ULL(31, 0);
}

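/*
 * PERF_TYPE_HW_CACHE events encode the cache type in config[7:0], the
 * operation in config[15:8] and the result (access/miss) in config[23:16].
 * The per-PMU cache_map translates that triple into a hardware event number,
 * or CACHE_OP_UNSUPPORTED if the combination cannot be counted.
 */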
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	if (!cache_map)
		return -ENOENT;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	if (!event_map)
		return -ENOENT;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}

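/*
 * Program the counter so that it overflows after another sample_period
 * events, clamping the period to half the counter width so that the new
 * value cannot be overtaken before the overflow interrupt is handled.
 */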
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	u64 max_period;
	int ret = 0;

	max_period = arm_pmu_event_max_period(event);
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (max_period >> 1))
		left = (max_period >> 1);

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & max_period);

	perf_event_update_userpage(event);

	return ret;
}

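/*
 * Fold the delta since the last read into the event count. The
 * local64_cmpxchg() loop retries if an overflow interrupt updated
 * prev_count behind our back; masking the delta with max_period keeps
 * counter wrap-around correct.
 */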
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;
	u64 max_period = arm_pmu_event_max_period(event);

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	armpmu->clear_event_idx(hw_events, event);
	perf_event_update_userpage(event);
	/* Clear the allocated counter */
	hwc->idx = -1;
}

static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	/* An event following a process won't be stopped earlier */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return -ENOENT;

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0)
		return idx;

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, event) >= 0;
}

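/*
 * Dry-run counter allocation for the whole group against a fake
 * used_mask to check that the group can be scheduled onto the PMU
 * at one time.
 */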
static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	for_each_sibling_event(sibling, leader) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}

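/*
 * Common overflow-interrupt entry point. The time spent in the
 * PMU-specific handler is reported via perf_sample_event_took() so the
 * core can throttle the sampling rate if interrupts take too long.
 */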
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	int ret;
	u64 start_clock, finish_clock;

	/*
	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
	 * do any necessary shifting, we just need to perform the first
	 * dereference.
	 */
	armpmu = *(void **)dev;
	if (WARN_ON_ONCE(!armpmu))
		return IRQ_NONE;

	start_clock = sched_clock();
	ret = armpmu->handle_irq(armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	hwc->flags = 0;
	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period = arm_pmu_event_max_period(event) >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	/*
	 * Reject CPU-affine events for CPUs that are of a different class to
	 * that which this PMU handles. Process-following events (where
	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
	 * reject them later (in armpmu_add) if they're scheduled on a
	 * different class of CPU.
	 */
	if (event->cpu != -1 &&
		!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	return __hw_perf_event_init(event);
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static int armpmu_filter_match(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = cpumask_test_cpu(cpu, &armpmu->supported_cpus);
	if (ret && armpmu->filter_match)
		return armpmu->filter_match(event);

	return ret;
}

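/*
 * Expose the CPUs this PMU covers as a "cpus" sysfs attribute so that
 * userspace (e.g. the perf tool) can pick the right PMU instance on
 * big.LITTLE systems.
 */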
static ssize_t armpmu_cpumask_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));
	return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
}

static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL);

static struct attribute *armpmu_common_attrs[] = {
	&dev_attr_cpus.attr,
	NULL,
};

static struct attribute_group armpmu_common_attr_group = {
	.attrs = armpmu_common_attrs,
};

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
	if (!__oprofile_cpu_pmu)
		return NULL;

	return __oprofile_cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	int max_events = 0;

	if (__oprofile_cpu_pmu != NULL)
		max_events = __oprofile_cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

static int armpmu_count_irq_users(const int irq)
{
	int cpu, count = 0;

	for_each_possible_cpu(cpu) {
		if (per_cpu(cpu_irq, cpu) == irq)
			count++;
	}

	return count;
}

void armpmu_free_irq(int irq, int cpu)
{
	if (per_cpu(cpu_irq, cpu) == 0)
		return;
	if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
		return;

	if (!irq_is_percpu_devid(irq))
		free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu));
	else if (armpmu_count_irq_users(irq) == 1)
		free_percpu_irq(irq, &cpu_armpmu);

	per_cpu(cpu_irq, cpu) = 0;
}

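/*
 * PMU interrupts are wired up either as a per-CPU (PPI) interrupt shared
 * by all CPUs or as an ordinary per-CPU-affine line. Per-CPU IRQs are
 * requested once for all their users; normal IRQs are requested per CPU
 * with their affinity forced to that CPU.
 */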
int armpmu_request_irq(int irq, int cpu)
{
	int err = 0;
	const irq_handler_t handler = armpmu_dispatch_irq;
	if (!irq)
		return 0;

	if (!irq_is_percpu_devid(irq)) {
		unsigned long irq_flags;

		err = irq_force_affinity(irq, cpumask_of(cpu));

		if (err && num_possible_cpus() > 1) {
			pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
				irq, cpu);
			goto err_out;
		}

		irq_flags = IRQF_PERCPU |
			    IRQF_NOBALANCING |
			    IRQF_NO_THREAD;

		irq_set_status_flags(irq, IRQ_NOAUTOEN);
		err = request_irq(irq, handler, irq_flags, "arm-pmu",
				  per_cpu_ptr(&cpu_armpmu, cpu));
	} else if (armpmu_count_irq_users(irq) == 0) {
		err = request_percpu_irq(irq, handler, "arm-pmu",
					 &cpu_armpmu);
	}

	if (err)
		goto err_out;

	per_cpu(cpu_irq, cpu) = irq;
	return 0;

err_out:
	pr_err("unable to request IRQ%d for ARM PMU counters\n", irq);
	return err;
}

static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	return per_cpu(hw_events->irq, cpu);
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;
	if (pmu->reset)
		pmu->reset(pmu);

	per_cpu(cpu_armpmu, cpu) = pmu;

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq) {
		if (irq_is_percpu_devid(irq))
			enable_percpu_irq(irq, IRQ_TYPE_NONE);
		else
			enable_irq(irq);
	}

	return 0;
}

static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq) {
		if (irq_is_percpu_devid(irq))
			disable_percpu_irq(irq);
		else
			disable_irq_nosync(irq);
	}

	per_cpu(cpu_armpmu, cpu) = NULL;

	return 0;
}

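/*
 * CPU power management support: counters are stopped and saved on
 * CPU_PM_ENTER and reset/reprogrammed on CPU_PM_EXIT, since the PMU
 * loses its state when the core powers down.
 */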
#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct perf_event *event;
	int idx;

	for (idx = 0; idx < armpmu->num_events; idx++) {
		event = hw_events->events[idx];
		if (!event)
			continue;

		switch (cmd) {
		case CPU_PM_ENTER:
			/*
			 * Stop and update the counter
			 */
			armpmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			/*
			 * Restore and enable the counter.
			 * armpmu_start() indirectly calls
			 *
			 * perf_event_update_userpage()
			 *
			 * that requires RCU read locking to be functional,
			 * wrap the call within RCU_NONIDLE to make the
			 * RCU subsystem aware this cpu is not idle from
			 * an RCU perspective for the armpmu_start() call
			 * duration.
			 */
			RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
			break;
		default:
			break;
		}
	}
}

static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			     void *v)
{
	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return NOTIFY_DONE;

	/*
	 * Always reset the PMU registers on power-up even if
	 * there are no events running.
	 */
	if (cmd == CPU_PM_EXIT && armpmu->reset)
		armpmu->reset(armpmu);

	if (!enabled)
		return NOTIFY_OK;

	switch (cmd) {
	case CPU_PM_ENTER:
		armpmu->stop(armpmu);
		cpu_pm_pmu_setup(armpmu, cmd);
		break;
	case CPU_PM_EXIT:
		cpu_pm_pmu_setup(armpmu, cmd);
	case CPU_PM_ENTER_FAILED:
		armpmu->start(armpmu);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
}

static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
#endif

static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int err;

	err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING,
				       &cpu_pmu->node);
	if (err)
		goto out;

	err = cpu_pm_pmu_register(cpu_pmu);
	if (err)
		goto out_unregister;

	return 0;

out_unregister:
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
out:
	return err;
}

static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
	cpu_pm_pmu_unregister(cpu_pmu);
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
}

static struct arm_pmu *__armpmu_alloc(gfp_t flags)
{
	struct arm_pmu *pmu;
	int cpu;

	pmu = kzalloc(sizeof(*pmu), flags);
	if (!pmu) {
		pr_info("failed to allocate PMU device!\n");
		goto out;
	}

	pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags);
	if (!pmu->hw_events) {
		pr_info("failed to allocate per-cpu PMU data.\n");
		goto out_free_pmu;
	}

	pmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
		.filter_match	= armpmu_filter_match,
		.attr_groups	= pmu->attr_groups,
		/*
		 * This is a CPU PMU potentially in a heterogeneous
		 * configuration (e.g. big.LITTLE). This is not an uncore PMU,
		 * and we have taken ctx sharing into account (e.g. with our
		 * pmu::filter_match callback and pmu::event_init group
		 * validation).
		 */
		.capabilities	= PERF_PMU_CAP_HETEROGENEOUS_CPUS,
	};

	pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
		&armpmu_common_attr_group;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events;

		events = per_cpu_ptr(pmu->hw_events, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		events->percpu_pmu = pmu;
	}

	return pmu;

out_free_pmu:
	kfree(pmu);
out:
	return NULL;
}

struct arm_pmu *armpmu_alloc(void)
{
	return __armpmu_alloc(GFP_KERNEL);
}

struct arm_pmu *armpmu_alloc_atomic(void)
{
	return __armpmu_alloc(GFP_ATOMIC);
}

void armpmu_free(struct arm_pmu *pmu)
{
	free_percpu(pmu->hw_events);
	kfree(pmu);
}

int armpmu_register(struct arm_pmu *pmu)
{
	int ret;

	ret = cpu_pmu_init(pmu);
	if (ret)
		return ret;

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (ret)
		goto out_destroy;

	if (!__oprofile_cpu_pmu)
		__oprofile_cpu_pmu = pmu;

	pr_info("enabled with %s PMU driver, %d counters available\n",
		pmu->name, pmu->num_events);

	return 0;

out_destroy:
	cpu_pmu_destroy(pmu);
	return ret;
}

static int arm_pmu_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
				      "perf/arm/pmu:starting",
				      arm_perf_starting_cpu,
				      arm_perf_teardown_cpu);
	if (ret)
		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
		       ret);
	return ret;
}
subsys_initcall(arm_pmu_hp_init);