arch/x86/events/core.c
/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kdebug.h>
#include <linux/sched/mm.h>
#include <linux/sched/clock.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/nospec.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/alternative.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/desc.h>
#include <asm/ldt.h>
#include <asm/unwind.h>

#include "perf_event.h"

struct x86_pmu x86_pmu __read_mostly;

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
};

DEFINE_STATIC_KEY_FALSE(rdpmc_always_available_key);

u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
u64 x86_perf_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - x86_pmu.cntval_bits;
	u64 prev_raw_count, new_raw_count;
	int idx = hwc->idx;
	u64 delta;

	if (idx == INTEL_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new_raw_count);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

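/*
 * Illustrative example (editor's note, assuming a hypothetical
 * cntval_bits == 48, so shift == 16): if the counter rolled over from
 * 0xffffffffffff to 0x5 between two updates, then
 *
 *	delta = (0x5 << 16) - (0xffffffffffff << 16);	wraps mod 2^64
 *	delta >>= 16;					yields 6
 *
 * The shift pair discards the unimplemented high bits, so the
 * subtraction wraps at the counter width instead of at 64 bits and the
 * six events counted across the rollover are preserved.
 */
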
/*
 * Find and validate any extra registers to set up.
 */
static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
{
	struct hw_perf_event_extra *reg;
	struct extra_reg *er;

	reg = &event->hw.extra_reg;

	if (!x86_pmu.extra_regs)
		return 0;

	for (er = x86_pmu.extra_regs; er->msr; er++) {
		if (er->event != (config & er->config_mask))
			continue;
		if (event->attr.config1 & ~er->valid_mask)
			return -EINVAL;
		/* Check if the extra msrs can be safely accessed */
		if (!er->extra_msr_access)
			return -ENXIO;

		reg->idx = er->idx;
		reg->config = event->attr.config1;
		reg->reg = er->msr;
		break;
	}
	return 0;
}

static atomic_t active_events;
static atomic_t pmc_refcount;
static DEFINE_MUTEX(pmc_reserve_mutex);

#ifdef CONFIG_X86_LOCAL_APIC

static bool reserve_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu_config_addr(i));

	i = x86_pmu.num_counters;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu_event_addr(i));

	return false;
}

static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		release_perfctr_nmi(x86_pmu_event_addr(i));
		release_evntsel_nmi(x86_pmu_config_addr(i));
	}
}

#else

static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}

#endif

static bool check_hw_exists(void)
{
	u64 val, val_fail = -1, val_new = ~0;
	int i, reg, reg_fail = -1, ret = 0;
	int bios_fail = 0;
	int reg_safe = -1;

	/*
	 * Check to see if the BIOS enabled any of the counters, if so
	 * complain and bail.
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		reg = x86_pmu_config_addr(i);
		ret = rdmsrl_safe(reg, &val);
		if (ret)
			goto msr_fail;
		if (val & ARCH_PERFMON_EVENTSEL_ENABLE) {
			bios_fail = 1;
			val_fail = val;
			reg_fail = reg;
		} else {
			reg_safe = i;
		}
	}

	if (x86_pmu.num_counters_fixed) {
		reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		ret = rdmsrl_safe(reg, &val);
		if (ret)
			goto msr_fail;
		for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
			if (val & (0x03 << i*4)) {
				bios_fail = 1;
				val_fail = val;
				reg_fail = reg;
			}
		}
	}

	/*
	 * If all the counters are enabled, the below test will always
	 * fail. The tools will also become useless in this scenario.
	 * Just fail and disable the hardware counters.
	 */

	if (reg_safe == -1) {
		reg = reg_safe;
		goto msr_fail;
	}

	/*
	 * Read the current value, change it and read it back to see if it
	 * matches, this is needed to detect certain hardware emulators
	 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
	 */
	reg = x86_pmu_event_addr(reg_safe);
	if (rdmsrl_safe(reg, &val))
		goto msr_fail;
	val ^= 0xffffUL;
	ret = wrmsrl_safe(reg, val);
	ret |= rdmsrl_safe(reg, &val_new);
	if (ret || val != val_new)
		goto msr_fail;

	/*
	 * We still allow the PMU driver to operate:
	 */
	if (bios_fail) {
		pr_cont("Broken BIOS detected, complain to your hardware vendor.\n");
		pr_err(FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n",
			      reg_fail, val_fail);
	}

	return true;

msr_fail:
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		pr_cont("PMU not available due to virtualization, using software events only.\n");
	} else {
		pr_cont("Broken PMU hardware detected, using software events only.\n");
		pr_err("Failed to access perfctr msr (MSR %x is %Lx)\n",
		       reg, val_new);
	}

	return false;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	x86_release_hardware();
	atomic_dec(&active_events);
}

void hw_perf_lbr_event_destroy(struct perf_event *event)
{
	hw_perf_event_destroy(event);

	/* undo the lbr/bts event accounting */
	x86_del_exclusive(x86_lbr_exclusive_lbr);
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;
	cache_type = array_index_nospec(cache_type, PERF_COUNT_HW_CACHE_MAX);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;
	cache_op = array_index_nospec(cache_op, PERF_COUNT_HW_CACHE_OP_MAX);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;
	cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX);

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;
	attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
	return x86_pmu_extra_regs(val, event);
}

int x86_reserve_hardware(void)
{
	int err = 0;

	if (!atomic_inc_not_zero(&pmc_refcount)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&pmc_refcount) == 0) {
			if (!reserve_pmc_hardware())
				err = -EBUSY;
			else
				reserve_ds_buffers();
		}
		if (!err)
			atomic_inc(&pmc_refcount);
		mutex_unlock(&pmc_reserve_mutex);
	}

	return err;
}

void x86_release_hardware(void)
{
	if (atomic_dec_and_mutex_lock(&pmc_refcount, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_ds_buffers();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

/*
 * Check if we can create an event of a certain type (i.e. that no
 * conflicting events are present).
 */
int x86_add_exclusive(unsigned int what)
{
	int i;

	/*
	 * When lbr_pt_coexist we allow PT to coexist with either LBR or BTS.
	 * LBR and BTS are still mutually exclusive.
	 */
	if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
		return 0;

	if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
		mutex_lock(&pmc_reserve_mutex);
		for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
			if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
				goto fail_unlock;
		}
		atomic_inc(&x86_pmu.lbr_exclusive[what]);
		mutex_unlock(&pmc_reserve_mutex);
	}

	atomic_inc(&active_events);
	return 0;

fail_unlock:
	mutex_unlock(&pmc_reserve_mutex);
	return -EBUSY;
}

void x86_del_exclusive(unsigned int what)
{
	if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
		return;

	atomic_dec(&x86_pmu.lbr_exclusive[what]);
	atomic_dec(&active_events);
}

int x86_setup_perfctr(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;

	if (!is_sampling_event(event)) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (attr->type == PERF_TYPE_RAW)
		return x86_pmu_extra_regs(event->attr.config, event);

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, event);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events);

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	hwc->config |= config;

	return 0;
}

/*
 * check that branch_sample_type is compatible with
 * settings needed for precise_ip > 1 which implies
 * using the LBR to capture ALL taken branches at the
 * priv levels of the measurement
 */
static inline int precise_br_compat(struct perf_event *event)
{
	u64 m = event->attr.branch_sample_type;
	u64 b = 0;

	/* must capture all branches */
	if (!(m & PERF_SAMPLE_BRANCH_ANY))
		return 0;

	m &= PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_USER;

	if (!event->attr.exclude_user)
		b |= PERF_SAMPLE_BRANCH_USER;

	if (!event->attr.exclude_kernel)
		b |= PERF_SAMPLE_BRANCH_KERNEL;

	/*
	 * ignore PERF_SAMPLE_BRANCH_HV, not supported on x86
	 */

	return m == b;
}

int x86_pmu_max_precise(void)
{
	int precise = 0;

	/* Support for constant skid */
	if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
		precise++;

		/* Support for IP fixup */
		if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2)
			precise++;

		if (x86_pmu.pebs_prec_dist)
			precise++;
	}
	return precise;
}

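/*
 * For reference (editor's note): the returned maximum corresponds to
 * the perf_event_attr.precise_ip levels, where 0 allows arbitrary
 * sample skid, 1 asks for constant skid, 2 requests zero skid and 3
 * requires it.  Each capability check above (PEBS, LBR/pebs_format
 * based IP fixup, precise distribution) unlocks one further level.
 */
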
int x86_pmu_hw_config(struct perf_event *event)
{
	if (event->attr.precise_ip) {
		int precise = x86_pmu_max_precise();

		if (event->attr.precise_ip > precise)
			return -EOPNOTSUPP;

		/* There's no sense in having PEBS for non-sampling events: */
		if (!is_sampling_event(event))
			return -EINVAL;
	}
	/*
	 * check that PEBS LBR correction does not conflict with
	 * whatever the user is asking with attr->branch_sample_type
	 */
	if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) {
		u64 *br_type = &event->attr.branch_sample_type;

		if (has_branch_stack(event)) {
			if (!precise_br_compat(event))
				return -EOPNOTSUPP;

			/* branch_sample_type is compatible */

		} else {
			/*
			 * user did not specify branch_sample_type
			 *
			 * For PEBS fixups, we capture all
			 * the branches at the priv level of the
			 * event.
			 */
			*br_type = PERF_SAMPLE_BRANCH_ANY;

			if (!event->attr.exclude_user)
				*br_type |= PERF_SAMPLE_BRANCH_USER;

			if (!event->attr.exclude_kernel)
				*br_type |= PERF_SAMPLE_BRANCH_KERNEL;
		}
	}

	if (event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK)
		event->attach_state |= PERF_ATTACH_TASK_DATA;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	event->hw.config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to
	 */
	if (!event->attr.exclude_user)
		event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!event->attr.exclude_kernel)
		event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;

	if (event->attr.type == PERF_TYPE_RAW)
		event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;

	if (event->attr.sample_period && x86_pmu.limit_period) {
		if (x86_pmu.limit_period(event, event->attr.sample_period) >
				event->attr.sample_period)
			return -EINVAL;
	}

	return x86_setup_perfctr(event);
}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __x86_pmu_event_init(struct perf_event *event)
{
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = x86_reserve_hardware();
	if (err)
		return err;

	atomic_inc(&active_events);
	event->destroy = hw_perf_event_destroy;

	event->hw.idx = -1;
	event->hw.last_cpu = -1;
	event->hw.last_tag = ~0ULL;

	/* mark unused */
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	return x86_pmu.hw_config(event);
}

void x86_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(x86_pmu_config_addr(idx), val);
		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu_config_addr(idx), val);
	}
}

/*
 * A PMI may land after enabled has been set to 0; it can hit either
 * before or after disable_all.
 *
 * If the PMI hits before disable_all, the PMU will be disabled in the NMI
 * handler. It will not be re-enabled in the NMI handler again, because
 * enabled=0. After handling the NMI, disable_all will be called, which
 * will not change the state either. If the PMI hits after disable_all,
 * the PMU is already disabled before entering the NMI handler. The NMI
 * handler will not change the state either.
 *
 * So either situation is harmless.
 */
static void x86_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!x86_pmu_initialized())
		return;

	if (!cpuc->enabled)
		return;

	cpuc->n_added = 0;
	cpuc->enabled = 0;
	barrier();

	x86_pmu.disable_all();
}

void x86_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
	}
}

static struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
	return event->pmu == &pmu;
}

/*
 * Event scheduler state:
 *
 * Assign events iterating over all events and counters, beginning
 * with events with least weights first. Keep the current iterator
 * state in struct sched_state.
 */
struct sched_state {
	int	weight;
	int	event;		/* event index */
	int	counter;	/* counter index */
	int	unassigned;	/* number of events to be assigned left */
	int	nr_gp;		/* number of GP counters used */
	unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
};

/* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */
#define	SCHED_STATES_MAX	2

struct perf_sched {
	int			max_weight;
	int			max_events;
	int			max_gp;
	int			saved_states;
	struct event_constraint	**constraints;
	struct sched_state	state;
	struct sched_state	saved[SCHED_STATES_MAX];
};

/*
 * Initialize iterator that runs through all events and counters.
 */
static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints,
			    int num, int wmin, int wmax, int gpmax)
{
	int idx;

	memset(sched, 0, sizeof(*sched));
	sched->max_events	= num;
	sched->max_weight	= wmax;
	sched->max_gp		= gpmax;
	sched->constraints	= constraints;

	for (idx = 0; idx < num; idx++) {
		if (constraints[idx]->weight == wmin)
			break;
	}

	sched->state.event	= idx;		/* start with min weight */
	sched->state.weight	= wmin;
	sched->state.unassigned	= num;
}

static void perf_sched_save_state(struct perf_sched *sched)
{
	if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX))
		return;

	sched->saved[sched->saved_states] = sched->state;
	sched->saved_states++;
}

static bool perf_sched_restore_state(struct perf_sched *sched)
{
	if (!sched->saved_states)
		return false;

	sched->saved_states--;
	sched->state = sched->saved[sched->saved_states];

	/* continue with next counter: */
	clear_bit(sched->state.counter++, sched->state.used);

	return true;
}

/*
 * Select a counter for the current event to schedule. Return true on
 * success.
 */
static bool __perf_sched_find_counter(struct perf_sched *sched)
{
	struct event_constraint *c;
	int idx;

	if (!sched->state.unassigned)
		return false;

	if (sched->state.event >= sched->max_events)
		return false;

	c = sched->constraints[sched->state.event];
	/* Prefer fixed purpose counters */
	if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
		idx = INTEL_PMC_IDX_FIXED;
		for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
			if (!__test_and_set_bit(idx, sched->state.used))
				goto done;
		}
	}

	/* Grab the first unused counter starting with idx */
	idx = sched->state.counter;
	for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
		if (!__test_and_set_bit(idx, sched->state.used)) {
			if (sched->state.nr_gp++ >= sched->max_gp)
				return false;

			goto done;
		}
	}

	return false;

done:
	sched->state.counter = idx;

	if (c->overlap)
		perf_sched_save_state(sched);

	return true;
}

static bool perf_sched_find_counter(struct perf_sched *sched)
{
	while (!__perf_sched_find_counter(sched)) {
		if (!perf_sched_restore_state(sched))
			return false;
	}

	return true;
}

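/*
 * Backtracking sketch (editor's note, hypothetical constraints): assume
 * event A may use counters {0,1} with ->overlap set, and event B only
 * counter {0}.  If A grabs counter 0 first, B cannot be placed; the
 * state saved above lets the scheduler undo A's choice and retry it on
 * counter 1, after which B fits.  Hence the O(n!) worst case, bounded
 * in practice by SCHED_STATES_MAX.
 */
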
/*
 * Go through all unassigned events and find the next one to schedule.
 * Take events with the least weight first. Return true on success.
 */
static bool perf_sched_next_event(struct perf_sched *sched)
{
	struct event_constraint *c;

	if (!sched->state.unassigned || !--sched->state.unassigned)
		return false;

	do {
		/* next event */
		sched->state.event++;
		if (sched->state.event >= sched->max_events) {
			/* next weight */
			sched->state.event = 0;
			sched->state.weight++;
			if (sched->state.weight > sched->max_weight)
				return false;
		}
		c = sched->constraints[sched->state.event];
	} while (c->weight != sched->state.weight);

	sched->state.counter = 0;	/* start with first counter */

	return true;
}

/*
 * Assign a counter for each event.
 */
int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int gpmax, int *assign)
{
	struct perf_sched sched;

	perf_sched_init(&sched, constraints, n, wmin, wmax, gpmax);

	do {
		if (!perf_sched_find_counter(&sched))
			break;	/* failed */
		if (assign)
			assign[sched.state.event] = sched.state.counter;
	} while (perf_sched_next_event(&sched));

	return sched.state.unassigned;
}
EXPORT_SYMBOL_GPL(perf_assign_events);

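/*
 * Scheduling sketch (editor's note, hypothetical constraint values):
 * with three events whose constraints allow counters {0}, {0,1} and
 * {0,1,2}, the weights are 1, 2 and 3.  Iterating from wmin, the
 * weight-1 event grabs counter 0 first, the weight-2 event then takes
 * counter 1 and the weight-3 event counter 2.  Placing the least
 * constrained (highest weight) events first could instead strand the
 * weight-1 event without a usable counter.
 */
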
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	struct event_constraint *c;
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	struct perf_event *e;
	int i, wmin, wmax, unsched = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	if (x86_pmu.start_scheduling)
		x86_pmu.start_scheduling(cpuc);

	for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		cpuc->event_constraint[i] = NULL;
		c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
		cpuc->event_constraint[i] = c;

		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/*
	 * fastpath, try to reuse previous register
	 */
	for (i = 0; i < n; i++) {
		hwc = &cpuc->event_list[i]->hw;
		c = cpuc->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}

	/* slow path */
	if (i != n) {
		int gpmax = x86_pmu.num_counters;

		/*
		 * Do not allow scheduling of more than half the available
		 * generic counters.
		 *
		 * This helps avoid counter starvation of the sibling thread
		 * by ensuring at most half the counters cannot be in
		 * exclusive mode. There are no designated counters for the
		 * limit. Any N/2 counters can be used. This helps with
		 * events with specific counter constraints.
		 */
		if (is_ht_workaround_enabled() && !cpuc->is_fake &&
		    READ_ONCE(cpuc->excl_cntrs->exclusive_present))
			gpmax /= 2;

		unsched = perf_assign_events(cpuc->event_constraint, n, wmin,
					     wmax, gpmax, assign);
	}

	/*
	 * In case of success (unsched = 0), mark events as committed,
	 * so we do not put_constraint() in case new events are added
	 * and fail to be scheduled
	 *
	 * We invoke the lower level commit callback to lock the resource
	 *
	 * We do not need to do all of this in case we are called to
	 * validate an event group (assign == NULL)
	 */
	if (!unsched && assign) {
		for (i = 0; i < n; i++) {
			e = cpuc->event_list[i];
			if (x86_pmu.commit_scheduling)
				x86_pmu.commit_scheduling(cpuc, i, assign[i]);
		}
	} else {
		/*
		 * Compute the number of events already present; see
		 * x86_pmu_add(), validate_group() and x86_pmu_commit_txn().
		 * For the former two cpuc->n_events hasn't been updated yet,
		 * while for the latter cpuc->n_txn contains the number of
		 * events added in the current transaction.
		 */
		i = cpuc->n_events;
		if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
			i -= cpuc->n_txn;

		for (; i < n; i++) {
			e = cpuc->event_list[i];

			/*
			 * release events that failed scheduling
			 */
			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, e);
		}
	}

	if (x86_pmu.stop_scheduling)
		x86_pmu.stop_scheduling(cpuc);

	return unsched ? -EINVAL : 0;
}

/*
 * dogrp: true if we must collect sibling events (group)
 * returns total number of events, or an error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;

	/* current number of events already accepted */
	n = cpuc->n_events;

	if (is_x86_event(leader)) {
		if (n >= max_count)
			return -EINVAL;
		cpuc->event_list[n] = leader;
		n++;
	}
	if (!dogrp)
		return n;

	for_each_sibling_event(event, leader) {
		if (!is_x86_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		cpuc->event_list[n] = event;
		n++;
	}
	return n;
}

static inline void x86_assign_hw_event(struct perf_event *event,
				struct cpu_hw_events *cpuc, int i)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = cpuc->assign[i];
	hwc->last_cpu = smp_processor_id();
	hwc->last_tag = ++cpuc->tags[i];

	if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) {
		hwc->config_base = 0;
		hwc->event_base	= 0;
	} else if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED);
		hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30;
	} else {
		hwc->config_base = x86_pmu_config_addr(hwc->idx);
		hwc->event_base  = x86_pmu_event_addr(hwc->idx);
		hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx);
	}
}

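/*
 * Editor's note: the 1<<30 above is the RDPMC fixed-counter selector;
 * the RDPMC instruction expects ECX bit 30 set plus the fixed counter
 * index when reading the MSR_ARCH_PERFMON_FIXED_CTR* counters, while
 * general purpose counters are addressed by their plain index.
 */
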
/**
 * x86_perf_rdpmc_index - Return PMC counter used for event
 * @event: the perf_event to which the PMC counter was assigned
 *
 * The counter assigned to this performance event may change if interrupts
 * are enabled. This counter should thus never be used while interrupts are
 * enabled. Before this function is used to obtain the assigned counter the
 * event should be checked for validity using, for example,
 * perf_event_read_local(), within the same interrupt disabled section in
 * which this counter is planned to be used.
 *
 * Return: The index of the performance monitoring counter assigned to
 * @event.
 */
int x86_perf_rdpmc_index(struct perf_event *event)
{
	lockdep_assert_irqs_disabled();

	return event->hw.event_base_rdpmc;
}

static inline int match_prev_assignment(struct hw_perf_event *hwc,
					struct cpu_hw_events *cpuc,
					int i)
{
	return hwc->idx == cpuc->assign[i] &&
	       hwc->last_cpu == smp_processor_id() &&
	       hwc->last_tag == cpuc->tags[i];
}

static void x86_pmu_start(struct perf_event *event, int flags);

static void x86_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int i, added = cpuc->n_added;

	if (!x86_pmu_initialized())
		return;

	if (cpuc->enabled)
		return;

	if (cpuc->n_added) {
		int n_running = cpuc->n_events - cpuc->n_added;
		/*
		 * apply assignment obtained either from
		 * hw_perf_group_sched_in() or x86_pmu_enable()
		 *
		 * step1: save events moving to new counters
		 */
		for (i = 0; i < n_running; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			/*
			 * we can avoid reprogramming counter if:
			 * - assigned same counter as last time
			 * - running on same CPU as last time
			 * - no other event has used the counter since
			 */
			if (hwc->idx == -1 ||
			    match_prev_assignment(hwc, cpuc, i))
				continue;

			/*
			 * Ensure we don't accidentally enable a stopped
			 * counter simply because we rescheduled.
			 */
			if (hwc->state & PERF_HES_STOPPED)
				hwc->state |= PERF_HES_ARCH;

			x86_pmu_stop(event, PERF_EF_UPDATE);
		}

		/*
		 * step2: reprogram moved events into new counters
		 */
		for (i = 0; i < cpuc->n_events; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			if (!match_prev_assignment(hwc, cpuc, i))
				x86_assign_hw_event(event, cpuc, i);
			else if (i < n_running)
				continue;

			if (hwc->state & PERF_HES_ARCH)
				continue;

			x86_pmu_start(event, PERF_EF_RELOAD);
		}
		cpuc->n_added = 0;
		perf_events_lapic_init();
	}

	cpuc->enabled = 1;
	barrier();

	x86_pmu.enable_all(added);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
int x86_perf_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0, idx = hwc->idx;

	if (idx == INTEL_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	if (x86_pmu.limit_period)
		left = x86_pmu.limit_period(event, left);

	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw event starts counting from this event offset,
	 * mark it to be able to extract future deltas:
	 */
	local64_set(&hwc->prev_count, (u64)-left);

	wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);

	/*
	 * Due to erratum on certain CPUs we need
	 * a second write to be sure the register
	 * is updated properly
	 */
	if (x86_pmu.perfctr_second_write) {
		wrmsrl(hwc->event_base,
		       (u64)(-left) & x86_pmu.cntval_mask);
	}

	perf_event_update_userpage(event);

	return ret;
}

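/*
 * Worked example (editor's note, hypothetical numbers): with a sample
 * period of 100000 and a cntval_mask covering 48 bits, the write above
 * programs the counter to (2^48 - 100000).  Counting up from there,
 * the counter overflows and raises the PMI after exactly 'left'
 * increments, which is what makes hwc->prev_count == (u64)-left the
 * right baseline for the next x86_perf_event_update().
 */
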
void x86_pmu_enable_event(struct perf_event *event)
{
	if (__this_cpu_read(cpu_hw_events.enabled))
		__x86_pmu_enable_event(&event->hw,
				       ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Add a single event to the PMU.
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 */
static int x86_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc;
	int assign[X86_PMC_IDX_MAX];
	int n, n0, ret;

	hwc = &event->hw;

	n0 = cpuc->n_events;
	ret = n = collect_events(cpuc, event, false);
	if (ret < 0)
		goto out;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	/*
	 * If group events scheduling transaction was started,
	 * skip the schedulability test here, it will be performed
	 * at commit time (->commit_txn) as a whole.
	 *
	 * If commit fails, we'll call ->del() on all events
	 * for which ->add() was called.
	 */
	if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
		goto done_collect;

	ret = x86_pmu.schedule_events(cpuc, n, assign);
	if (ret)
		goto out;
	/*
	 * copy new assignment, now we know it is possible
	 * will be used by hw_perf_enable()
	 */
	memcpy(cpuc->assign, assign, n*sizeof(int));

done_collect:
	/*
	 * Commit the collect_events() state. See x86_pmu_del() and
	 * x86_pmu_*_txn().
	 */
	cpuc->n_events = n;
	cpuc->n_added += n - n0;
	cpuc->n_txn += n - n0;

	if (x86_pmu.add) {
		/*
		 * This is before x86_pmu_enable() will call x86_pmu_start(),
		 * so we enable LBRs before an event needs them etc..
		 */
		x86_pmu.add(event);
	}

	ret = 0;
out:
	return ret;
}

static void x86_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(idx == -1))
		return;

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
		x86_perf_event_set_period(event);
	}

	event->hw.state = 0;

	cpuc->events[idx] = event;
	__set_bit(idx, cpuc->active_mask);
	__set_bit(idx, cpuc->running);
	x86_pmu.enable(event);
	perf_event_update_userpage(event);
}

void perf_event_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	u64 pebs, debugctl;
	struct cpu_hw_events *cpuc;
	unsigned long flags;
	int cpu, idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_events, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

		pr_info("\n");
		pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status:     %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
		if (x86_pmu.pebs_constraints) {
			rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
			pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
		}
		if (x86_pmu.lbr_nr) {
			rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
			pr_info("CPU#%d: debugctl:   %016llx\n", cpu, debugctl);
		}
	}
	pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
		rdmsrl(x86_pmu_event_addr(idx), pmc_count);

		prev_left = per_cpu(pmc_prev_left[idx], cpu);

		pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}

void x86_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
		x86_pmu.disable(event);
		cpuc->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		x86_perf_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static void x86_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int i;

	/*
	 * If we're called during a txn, we only need to undo x86_pmu.add.
	 * The events never got scheduled and ->cancel_txn will truncate
	 * the event_list.
	 *
	 * XXX assumes any ->del() called during a TXN will only be on
	 * an event added during that same TXN.
	 */
	if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
		goto do_del;

	/*
	 * Not a TXN, therefore cleanup properly.
	 */
	x86_pmu_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event_list[i])
			break;
	}

	if (WARN_ON_ONCE(i == cpuc->n_events)) /* called ->del() without ->add() ? */
		return;

	/* If we have a newly added event; make sure to decrease n_added. */
	if (i >= cpuc->n_events - cpuc->n_added)
		--cpuc->n_added;

	if (x86_pmu.put_event_constraints)
		x86_pmu.put_event_constraints(cpuc, event);

	/* Delete the array entry. */
	while (++i < cpuc->n_events) {
		cpuc->event_list[i-1] = cpuc->event_list[i];
		cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
	}
	--cpuc->n_events;

	perf_event_update_userpage(event);

do_del:
	if (x86_pmu.del) {
		/*
		 * This is after x86_pmu_stop(); so we disable LBRs after any
		 * event can need them etc..
		 */
		x86_pmu.del(event);
	}
}

int x86_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	int idx, handled = 0;
	u64 val;

	cpuc = this_cpu_ptr(&cpu_hw_events);

	/*
	 * Some chipsets need to unmask the LVTPC in a particular spot
	 * inside the nmi handler. As a result, the unmasking was pushed
	 * into all the nmi handlers.
	 *
	 * This generic handler doesn't seem to have any issues where the
	 * unmasking occurs so it was left at the top.
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask)) {
			/*
			 * Though we deactivated the counter, some CPUs
			 * might still deliver spurious interrupts that
			 * were already in flight. Catch them:
			 */
			if (__test_and_clear_bit(idx, cpuc->running))
				handled++;
			continue;
		}

		event = cpuc->events[idx];

		val = x86_perf_event_update(event);
		if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
			continue;

		/*
		 * event overflow
		 */
		handled++;
		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (!x86_perf_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}

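/*
 * Editor's note on the overflow test above: counters are programmed to
 * (u64)-left (see x86_perf_event_set_period()), so a counter that has
 * not yet overflowed still has its top implemented bit
 * (cntval_bits - 1) set.  A clear top bit therefore means the counter
 * crossed zero, i.e. the event overflowed and must be handled.
 */
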
void perf_events_lapic_init(void)
{
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;

	/*
	 * Always use NMI for PMU
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static int
perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	u64 start_clock;
	u64 finish_clock;
	int ret;

	/*
	 * All PMUs/events that share this PMI handler should make sure to
	 * increment active_events for their events.
	 */
	if (!atomic_read(&active_events))
		return NMI_DONE;

	start_clock = sched_clock();
	ret = x86_pmu.handle_irq(regs);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);

	return ret;
}
NOKPROBE_SYMBOL(perf_event_nmi_handler);

struct event_constraint emptyconstraint;
struct event_constraint unconstrained;

static int x86_pmu_prepare_cpu(unsigned int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int i;

	for (i = 0 ; i < X86_PERF_KFREE_MAX; i++)
		cpuc->kfree_on_online[i] = NULL;
	if (x86_pmu.cpu_prepare)
		return x86_pmu.cpu_prepare(cpu);
	return 0;
}

static int x86_pmu_dead_cpu(unsigned int cpu)
{
	if (x86_pmu.cpu_dead)
		x86_pmu.cpu_dead(cpu);
	return 0;
}

static int x86_pmu_online_cpu(unsigned int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int i;

	for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) {
		kfree(cpuc->kfree_on_online[i]);
		cpuc->kfree_on_online[i] = NULL;
	}
	return 0;
}

static int x86_pmu_starting_cpu(unsigned int cpu)
{
	if (x86_pmu.cpu_starting)
		x86_pmu.cpu_starting(cpu);
	return 0;
}

static int x86_pmu_dying_cpu(unsigned int cpu)
{
	if (x86_pmu.cpu_dying)
		x86_pmu.cpu_dying(cpu);
	return 0;
}

static void __init pmu_check_apic(void)
{
	if (boot_cpu_has(X86_FEATURE_APIC))
		return;

	x86_pmu.apic = 0;
	pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
	pr_info("no hardware sampling interrupt available.\n");

	/*
	 * If we have a PMU initialized but no APIC
	 * interrupts, we cannot sample hardware
	 * events (user-space has to fall back and
	 * sample via a hrtimer based software event):
	 */
	pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
}

static struct attribute_group x86_pmu_format_group __ro_after_init = {
	.name = "format",
	.attrs = NULL,
};

/*
 * Remove all undefined events (x86_pmu.event_map(id) == 0)
 * out of events_attr attributes.
 */
static void __init filter_events(struct attribute **attrs)
{
	struct device_attribute *d;
	struct perf_pmu_events_attr *pmu_attr;
	int offset = 0;
	int i, j;

	for (i = 0; attrs[i]; i++) {
		d = (struct device_attribute *)attrs[i];
		pmu_attr = container_of(d, struct perf_pmu_events_attr, attr);
		/* str trumps id */
		if (pmu_attr->event_str)
			continue;
		if (x86_pmu.event_map(i + offset))
			continue;

		for (j = i; attrs[j]; j++)
			attrs[j] = attrs[j + 1];

		/* Check the shifted attr. */
		i--;

		/*
		 * event_map() is index based, the attrs array is organized
		 * by increasing event index. If we shift the events, then
		 * we need to compensate for the event_map(), otherwise
		 * we are looking up the wrong event in the map
		 */
		offset++;
	}
}

/* Merge two pointer arrays */
__init struct attribute **merge_attr(struct attribute **a, struct attribute **b)
{
	struct attribute **new;
	int j, i;

	for (j = 0; a && a[j]; j++)
		;
	for (i = 0; b && b[i]; i++)
		j++;
	j++;

	new = kmalloc_array(j, sizeof(struct attribute *), GFP_KERNEL);
	if (!new)
		return NULL;

	j = 0;
	for (i = 0; a && a[i]; i++)
		new[j++] = a[i];
	for (i = 0; b && b[i]; i++)
		new[j++] = b[i];
	new[j] = NULL;

	return new;
}

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr = \
		container_of(attr, struct perf_pmu_events_attr, attr);
	u64 config = x86_pmu.event_map(pmu_attr->id);

	/* string trumps id */
	if (pmu_attr->event_str)
		return sprintf(page, "%s", pmu_attr->event_str);

	return x86_pmu.events_sysfs_show(page, config);
}
EXPORT_SYMBOL_GPL(events_sysfs_show);

ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page)
{
	struct perf_pmu_events_ht_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_ht_attr, attr);

	/*
	 * Report conditional events depending on Hyper-Threading.
	 *
	 * This is overly conservative as usually the HT special
	 * handling is not needed if the other CPU thread is idle.
	 *
	 * Note this does not (and cannot) handle the case when thread
	 * siblings are invisible, for example with virtualization
	 * if they are owned by some other guest. The user tool
	 * has to re-read when a thread sibling gets onlined later.
	 */
	return sprintf(page, "%s",
			topology_max_smt_threads() > 1 ?
			pmu_attr->event_str_ht :
			pmu_attr->event_str_noht);
}

EVENT_ATTR(cpu-cycles,			CPU_CYCLES		);
EVENT_ATTR(instructions,		INSTRUCTIONS		);
EVENT_ATTR(cache-references,		CACHE_REFERENCES	);
EVENT_ATTR(cache-misses, 		CACHE_MISSES		);
EVENT_ATTR(branch-instructions,		BRANCH_INSTRUCTIONS	);
EVENT_ATTR(branch-misses,		BRANCH_MISSES		);
EVENT_ATTR(bus-cycles,			BUS_CYCLES		);
EVENT_ATTR(stalled-cycles-frontend,	STALLED_CYCLES_FRONTEND	);
EVENT_ATTR(stalled-cycles-backend,	STALLED_CYCLES_BACKEND	);
EVENT_ATTR(ref-cycles,			REF_CPU_CYCLES		);

static struct attribute *empty_attrs;

static struct attribute *events_attr[] = {
	EVENT_PTR(CPU_CYCLES),
	EVENT_PTR(INSTRUCTIONS),
	EVENT_PTR(CACHE_REFERENCES),
	EVENT_PTR(CACHE_MISSES),
	EVENT_PTR(BRANCH_INSTRUCTIONS),
	EVENT_PTR(BRANCH_MISSES),
	EVENT_PTR(BUS_CYCLES),
	EVENT_PTR(STALLED_CYCLES_FRONTEND),
	EVENT_PTR(STALLED_CYCLES_BACKEND),
	EVENT_PTR(REF_CPU_CYCLES),
	NULL,
};

static struct attribute_group x86_pmu_events_group __ro_after_init = {
	.name = "events",
	.attrs = events_attr,
};

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
{
	u64 umask  = (config & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
	u64 cmask  = (config & ARCH_PERFMON_EVENTSEL_CMASK) >> 24;
	bool edge  = (config & ARCH_PERFMON_EVENTSEL_EDGE);
	bool pc    = (config & ARCH_PERFMON_EVENTSEL_PIN_CONTROL);
	bool any   = (config & ARCH_PERFMON_EVENTSEL_ANY);
	bool inv   = (config & ARCH_PERFMON_EVENTSEL_INV);
	ssize_t ret;

	/*
	 * We have whole page size to spend and just little data
	 * to write, so we can safely use sprintf.
	 */
	ret = sprintf(page, "event=0x%02llx", event);

	if (umask)
		ret += sprintf(page + ret, ",umask=0x%02llx", umask);

	if (edge)
		ret += sprintf(page + ret, ",edge");

	if (pc)
		ret += sprintf(page + ret, ",pc");

	if (any)
		ret += sprintf(page + ret, ",any");

	if (inv)
		ret += sprintf(page + ret, ",inv");

	if (cmask)
		ret += sprintf(page + ret, ",cmask=0x%02llx", cmask);

	ret += sprintf(page + ret, "\n");

	return ret;
}

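/*
 * Example output (editor's note, illustrative values): for a config of
 * 0x01b7 and event 0xb7, the function above renders
 * "event=0xb7,umask=0x01", which is the same syntax accepted by the
 * perf tool's event parser.
 */
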
static struct attribute_group x86_pmu_attr_group;
static struct attribute_group x86_pmu_caps_group;

dda99116 1765static int __init init_hw_perf_events(void)
b56a3802 1766{
c1d6f42f 1767 struct x86_pmu_quirk *quirk;
72eae04d
RR
1768 int err;
1769
cdd6c482 1770 pr_info("Performance Events: ");
1123e3ad 1771
b56a3802
JSR
1772 switch (boot_cpu_data.x86_vendor) {
1773 case X86_VENDOR_INTEL:
72eae04d 1774 err = intel_pmu_init();
b56a3802 1775 break;
f87ad35d 1776 case X86_VENDOR_AMD:
72eae04d 1777 err = amd_pmu_init();
f87ad35d 1778 break;
6d0ef316
PW
1779 case X86_VENDOR_HYGON:
1780 err = amd_pmu_init();
1781 x86_pmu.name = "HYGON";
1782 break;
4138960a 1783 default:
8a3da6c7 1784 err = -ENOTSUPP;
b56a3802 1785 }
1123e3ad 1786 if (err != 0) {
cdd6c482 1787 pr_cont("no PMU driver, software events only.\n");
004417a6 1788 return 0;
1123e3ad 1789 }
b56a3802 1790
12558038
CG
1791 pmu_check_apic();
1792
33c6d6a7 1793 /* sanity check that the hardware exists or is emulated */
4407204c 1794 if (!check_hw_exists())
004417a6 1795 return 0;
33c6d6a7 1796
1123e3ad 1797 pr_cont("%s PMU driver.\n", x86_pmu.name);
faa28ae0 1798
e97df763
PZ
1799 x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
1800
c1d6f42f
PZ
1801 for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
1802 quirk->func();
3c44780b 1803
a1eac7ac
RR
1804 if (!x86_pmu.intel_ctrl)
1805 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
241771ef 1806
cdd6c482 1807 perf_events_lapic_init();
9c48f1c6 1808 register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
1123e3ad 1809
63b14649 1810 unconstrained = (struct event_constraint)
948b1bb8 1811 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
9fac2cf3 1812 0, x86_pmu.num_counters, 0, 0);
63b14649 1813
641cc938 1814 x86_pmu_format_group.attrs = x86_pmu.format_attrs;
0c9d42ed 1815
5da382eb
PZ
1816 if (x86_pmu.caps_attrs) {
1817 struct attribute **tmp;
1818
1819 tmp = merge_attr(x86_pmu_caps_group.attrs, x86_pmu.caps_attrs);
1820 if (!WARN_ON(!tmp))
1821 x86_pmu_caps_group.attrs = tmp;
1822 }
0c9d42ed 1823
f20093ee
SE
1824 if (x86_pmu.event_attrs)
1825 x86_pmu_events_group.attrs = x86_pmu.event_attrs;
1826
a4747393
JO
1827 if (!x86_pmu.events_sysfs_show)
1828 x86_pmu_events_group.attrs = &empty_attrs;
8300daa2
JO
1829 else
1830 filter_events(x86_pmu_events_group.attrs);
a4747393 1831
1a6461b1
AK
1832 if (x86_pmu.cpu_events) {
1833 struct attribute **tmp;
1834
1835 tmp = merge_attr(x86_pmu_events_group.attrs, x86_pmu.cpu_events);
1836 if (!WARN_ON(!tmp))
1837 x86_pmu_events_group.attrs = tmp;
1838 }
1839
6089327f
KL
1840 if (x86_pmu.attrs) {
1841 struct attribute **tmp;
1842
1843 tmp = merge_attr(x86_pmu_attr_group.attrs, x86_pmu.attrs);
1844 if (!WARN_ON(!tmp))
1845 x86_pmu_attr_group.attrs = tmp;
1846 }
1847
57c0c15b 1848 pr_info("... version: %d\n", x86_pmu.version);
948b1bb8
RR
1849 pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
1850 pr_info("... generic registers: %d\n", x86_pmu.num_counters);
1851 pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
57c0c15b 1852 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
948b1bb8 1853 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
d6dc0b4e 1854 pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
3f6da390 1855
95ca792c
TG
1856 /*
1857 * Install callbacks. Core will call them for each online
1858 * cpu.
1859 */
73c1b41e 1860 err = cpuhp_setup_state(CPUHP_PERF_X86_PREPARE, "perf/x86:prepare",
95ca792c
TG
1861 x86_pmu_prepare_cpu, x86_pmu_dead_cpu);
1862 if (err)
1863 return err;
1864
1865 err = cpuhp_setup_state(CPUHP_AP_PERF_X86_STARTING,
73c1b41e 1866 "perf/x86:starting", x86_pmu_starting_cpu,
95ca792c
TG
1867 x86_pmu_dying_cpu);
1868 if (err)
1869 goto out;
1870
73c1b41e 1871 err = cpuhp_setup_state(CPUHP_AP_PERF_X86_ONLINE, "perf/x86:online",
95ca792c
TG
1872 x86_pmu_online_cpu, NULL);
1873 if (err)
1874 goto out1;
1875
1876 err = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1877 if (err)
1878 goto out2;
004417a6
PZ
1879
1880 return 0;
95ca792c
TG
1881
1882out2:
1883 cpuhp_remove_state(CPUHP_AP_PERF_X86_ONLINE);
1884out1:
1885 cpuhp_remove_state(CPUHP_AP_PERF_X86_STARTING);
1886out:
1887 cpuhp_remove_state(CPUHP_PERF_X86_PREPARE);
1888 return err;
241771ef 1889}
004417a6 1890early_initcall(init_hw_perf_events);

static inline void x86_pmu_read(struct perf_event *event)
{
	if (x86_pmu.read)
		return x86_pmu.read(event);
	x86_perf_event_update(event);
}

/*
 * Start group events scheduling transaction.
 * Set the flag to make pmu::enable() not perform the
 * schedulability test; it will be performed at commit time.
 *
 * We only support PERF_PMU_TXN_ADD transactions. Save the
 * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
 * transactions.
 */
static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	WARN_ON_ONCE(cpuc->txn_flags);		/* txn already in flight */

	cpuc->txn_flags = txn_flags;
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;

	perf_pmu_disable(pmu);
	__this_cpu_write(cpu_hw_events.n_txn, 0);
}

/*
 * Stop group events scheduling transaction.
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
static void x86_pmu_cancel_txn(struct pmu *pmu)
{
	unsigned int txn_flags;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	WARN_ON_ONCE(!cpuc->txn_flags);	/* no txn in flight */

	txn_flags = cpuc->txn_flags;
	cpuc->txn_flags = 0;
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;

	/*
	 * Truncate collected array by the number of events added in this
	 * transaction. See x86_pmu_add() and x86_pmu_*_txn().
	 */
	__this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
	__this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
	perf_pmu_enable(pmu);
}

/*
 * Commit group events scheduling transaction.
 * Perform the group schedulability test as a whole;
 * returns 0 on success.
 *
 * Does not cancel the transaction on failure; expects the caller to do this.
 */
static int x86_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int assign[X86_PMC_IDX_MAX];
	int n, ret;

	WARN_ON_ONCE(!cpuc->txn_flags);	/* no txn in flight */

	if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) {
		cpuc->txn_flags = 0;
		return 0;
	}

	n = cpuc->n_events;

	if (!x86_pmu_initialized())
		return -EAGAIN;

	ret = x86_pmu.schedule_events(cpuc, n, assign);
	if (ret)
		return ret;

	/*
	 * Copy the new assignment now that we know it is schedulable;
	 * it will be used by hw_perf_enable().
	 */
	memcpy(cpuc->assign, assign, n*sizeof(int));

	cpuc->txn_flags = 0;
	perf_pmu_enable(pmu);
	return 0;
}
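
/*
 * Sketch of the caller side of the transaction protocol above, loosely
 * modeled on group_sched_in() in kernel/events/core.c (error handling
 * trimmed; event_sched_in() stands in for the generic helper). The
 * schedulability test is deferred while each group member is added,
 * then performed once for the whole group:
 *
 *	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);	// defer the test
 *
 *	if (event_sched_in(group_event))	// leader: pmu->add()
 *		goto error;
 *
 *	for_each_sibling_event(event, group_event) {
 *		if (event_sched_in(event))	// then every sibling
 *			goto error;
 *	}
 *
 *	if (!pmu->commit_txn(pmu))		// test the group as a whole
 *		return 0;
 *  error:
 *	pmu->cancel_txn(pmu);			// unwind n_added/n_events
 *	return -EAGAIN;
 */
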
/*
 * A fake_cpuc is used to validate event groups. Due to
 * the extra reg logic, we need to also allocate a fake
 * per_core and per_cpu structure. Otherwise, group events
 * using extra reg may conflict without the kernel being
 * able to catch this when the last event gets added to
 * the group.
 */
static void free_fake_cpuc(struct cpu_hw_events *cpuc)
{
	intel_cpuc_finish(cpuc);
	kfree(cpuc);
}

static struct cpu_hw_events *allocate_fake_cpuc(void)
{
	struct cpu_hw_events *cpuc;
	int cpu = raw_smp_processor_id();

	cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
	if (!cpuc)
		return ERR_PTR(-ENOMEM);
	cpuc->is_fake = 1;

	if (intel_cpuc_prepare(cpuc, cpu))
		goto error;

	return cpuc;
error:
	free_fake_cpuc(cpuc);
	return ERR_PTR(-ENOMEM);
}

/*
 * validate that we can schedule this event
 */
static int validate_event(struct perf_event *event)
{
	struct cpu_hw_events *fake_cpuc;
	struct event_constraint *c;
	int ret = 0;

	fake_cpuc = allocate_fake_cpuc();
	if (IS_ERR(fake_cpuc))
		return PTR_ERR(fake_cpuc);

	c = x86_pmu.get_event_constraints(fake_cpuc, 0, event);

	if (!c || !c->weight)
		ret = -EINVAL;

	if (x86_pmu.put_event_constraints)
		x86_pmu.put_event_constraints(fake_cpuc, event);

	free_fake_cpuc(fake_cpuc);

	return ret;
}

/*
 * validate a single event group
 *
 * validation includes:
 *	- check events are compatible with each other
 *	- events do not compete for the same counter
 *	- number of events <= number of counters
 *
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int validate_group(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct cpu_hw_events *fake_cpuc;
	int ret = -EINVAL, n;

	fake_cpuc = allocate_fake_cpuc();
	if (IS_ERR(fake_cpuc))
		return PTR_ERR(fake_cpuc);
	/*
	 * The event is not yet connected with its
	 * siblings, therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling.
	 */
	n = collect_events(fake_cpuc, leader, true);
	if (n < 0)
		goto out;

	fake_cpuc->n_events = n;
	n = collect_events(fake_cpuc, event, false);
	if (n < 0)
		goto out;

	fake_cpuc->n_events = 0;
	ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);

out:
	free_fake_cpuc(fake_cpuc);
	return ret;
}
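
/*
 * Illustrative userspace sketch (not part of this file): the group
 * validation above is what makes an unschedulable group fail at
 * perf_event_open() time. A minimal probe, assuming the usual raw
 * syscall wrapper:
 *
 *	#include <linux/perf_event.h>
 *	#include <sys/syscall.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
 *				   int cpu, int group_fd, unsigned long flags)
 *	{
 *		return syscall(__NR_perf_event_open, attr, pid, cpu,
 *			       group_fd, flags);
 *	}
 *
 *	struct perf_event_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.type = PERF_TYPE_HARDWARE;
 *	attr.size = sizeof(attr);
 *	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
 *
 *	int leader = perf_event_open(&attr, 0, -1, -1, 0);
 *	// each sibling: perf_event_open(&attr, 0, -1, leader, 0);
 *	// once the group can no longer be scheduled as a whole,
 *	// validate_group() rejects it and the syscall fails with EINVAL.
 */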

static int x86_pmu_event_init(struct perf_event *event)
{
	struct pmu *tmp;
	int err;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	err = __x86_pmu_event_init(event);
	if (!err) {
		/*
		 * We temporarily connect the event to its pmu
		 * such that validate_group() can classify
		 * it as an x86 event using is_x86_event().
		 */
		tmp = event->pmu;
		event->pmu = &pmu;

		if (event->group_leader != event)
			err = validate_group(event);
		else
			err = validate_event(event);

		event->pmu = tmp;
	}
	if (err) {
		if (event->destroy)
			event->destroy(event);
	}

	if (READ_ONCE(x86_pmu.attr_rdpmc) &&
	    !(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS))
		event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;

	return err;
}

static void refresh_pce(void *ignored)
{
	load_mm_cr4(this_cpu_read(cpu_tlbstate.loaded_mm));
}

static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
{
	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
		return;

	/*
	 * This function relies on not being called concurrently in two
	 * tasks in the same mm. Otherwise one task could observe
	 * perf_rdpmc_allowed > 1 and return all the way back to
	 * userspace with CR4.PCE clear while another task is still
	 * doing on_each_cpu_mask() to propagate CR4.PCE.
	 *
	 * For now, this can't happen because all callers hold mmap_sem
	 * for write. If this changes, we'll need a different solution.
	 */
	lockdep_assert_held_exclusive(&mm->mmap_sem);

	if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
		on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
}

static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
{
	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
		return;

	if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
		on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
}

static int x86_pmu_event_idx(struct perf_event *event)
{
	int idx = event->hw.idx;

	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
		return 0;

	if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
		idx -= INTEL_PMC_IDX_FIXED;
		idx |= 1 << 30;
	}

	return idx + 1;
}
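
/*
 * Illustrative userspace sketch (not part of this file): the value
 * computed above is exported as perf_event_mmap_page::index. Zero
 * means RDPMC is not allowed; otherwise the counter is read with
 * RDPMC on (index - 1), where bit 30 selects the fixed-counter bank,
 * matching the encoding above:
 *
 *	#include <stdint.h>
 *
 *	static inline uint64_t rdpmc(uint32_t counter)
 *	{
 *		uint32_t low, high;
 *
 *		asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));
 *		return low | ((uint64_t)high << 32);
 *	}
 *
 *	// usage: if (pg->index) val = rdpmc(pg->index - 1);
 */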

static ssize_t get_attr_rdpmc(struct device *cdev,
			      struct device_attribute *attr,
			      char *buf)
{
	return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);
}

static ssize_t set_attr_rdpmc(struct device *cdev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;

	if (val > 2)
		return -EINVAL;

	if (x86_pmu.attr_rdpmc_broken)
		return -ENOTSUPP;

	if ((val == 2) != (x86_pmu.attr_rdpmc == 2)) {
		/*
		 * Changing into or out of always available, aka
		 * perf-event-bypassing mode. This path is extremely slow,
		 * but only root can trigger it, so it's okay.
		 */
		if (val == 2)
			static_branch_inc(&rdpmc_always_available_key);
		else
			static_branch_dec(&rdpmc_always_available_key);
		on_each_cpu(refresh_pce, NULL, 1);
	}

	x86_pmu.attr_rdpmc = val;

	return count;
}
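
/*
 * Illustrative sketch (not part of this file): toggling the knob from
 * userspace as root. Roughly: 0 disallows RDPMC, 1 (the default)
 * allows it only for tasks with an rdpmc-capable event mmap'ed, and 2
 * makes it always available. The path assumes the standard "cpu" PMU
 * device:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/bus/event_source/devices/cpu/rdpmc", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (write(fd, "2", 1) != 1) {	// enter always-available mode
 *			close(fd);
 *			return 1;
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */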

static DEVICE_ATTR(rdpmc, S_IRUSR | S_IWUSR, get_attr_rdpmc, set_attr_rdpmc);

static struct attribute *x86_pmu_attrs[] = {
	&dev_attr_rdpmc.attr,
	NULL,
};

static struct attribute_group x86_pmu_attr_group __ro_after_init = {
	.attrs = x86_pmu_attrs,
};

static ssize_t max_precise_show(struct device *cdev,
				struct device_attribute *attr,
				char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu_max_precise());
}

static DEVICE_ATTR_RO(max_precise);

static struct attribute *x86_pmu_caps_attrs[] = {
	&dev_attr_max_precise.attr,
	NULL
};

static struct attribute_group x86_pmu_caps_group __ro_after_init = {
	.name = "caps",
	.attrs = x86_pmu_caps_attrs,
};

static const struct attribute_group *x86_pmu_attr_groups[] = {
	&x86_pmu_attr_group,
	&x86_pmu_format_group,
	&x86_pmu_events_group,
	&x86_pmu_caps_group,
	NULL,
};

static void x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	if (x86_pmu.sched_task)
		x86_pmu.sched_task(ctx, sched_in);
}

void perf_check_microcode(void)
{
	if (x86_pmu.check_microcode)
		x86_pmu.check_microcode();
}

static int x86_pmu_check_period(struct perf_event *event, u64 value)
{
	if (x86_pmu.check_period && x86_pmu.check_period(event, value))
		return -EINVAL;

	if (value && x86_pmu.limit_period) {
		if (x86_pmu.limit_period(event, value) > value)
			return -EINVAL;
	}

	return 0;
}

static struct pmu pmu = {
	.pmu_enable		= x86_pmu_enable,
	.pmu_disable		= x86_pmu_disable,

	.attr_groups		= x86_pmu_attr_groups,

	.event_init		= x86_pmu_event_init,

	.event_mapped		= x86_pmu_event_mapped,
	.event_unmapped		= x86_pmu_event_unmapped,

	.add			= x86_pmu_add,
	.del			= x86_pmu_del,
	.start			= x86_pmu_start,
	.stop			= x86_pmu_stop,
	.read			= x86_pmu_read,

	.start_txn		= x86_pmu_start_txn,
	.cancel_txn		= x86_pmu_cancel_txn,
	.commit_txn		= x86_pmu_commit_txn,

	.event_idx		= x86_pmu_event_idx,
	.sched_task		= x86_pmu_sched_task,
	.task_ctx_size		= sizeof(struct x86_perf_task_context),
	.check_period		= x86_pmu_check_period,
};

void arch_perf_update_userpage(struct perf_event *event,
			       struct perf_event_mmap_page *userpg, u64 now)
{
	struct cyc2ns_data data;
	u64 offset;

	userpg->cap_user_time = 0;
	userpg->cap_user_time_zero = 0;
	userpg->cap_user_rdpmc =
		!!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED);
	userpg->pmc_width = x86_pmu.cntval_bits;

	if (!using_native_sched_clock() || !sched_clock_stable())
		return;

	cyc2ns_read_begin(&data);

	offset = data.cyc2ns_offset + __sched_clock_offset;

	/*
	 * Internal timekeeping for enabled/running/stopped times
	 * is always in the local_clock domain.
	 */
	userpg->cap_user_time = 1;
	userpg->time_mult = data.cyc2ns_mul;
	userpg->time_shift = data.cyc2ns_shift;
	userpg->time_offset = offset - now;

	/*
	 * cap_user_time_zero doesn't make sense when we're using a different
	 * time base for the records.
	 */
	if (!event->attr.use_clockid) {
		userpg->cap_user_time_zero = 1;
		userpg->time_zero = offset;
	}

	cyc2ns_read_end();
}
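
/*
 * Illustrative userspace sketch of the consumer side (follows the
 * lock-less self-monitoring pattern documented in
 * include/uapi/linux/perf_event.h; rdpmc() is the helper sketched near
 * x86_pmu_event_idx() above, and "pc" is the mmap'ed event page). The
 * time_* fields filled in above similarly let userspace convert a TSC
 * delta to nanoseconds via time_offset + ((tsc * time_mult) >> time_shift):
 *
 *	uint64_t read_self_count(volatile struct perf_event_mmap_page *pc)
 *	{
 *		uint32_t seq, idx, width;
 *		uint64_t count;
 *		int64_t pmc;
 *
 *		do {
 *			seq = pc->lock;
 *			__sync_synchronize();	// pairs with the kernel's update
 *
 *			idx = pc->index;
 *			count = pc->offset;
 *			if (pc->cap_user_rdpmc && idx) {
 *				width = pc->pmc_width;
 *				pmc = rdpmc(idx - 1);
 *				pmc <<= 64 - width;	// sign-extend the
 *				pmc >>= 64 - width;	// width-bit value
 *				count += pmc;
 *			}
 *
 *			__sync_synchronize();
 *		} while (pc->lock != seq);
 *
 *		return count;
 *	}
 */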

void
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
	struct unwind_state state;
	unsigned long addr;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* TODO: We don't support guest OS callchains yet */
		return;
	}

	if (perf_callchain_store(entry, regs->ip))
		return;

	for (unwind_start(&state, current, regs, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || perf_callchain_store(entry, addr))
			return;
	}
}

static inline int
valid_user_frame(const void __user *fp, unsigned long size)
{
	return (__range_not_ok(fp, size, TASK_SIZE) == 0);
}

static unsigned long get_segment_base(unsigned int segment)
{
	struct desc_struct *desc;
	unsigned int idx = segment >> 3;

	if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		struct ldt_struct *ldt;

		/* IRQs are off, so this synchronizes with smp_store_release */
		ldt = READ_ONCE(current->active_mm->context.ldt);
		if (!ldt || idx >= ldt->nr_entries)
			return 0;

		desc = &ldt->entries[idx];
#else
		return 0;
#endif
	} else {
		if (idx >= GDT_ENTRIES)
			return 0;

		desc = raw_cpu_ptr(gdt_page.gdt) + idx;
	}

	return get_desc_base(desc);
}

#ifdef CONFIG_IA32_EMULATION

#include <linux/compat.h>

static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
{
	/* 32-bit process in 64-bit kernel. */
	unsigned long ss_base, cs_base;
	struct stack_frame_ia32 frame;
	const void __user *fp;

	if (!test_thread_flag(TIF_IA32))
		return 0;

	cs_base = get_segment_base(regs->cs);
	ss_base = get_segment_base(regs->ss);

	fp = compat_ptr(ss_base + regs->bp);
	pagefault_disable();
	while (entry->nr < entry->max_stack) {
		unsigned long bytes;
		frame.next_frame = 0;
		frame.return_address = 0;

		if (!valid_user_frame(fp, sizeof(frame)))
			break;

		bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4);
		if (bytes != 0)
			break;
		bytes = __copy_from_user_nmi(&frame.return_address, fp+4, 4);
		if (bytes != 0)
			break;

		perf_callchain_store(entry, cs_base + frame.return_address);
		fp = compat_ptr(ss_base + frame.next_frame);
	}
	pagefault_enable();
	return 1;
}
#else
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
{
	return 0;
}
#endif

void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
	struct stack_frame frame;
	const unsigned long __user *fp;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* TODO: We don't support guest OS callchains yet */
		return;
	}

	/*
	 * We don't know what to do with VM86 stacks.. ignore them for now.
	 */
	if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
		return;

	fp = (unsigned long __user *)regs->bp;

	perf_callchain_store(entry, regs->ip);

	if (!nmi_uaccess_okay())
		return;

	if (perf_callchain_user32(regs, entry))
		return;

	pagefault_disable();
	while (entry->nr < entry->max_stack) {
		unsigned long bytes;

		frame.next_frame = NULL;
		frame.return_address = 0;

		if (!valid_user_frame(fp, sizeof(frame)))
			break;

		bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
		if (bytes != 0)
			break;
		bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
		if (bytes != 0)
			break;

		perf_callchain_store(entry, frame.return_address);
		fp = (void __user *)frame.next_frame;
	}
	pagefault_enable();
}

/*
 * Deal with code segment offsets for the various execution modes:
 *
 *   VM86 - the good olde 16 bit days, where the linear address is
 *          20 bits and we use regs->ip + 0x10 * regs->cs.
 *
 *   IA32 - Where we need to look at GDT/LDT segment descriptor tables
 *          to figure out what the 32bit base address is.
 *
 *    X32 - has TIF_X32 set, but is running in x86_64
 *
 * X86_64 - CS,DS,SS,ES are all zero based.
 */
static unsigned long code_segment_base(struct pt_regs *regs)
{
	/*
	 * For IA32 we look at the GDT/LDT segment base to convert the
	 * effective IP to a linear address.
	 */

#ifdef CONFIG_X86_32
	/*
	 * If we are in VM86 mode, add the segment offset to convert to a
	 * linear address.
	 */
	if (regs->flags & X86_VM_MASK)
		return 0x10 * regs->cs;

	if (user_mode(regs) && regs->cs != __USER_CS)
		return get_segment_base(regs->cs);
#else
	if (user_mode(regs) && !user_64bit_mode(regs) &&
	    regs->cs != __USER32_CS)
		return get_segment_base(regs->cs);
#endif
	return 0;
}

unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		return perf_guest_cbs->get_guest_ip();

	return regs->ip + code_segment_base(regs);
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
	int misc = 0;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		if (perf_guest_cbs->is_user_mode())
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (user_mode(regs))
			misc |= PERF_RECORD_MISC_USER;
		else
			misc |= PERF_RECORD_MISC_KERNEL;
	}

	if (regs->flags & PERF_EFLAGS_EXACT)
		misc |= PERF_RECORD_MISC_EXACT_IP;

	return misc;
}

void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	cap->version		= x86_pmu.version;
	cap->num_counters_gp	= x86_pmu.num_counters;
	cap->num_counters_fixed	= x86_pmu.num_counters_fixed;
	cap->bit_width_gp	= x86_pmu.cntval_bits;
	cap->bit_width_fixed	= x86_pmu.cntval_bits;
	cap->events_mask	= (unsigned int)x86_pmu.events_maskl;
	cap->events_mask_len	= x86_pmu.events_mask_len;
}
EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);
2589EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);