// SPDX-License-Identifier: GPL-2.0-only
#include <linux/perf_event.h>
#include <linux/jump_label.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
#include <asm/nmi.h>

#include "../perf_event.h"
static DEFINE_PER_CPU(unsigned long, perf_nmi_tstamp);
static unsigned long perf_nmi_window;
/* AMD Event 0xFFF: Merge. Used with Large Increment per Cycle events */
#define AMD_MERGE_EVENT ((0xFULL << 32) | 0xFFULL)
#define AMD_MERGE_EVENT_ENABLE (AMD_MERGE_EVENT | ARCH_PERFMON_EVENTSEL_ENABLE)
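/*
 * Per the Large Increment per Cycle scheme, the even counter of a pair
 * carries the actual event while the adjacent odd counter is programmed
 * with this Merge event; see the perf_ctr_pair_en setup in
 * amd_core_pmu_init() below.
 */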
/* PMC Enable and Overflow bits for PerfCntrGlobal* registers */
static u64 amd_pmu_global_cntr_mask __read_mostly;
static __initconst const u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
		[ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses          */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
		[ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
		[ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches        */
		[ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
		[ C(RESULT_MISS)   ] = 0x98e9, /* CPU Request to Memory, r   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst const u64 amd_hw_cache_event_ids_f17h
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0040, /* Data Cache Accesses */
		[C(RESULT_MISS)]   = 0xc860, /* L2$ access from DC Miss */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0xff5a, /* h/w prefetch DC Fills */
		[C(RESULT_MISS)]   = 0,
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0080, /* Instruction cache fetches */
		[C(RESULT_MISS)]   = 0x0081, /* Instruction cache misses */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0xff45, /* All L2 DTLB accesses */
		[C(RESULT_MISS)]   = 0xf045, /* L2 DTLB misses (PT walks) */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0084, /* L1 ITLB misses, L2 ITLB hits */
		[C(RESULT_MISS)]   = 0xff85, /* L1 ITLB misses, L2 misses */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x00c2, /* Retired Branch Instr. */
		[C(RESULT_MISS)]   = 0x00c3, /* Retired Mispredicted BI */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
},
};
/*
 * AMD Performance Monitor K7 and later, up to and including Family 16h:
 */
static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x077d,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x077e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00d0, /* "Decoder empty" event */
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x00d1, /* "Dispatch stalls" event */
};
/*
 * AMD Performance Monitor Family 17h and later:
 */
static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0xff60,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x0964,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x0287,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x0187,
};
static u64 amd_pmu_event_map(int hw_event)
{
	if (boot_cpu_data.x86 >= 0x17)
		return amd_f17h_perfmon_event_map[hw_event];

	return amd_perfmon_event_map[hw_event];
}
/*
 * Previously calculated offsets
 */
static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;
/*
 * Legacy CPUs:
 *   4 counters starting at 0xc0010000 each offset by 1
 *
 * CPUs with core performance counter extensions:
 *   6 counters starting at 0xc0010200 each offset by 2
 */
static inline int amd_pmu_addr_offset(int index, bool eventsel)
{
	int offset;

	if (!index)
		return index;

	if (eventsel)
		offset = event_offsets[index];
	else
		offset = count_offsets[index];

	if (offset)
		return offset;

	if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
		offset = index;
	else
		offset = index << 1;

	if (eventsel)
		event_offsets[index] = offset;
	else
		count_offsets[index] = offset;

	return offset;
}
/*
 * AMD64 events are detected based on their event codes.
 */
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
	return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}
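/*
 * Illustrative decode: the 12-bit AMD event code is split across the
 * event-select register - code[7:0] lives in config bits [7:0] and
 * code[11:8] in config bits [35:32], so (config >> 24) & 0x0f00 moves
 * the high nibble into place.
 */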
static inline bool amd_is_pair_event_code(struct hw_perf_event *hwc)
{
	if (!(x86_pmu.flags & PMU_FL_PAIR))
		return false;

	switch (amd_get_event_code(hwc)) {
	case 0x003:	return true;	/* Retired SSE/AVX FLOPs */
	default:	return false;
	}
}
DEFINE_STATIC_CALL_RET0(amd_pmu_branch_hw_config, *x86_pmu.hw_config);
static int amd_core_hw_config(struct perf_event *event)
{
	if (event->attr.exclude_host && event->attr.exclude_guest)
		/*
		 * When HO == GO == 1 the hardware treats that as GO == HO == 0
		 * and will count in both modes. We don't want to count in that
		 * case so we emulate no-counting by setting US = OS = 0.
		 */
		event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
				      ARCH_PERFMON_EVENTSEL_OS);
	else if (event->attr.exclude_host)
		event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
	else if (event->attr.exclude_guest)
		event->hw.config |= AMD64_EVENTSEL_HOSTONLY;

	if ((x86_pmu.flags & PMU_FL_PAIR) && amd_is_pair_event_code(&event->hw))
		event->hw.flags |= PERF_X86_EVENT_PAIR;

	if (has_branch_stack(event))
		return static_call(amd_pmu_branch_hw_config)(event);

	return 0;
}
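/*
 * Per the NorthBridge comment further down, NB events have event codes
 * of 0xEx/0xFx, i.e. bits 7:5 of the low event-select byte all set,
 * which is what the 0xe0 test below checks.
 */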
static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
	return (hwc->config & 0xe0) == 0xe0;
}

static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
	struct amd_nb *nb = cpuc->amd_nb;

	return nb && nb->nb_id != -1;
}
static int amd_pmu_hw_config(struct perf_event *event)
{
	int ret;

	/* pass precise event sampling to ibs: */
	if (event->attr.precise_ip && get_ibs_caps())
		return -ENOENT;

	if (has_branch_stack(event) && !x86_pmu.lbr_nr)
		return -EOPNOTSUPP;

	ret = x86_pmu_hw_config(event);
	if (ret)
		return ret;

	if (event->attr.type == PERF_TYPE_RAW)
		event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

	return amd_core_hw_config(event);
}
static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
					   struct perf_event *event)
{
	struct amd_nb *nb = cpuc->amd_nb;
	int i;

	/*
	 * need to scan whole list because event may not have
	 * been assigned during scheduling
	 *
	 * no race condition possible because event can only
	 * be removed on one CPU at a time AND PMU is disabled
	 * when we come here
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (cmpxchg(nb->owners + i, event, NULL) == event)
			break;
	}
}
/*
 * AMD64 NorthBridge events need special treatment because
 * counter access needs to be synchronized across all cores
 * of a package. Refer to BKDG section 3.12
 *
 * NB events are events measuring L3 cache, Hypertransport
 * traffic. They are identified by an event code >= 0xe00.
 * They measure events on the NorthBridge, which is shared
 * by all cores on a package. NB events are counted on a
 * shared set of counters. When a NB event is programmed
 * in a counter, the data actually comes from a shared
 * counter. Thus, access to those counters needs to be
 * synchronized.
 *
 * We implement the synchronization such that no two cores
 * can be measuring NB events using the same counters. Thus,
 * we maintain a per-NB allocation table. The available slot
 * is propagated using the event_constraint structure.
 *
 * We provide only one choice for each NB event based on
 * the fact that only NB events have restrictions. Consequently,
 * if a counter is available, there is a guarantee the NB event
 * will be assigned to it. If no slot is available, an empty
 * constraint is returned and scheduling will eventually fail
 * for this event.
 *
 * Note that all cores attached to the same NB compete for the same
 * counters to host NB events; this is why we use atomic ops. Some
 * multi-chip CPUs may have more than one NB.
 *
 * Given that resources are allocated (cmpxchg), they must be
 * eventually freed for others to use. This is accomplished by
 * calling __amd_put_nb_event_constraints()
 *
 * Non NB events are not impacted by this restriction.
 */
static struct event_constraint *
__amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
			       struct event_constraint *c)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	struct perf_event *old;
	int idx, new = -1;

	if (!c)
		c = &unconstrained;

	if (cpuc->is_fake)
		return c;

	/*
	 * detect if already present, if so reuse
	 *
	 * cannot merge with actual allocation
	 * because of possible holes
	 *
	 * event can already be present yet not assigned (in hwc->idx)
	 * because of successive calls to x86_schedule_events() from
	 * hw_perf_group_sched_in() without hw_perf_enable()
	 */
	for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
		if (new == -1 || hwc->idx == idx)
			/* assign free slot, prefer hwc->idx */
			old = cmpxchg(nb->owners + idx, NULL, event);
		else if (nb->owners[idx] == event)
			/* event already present */
			old = event;
		else
			continue;

		if (old && old != event)
			continue;

		/* reassign to this slot */
		if (new != -1)
			cmpxchg(nb->owners + new, event, NULL);
		new = idx;

		/* already present, reuse */
		if (old == event)
			break;
	}

	if (new == -1)
		return &emptyconstraint;

	return &nb->event_constraints[new];
}
static struct amd_nb *amd_alloc_nb(int cpu)
{
	struct amd_nb *nb;
	int i;

	nb = kzalloc_node(sizeof(struct amd_nb), GFP_KERNEL, cpu_to_node(cpu));
	if (!nb)
		return NULL;

	nb->nb_id = -1;

	/*
	 * initialize all possible NB constraints
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		__set_bit(i, nb->event_constraints[i].idxmsk);
		nb->event_constraints[i].weight = 1;
	}
	return nb;
}
typedef void (amd_pmu_branch_reset_t)(void);
DEFINE_STATIC_CALL_NULL(amd_pmu_branch_reset, amd_pmu_branch_reset_t);
static void amd_pmu_cpu_reset(int cpu)
{
	static_call(amd_pmu_branch_reset)();

	if (x86_pmu.version < 2)
		return;

	/* Clear enable bits i.e. PerfCntrGlobalCtl.PerfCntrEn */
	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);

	/* Clear overflow bits i.e. PerfCntrGlobalStatus.PerfCntrOvfl */
	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, amd_pmu_global_cntr_mask);
}
static int amd_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	cpuc->lbr_sel = kzalloc_node(sizeof(struct er_account), GFP_KERNEL,
				     cpu_to_node(cpu));
	if (!cpuc->lbr_sel)
		return -ENOMEM;

	WARN_ON_ONCE(cpuc->amd_nb);

	if (!x86_pmu.amd_nb_constraints)
		return 0;

	cpuc->amd_nb = amd_alloc_nb(cpu);
	if (cpuc->amd_nb)
		return 0;

	kfree(cpuc->lbr_sel);
	cpuc->lbr_sel = NULL;

	return -ENOMEM;
}
static void amd_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
	struct amd_nb *nb;
	int i, nb_id;

	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;

	if (!x86_pmu.amd_nb_constraints)
		return;

	nb_id = topology_die_id(cpu);
	WARN_ON_ONCE(nb_id == BAD_APICID);

	for_each_online_cpu(i) {
		nb = per_cpu(cpu_hw_events, i).amd_nb;
		if (WARN_ON_ONCE(!nb))
			continue;

		if (nb->nb_id == nb_id) {
			*onln = cpuc->amd_nb;
			cpuc->amd_nb = nb;
			break;
		}
	}

	cpuc->amd_nb->nb_id = nb_id;
	cpuc->amd_nb->refcnt++;

	amd_pmu_cpu_reset(cpu);
}
static void amd_pmu_cpu_dead(int cpu)
{
	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	kfree(cpuhw->lbr_sel);
	cpuhw->lbr_sel = NULL;

	if (!x86_pmu.amd_nb_constraints)
		return;

	if (cpuhw->amd_nb) {
		struct amd_nb *nb = cpuhw->amd_nb;

		if (nb->nb_id == -1 || --nb->refcnt == 0)
			kfree(nb);

		cpuhw->amd_nb = NULL;
	}

	amd_pmu_cpu_reset(cpu);
}
static inline void amd_pmu_set_global_ctl(u64 ctl)
{
	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, ctl);
}
static inline u64 amd_pmu_get_global_status(void)
{
	u64 status;

	/* PerfCntrGlobalStatus is read-only */
	rdmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, status);

	return status;
}

static inline void amd_pmu_ack_global_status(u64 status)
{
	/*
	 * PerfCntrGlobalStatus is read-only but an overflow acknowledgment
	 * mechanism exists; writing 1 to a bit in PerfCntrGlobalStatusClr
	 * clears the same bit in PerfCntrGlobalStatus
	 */
	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, status);
}
static bool amd_pmu_test_overflow_topbit(int idx)
{
	u64 counter;

	rdmsrl(x86_pmu_event_addr(idx), counter);

	return !(counter & BIT_ULL(x86_pmu.cntval_bits - 1));
}
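/*
 * Note: perf programs a counter with the negated sample period, so the
 * top bit of the counter value stays set while the counter is armed. A
 * clear top bit therefore means the counter wrapped (overflowed) and
 * has not yet been reprogrammed by the NMI handler.
 */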
static bool amd_pmu_test_overflow_status(int idx)
{
	return amd_pmu_get_global_status() & BIT_ULL(idx);
}

DEFINE_STATIC_CALL(amd_pmu_test_overflow, amd_pmu_test_overflow_topbit);
/*
 * When a PMC counter overflows, an NMI is used to process the event and
 * reset the counter. NMI latency can result in the counter being updated
 * before the NMI can run, which can result in what appear to be spurious
 * NMIs. This function is intended to wait for the NMI to run and reset
 * the counter to avoid possible unhandled NMI messages.
 */
#define OVERFLOW_WAIT_COUNT	50
static void amd_pmu_wait_on_overflow(int idx)
{
	unsigned int i;

	/*
	 * Wait for the counter to be reset if it has overflowed. This loop
	 * should exit very, very quickly, but just in case, don't wait
	 * longer than OVERFLOW_WAIT_COUNT loop iterations.
	 */
	for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
		if (!static_call(amd_pmu_test_overflow)(idx))
			break;

		/* Might be in IRQ context, so can't sleep */
		udelay(1);
	}
}
static void amd_pmu_check_overflow(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	/*
	 * This shouldn't be called from NMI context, but add a safeguard here
	 * to return, since if we're in NMI context we can't wait for an NMI
	 * to reset an overflowed counter value.
	 */
	if (in_nmi())
		return;

	/*
	 * Check each counter for overflow and wait for it to be reset by the
	 * NMI if it has overflowed. This relies on the fact that all active
	 * counters are always enabled when this function is called and
	 * ARCH_PERFMON_EVENTSEL_INT is always set.
	 */
	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		amd_pmu_wait_on_overflow(idx);
	}
}
static void amd_pmu_enable_event(struct perf_event *event)
{
	x86_pmu_enable_event(event);
}
static void amd_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	amd_brs_enable_all();

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		/* only activate events which are marked as active */
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		amd_pmu_enable_event(cpuc->events[idx]);
	}
}
static void amd_pmu_v2_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * Testing cpu_hw_events.enabled should be skipped in this case unlike
	 * in x86_pmu_enable_event().
	 *
	 * Since cpu_hw_events.enabled is set only after returning from
	 * x86_pmu_start(), the PMCs must be programmed and kept ready.
	 * Counting starts only after x86_pmu_enable_all() is called.
	 */
	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
static __always_inline void amd_pmu_core_enable_all(void)
{
	amd_pmu_set_global_ctl(amd_pmu_global_cntr_mask);
}

static void amd_pmu_v2_enable_all(int added)
{
	amd_pmu_lbr_enable_all();
	amd_pmu_core_enable_all();
}
static void amd_pmu_disable_event(struct perf_event *event)
{
	x86_pmu_disable_event(event);

	/*
	 * This can be called from NMI context (via x86_pmu_stop). The counter
	 * may have overflowed, but either way, we'll never see it get reset
	 * by the NMI if we're already in the NMI. And the NMI latency support
	 * below will take care of any pending NMI that might have been
	 * generated by the overflow.
	 */
	if (in_nmi())
		return;

	amd_pmu_wait_on_overflow(event->hw.idx);
}
static void amd_pmu_disable_all(void)
{
	amd_brs_disable_all();
	x86_pmu_disable_all();
	amd_pmu_check_overflow();
}

static __always_inline void amd_pmu_core_disable_all(void)
{
	amd_pmu_set_global_ctl(0);
}

static void amd_pmu_v2_disable_all(void)
{
	amd_pmu_core_disable_all();
	amd_pmu_lbr_disable_all();
	amd_pmu_check_overflow();
}
DEFINE_STATIC_CALL_NULL(amd_pmu_branch_add, *x86_pmu.add);

static void amd_pmu_add_event(struct perf_event *event)
{
	if (needs_branch_stack(event))
		static_call(amd_pmu_branch_add)(event);
}

DEFINE_STATIC_CALL_NULL(amd_pmu_branch_del, *x86_pmu.del);

static void amd_pmu_del_event(struct perf_event *event)
{
	if (needs_branch_stack(event))
		static_call(amd_pmu_branch_del)(event);
}
/*
 * Because of NMI latency, if multiple PMC counters are active or other sources
 * of NMIs are received, the perf NMI handler can handle one or more overflowed
 * PMC counters outside of the NMI associated with the PMC overflow. If the NMI
 * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
 * back-to-back NMI support won't be active. This PMC handler needs to take into
 * account that this can occur, otherwise this could result in unknown NMI
 * messages being issued. Examples of this are PMC overflow while in the NMI
 * handler when multiple PMCs are active or PMC overflow while handling some
 * other source of an NMI.
 *
 * Attempt to mitigate this by creating an NMI window in which un-handled NMIs
 * received during this window will be claimed. This prevents extending the
 * window past when it is possible that latent NMIs should be received. The
 * per-CPU perf_nmi_tstamp will be set to the window end time whenever perf has
 * handled a counter. When an un-handled NMI is received, it will be claimed
 * only if arriving within that window.
 */
static inline int amd_pmu_adjust_nmi_window(int handled)
{
	/*
	 * If a counter was handled, record a timestamp such that un-handled
	 * NMIs will be claimed if arriving within that window.
	 */
	if (handled) {
		this_cpu_write(perf_nmi_tstamp, jiffies + perf_nmi_window);

		return handled;
	}

	if (time_after(jiffies, this_cpu_read(perf_nmi_tstamp)))
		return NMI_DONE;

	return NMI_HANDLED;
}
static int amd_pmu_handle_irq(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int handled;
	int pmu_enabled;

	/*
	 * Save the PMU state.
	 * It needs to be restored when leaving the handler.
	 */
	pmu_enabled = cpuc->enabled;
	cpuc->enabled = 0;

	amd_brs_disable_all();

	/* Drain BRS if in use (could be inactive) */
	if (cpuc->lbr_users)
		amd_brs_drain();

	/* Process any counter overflows */
	handled = x86_pmu_handle_irq(regs);

	cpuc->enabled = pmu_enabled;
	if (pmu_enabled)
		amd_brs_enable_all();

	return amd_pmu_adjust_nmi_window(handled);
}
static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_sample_data data;
	struct hw_perf_event *hwc;
	struct perf_event *event;
	int handled = 0, idx;
	u64 status, mask;
	bool pmu_enabled;

	/*
	 * Save the PMU state as it needs to be restored when leaving the
	 * handler
	 */
	pmu_enabled = cpuc->enabled;
	cpuc->enabled = 0;

	/* Stop counting but do not disable LBR */
	amd_pmu_core_disable_all();

	status = amd_pmu_get_global_status();

	/* Check if any overflows are pending */
	if (!status)
		goto done;

	/* Read branch records before unfreezing */
	if (status & GLOBAL_STATUS_LBRS_FROZEN) {
		amd_pmu_lbr_read();
		status &= ~GLOBAL_STATUS_LBRS_FROZEN;
	}

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		event = cpuc->events[idx];
		hwc = &event->hw;
		x86_perf_event_update(event);
		mask = BIT_ULL(idx);

		if (!(status & mask))
			continue;

		handled++;
		status &= ~mask;
		perf_sample_data_init(&data, 0, hwc->last_period);

		if (!x86_perf_event_set_period(event))
			continue;

		if (has_branch_stack(event)) {
			data.br_stack = &cpuc->lbr_stack;
			data.sample_flags |= PERF_SAMPLE_BRANCH_STACK;
		}

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * It should never be the case that some overflows are not handled as
	 * the corresponding PMCs are expected to be inactive according to the
	 * active_mask
	 */
	WARN_ON(status > 0);

	/* Clear overflow and freeze bits */
	amd_pmu_ack_global_status(~status);

	/*
	 * Unmasking the LVTPC is not required as the Mask (M) bit of the LVT
	 * PMI entry is not set by the local APIC when a PMC overflow occurs
	 */
	inc_irq_stat(apic_perf_irqs);

done:
	cpuc->enabled = pmu_enabled;

	/* Resume counting only if PMU is active */
	if (pmu_enabled)
		amd_pmu_core_enable_all();

	return amd_pmu_adjust_nmi_window(handled);
}
static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event)
{
	/*
	 * if not NB event or no NB, then no constraints
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
		return &unconstrained;

	return __amd_get_nb_event_constraints(cpuc, event, NULL);
}
static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
				      struct perf_event *event)
{
	if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
		__amd_put_nb_event_constraints(cpuc, event);
}
PMU_FORMAT_ATTR(event,	"config:0-7,32-35");
PMU_FORMAT_ATTR(umask,	"config:8-15");
PMU_FORMAT_ATTR(edge,	"config:18");
PMU_FORMAT_ATTR(inv,	"config:23");
PMU_FORMAT_ATTR(cmask,	"config:24-31");
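/*
 * These format attributes appear under
 * /sys/bus/event_source/devices/cpu/format/. Illustrative raw usage
 * (0x76 is the CPU cycles event from the event maps above; the umask
 * value is just an example):
 *
 *	perf stat -e cpu/event=0x76,umask=0x00/ -- sleep 1
 */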
static struct attribute *amd_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	NULL,
};
/* AMD Family 15h */
#define AMD_EVENT_TYPE_MASK	0x000000F0ULL

#define AMD_EVENT_FP		0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS		0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC		0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU		0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE		0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS		0x000000C0ULL
#define AMD_EVENT_DE		0x000000D0ULL
#define AMD_EVENT_NB		0x000000E0ULL ... 0x000000F0ULL
/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 * 0x000	FP	PERF_CTL[5:3]
 * 0x010	FP	PERF_CTL[5:3]
 * 0x020	LS	PERF_CTL[5:0]
 * 0x030	LS	PERF_CTL[5:0]
 * 0x040	DC	PERF_CTL[5:0]
 * 0x050	DC	PERF_CTL[5:0]
 * 0x060	CU	PERF_CTL[2:0]
 * 0x070	CU	PERF_CTL[2:0]
 * 0x080	IC/DE	PERF_CTL[2:0]
 * 0x090	IC/DE	PERF_CTL[2:0]
 * 0x0A0	---
 * 0x0B0	---
 * 0x0C0	EX/LS	PERF_CTL[5:0]
 * 0x0D0	DE	PERF_CTL[2:0]
 * 0x0E0	NB	NB_PERF_CTL[3:0]
 * 0x0F0	NB	NB_PERF_CTL[3:0]
 *
 * Exceptions:
 *
 * 0x000	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x003	FP	PERF_CTL[3]
 * 0x004	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x00B	FP	PERF_CTL[3]
 * 0x00D	FP	PERF_CTL[3]
 * 0x023	DE	PERF_CTL[2:0]
 * 0x02D	LS	PERF_CTL[3]
 * 0x02E	LS	PERF_CTL[3,0]
 * 0x031	LS	PERF_CTL[2:0] (**)
 * 0x043	CU	PERF_CTL[2:0]
 * 0x045	CU	PERF_CTL[2:0]
 * 0x046	CU	PERF_CTL[2:0]
 * 0x054	CU	PERF_CTL[2:0]
 * 0x055	CU	PERF_CTL[2:0]
 * 0x08F	IC	PERF_CTL[0]
 * 0x187	DE	PERF_CTL[0]
 * 0x188	DE	PERF_CTL[0]
 * 0x0DB	EX	PERF_CTL[5:0]
 * 0x0DC	LS	PERF_CTL[5:0]
 * 0x0DD	LS	PERF_CTL[5:0]
 * 0x0DE	LS	PERF_CTL[5:0]
 * 0x0DF	LS	PERF_CTL[5:0]
 * 0x1C0	EX	PERF_CTL[5:3]
 * 0x1D6	EX	PERF_CTL[5:0]
 * 0x1D8	EX	PERF_CTL[5:0]
 *
 * (*)  depending on the umask all FPU counters may be used
 * (**) only one unitmask enabled at a time
 */
static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
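/*
 * The suffix encodes the usable counter range: amd_f15_PMC0 is counter 0
 * only (0x01), PMC20 is counters 2..0 (0x07), PMC3 is counter 3 (0x08),
 * PMC30 is counters 3 and 0 (0x09, overlapping), PMC50 is counters 5..0
 * (0x3F) and PMC53 is counters 5..3 (0x38).
 */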
static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
			       struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int event_code = amd_get_event_code(hwc);

	switch (event_code & AMD_EVENT_TYPE_MASK) {
	case AMD_EVENT_FP:
		switch (event_code) {
		case 0x000:
			if (!(hwc->config & 0x0000F000ULL))
				break;
			if (!(hwc->config & 0x00000F00ULL))
				break;
			return &amd_f15_PMC3;
		case 0x004:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				break;
			return &amd_f15_PMC3;
		case 0x003:
		case 0x00B:
		case 0x00D:
			return &amd_f15_PMC3;
		}
		return &amd_f15_PMC53;
	case AMD_EVENT_LS:
	case AMD_EVENT_DC:
	case AMD_EVENT_EX_LS:
		switch (event_code) {
		case 0x023:
		case 0x043:
		case 0x045:
		case 0x046:
		case 0x054:
		case 0x055:
			return &amd_f15_PMC20;
		case 0x02D:
			return &amd_f15_PMC3;
		case 0x02E:
			return &amd_f15_PMC30;
		case 0x031:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				return &amd_f15_PMC20;
			return &emptyconstraint;
		case 0x1C0:
			return &amd_f15_PMC53;
		default:
			return &amd_f15_PMC50;
		}
	case AMD_EVENT_CU:
	case AMD_EVENT_IC_DE:
	case AMD_EVENT_DE:
		switch (event_code) {
		case 0x08F:
		case 0x187:
		case 0x188:
			return &amd_f15_PMC0;
		case 0x0DB ... 0x0DF:
		case 0x1D6:
		case 0x1D8:
			return &amd_f15_PMC50;
		default:
			return &amd_f15_PMC20;
		}
	case AMD_EVENT_NB:
		/* moved to uncore.c */
		return &emptyconstraint;
	default:
		return &emptyconstraint;
	}
}
static struct event_constraint pair_constraint;
static struct event_constraint *
amd_get_event_constraints_f17h(struct cpu_hw_events *cpuc, int idx,
			       struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (amd_is_pair_event_code(hwc))
		return &pair_constraint;

	return &unconstrained;
}

static void amd_put_event_constraints_f17h(struct cpu_hw_events *cpuc,
					   struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (is_counter_pair(hwc))
		--cpuc->n_pair;
}
/*
 * Because of the way BRS operates with inactive and active phases, and
 * the link to one counter, it is not possible to have two events using BRS
 * scheduled at the same time. There would be an issue with enforcing the
 * period of each one and given that the BRS saturates, it would not be possible
 * to guarantee correlated content for all events. Therefore, in situations
 * where multiple events want to use BRS, the kernel enforces mutual exclusion.
 * Exclusion is enforced by choosing only one counter for events using BRS.
 * The event scheduling logic will then automatically multiplex the
 * events and ensure that at most one event is actively using BRS.
 *
 * The BRS counter could be any counter, but there is no constraint on Fam19h,
 * therefore all counters are equal and thus we pick the first one: PMC0
 */
static struct event_constraint amd_fam19h_brs_cntr0_constraint =
	EVENT_CONSTRAINT(0, 0x1, AMD64_RAW_EVENT_MASK);

static struct event_constraint amd_fam19h_brs_pair_cntr0_constraint =
	__EVENT_CONSTRAINT(0, 0x1, AMD64_RAW_EVENT_MASK, 1, 0, PERF_X86_EVENT_PAIR);
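/*
 * Note on the pair variant above: the counter mask still names only PMC0;
 * the PERF_X86_EVENT_PAIR flag is what makes the scheduler reserve the
 * adjacent odd counter (PMC1) for the merged half of the event.
 */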
static struct event_constraint *
amd_get_event_constraints_f19h(struct cpu_hw_events *cpuc, int idx,
			       struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	bool has_brs = has_amd_brs(hwc);

	/*
	 * In case BRS is used with an event requiring a counter pair,
	 * the kernel allows it but only on counters 0 & 1 to enforce
	 * multiplexing, which protects BRS when there are multiple
	 * BRS users.
	 */
	if (amd_is_pair_event_code(hwc)) {
		return has_brs ? &amd_fam19h_brs_pair_cntr0_constraint
			       : &pair_constraint;
	}

	if (has_brs)
		return &amd_fam19h_brs_cntr0_constraint;

	return &unconstrained;
}
static ssize_t amd_event_sysfs_show(char *page, u64 config)
{
	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) |
		    (config & AMD64_EVENTSEL_EVENT) >> 24;

	return x86_event_sysfs_show(page, config, event);
}
static void amd_pmu_limit_period(struct perf_event *event, s64 *left)
{
	/*
	 * Decrease period by the depth of the BRS feature to get the last N
	 * taken branches and approximate the desired period
	 */
	if (has_branch_stack(event) && *left > x86_pmu.lbr_nr)
		*left -= x86_pmu.lbr_nr;
}
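/*
 * Worked example (illustrative): with x86_pmu.lbr_nr == 16 and a
 * requested period of 1000, the programmed period becomes 984, so the
 * sample fires roughly where the 16 recorded branches end.
 */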
static __initconst const struct x86_pmu amd_pmu = {
	.name			= "AMD",
	.handle_irq		= amd_pmu_handle_irq,
	.disable_all		= amd_pmu_disable_all,
	.enable_all		= amd_pmu_enable_all,
	.enable			= amd_pmu_enable_event,
	.disable		= amd_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.addr_offset		= amd_pmu_addr_offset,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= AMD64_NUM_COUNTERS,
	.add			= amd_pmu_add_event,
	.del			= amd_pmu_del_event,
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints,
	.put_event_constraints	= amd_put_event_constraints,

	.format_attrs		= amd_format_attr,
	.events_sysfs_show	= amd_event_sysfs_show,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_starting		= amd_pmu_cpu_starting,
	.cpu_dead		= amd_pmu_cpu_dead,

	.amd_nb_constraints	= 1,
};
static ssize_t branches_show(struct device *cdev,
			     struct device_attribute *attr,
			     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
}

static DEVICE_ATTR_RO(branches);

static struct attribute *amd_pmu_branches_attrs[] = {
	&dev_attr_branches.attr,
	NULL,
};

static umode_t
amd_branches_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
	return x86_pmu.lbr_nr ? attr->mode : 0;
}

static struct attribute_group group_caps_amd_branches = {
	.name = "caps",
	.attrs = amd_pmu_branches_attrs,
	.is_visible = amd_branches_is_visible,
};
#ifdef CONFIG_PERF_EVENTS_AMD_BRS

EVENT_ATTR_STR(branch-brs, amd_branch_brs,
	       "event=" __stringify(AMD_FAM19H_BRS_EVENT)"\n");

static struct attribute *amd_brs_events_attrs[] = {
	EVENT_PTR(amd_branch_brs),
	NULL,
};

static umode_t
amd_brs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
	return static_cpu_has(X86_FEATURE_BRS) && x86_pmu.lbr_nr ?
	       attr->mode : 0;
}

static struct attribute_group group_events_amd_brs = {
	.name = "events",
	.attrs = amd_brs_events_attrs,
	.is_visible = amd_brs_is_visible,
};

#endif	/* CONFIG_PERF_EVENTS_AMD_BRS */
static const struct attribute_group *amd_attr_update[] = {
	&group_caps_amd_branches,
#ifdef CONFIG_PERF_EVENTS_AMD_BRS
	&group_events_amd_brs,
#endif
	NULL,
};
static int __init amd_core_pmu_init(void)
{
	union cpuid_0x80000022_ebx ebx;
	u64 even_ctr_mask = 0ULL;
	int i;

	if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
		return 0;

	/* Avoid calculating the value each time in the NMI handler */
	perf_nmi_window = msecs_to_jiffies(100);

	/*
	 * If core performance counter extensions exist, we must use
	 * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also
	 * amd_pmu_addr_offset().
	 */
	x86_pmu.eventsel	= MSR_F15H_PERF_CTL;
	x86_pmu.perfctr		= MSR_F15H_PERF_CTR;
	x86_pmu.num_counters	= AMD64_NUM_COUNTERS_CORE;

	/* Check for Performance Monitoring v2 support */
	if (boot_cpu_has(X86_FEATURE_PERFMON_V2)) {
		ebx.full = cpuid_ebx(EXT_PERFMON_DEBUG_FEATURES);

		/* Update PMU version for later usage */
		x86_pmu.version = 2;

		/* Find the number of available Core PMCs */
		x86_pmu.num_counters = ebx.split.num_core_pmc;
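		/*
		 * Illustrative arithmetic: with 6 core PMCs reported by
		 * CPUID, the mask below becomes (1 << 6) - 1 = 0x3f, one
		 * enable/overflow bit per counter in the PerfCntrGlobal*
		 * registers.
		 */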
		amd_pmu_global_cntr_mask = (1ULL << x86_pmu.num_counters) - 1;

		/* Update PMC handling functions */
		x86_pmu.enable_all = amd_pmu_v2_enable_all;
		x86_pmu.disable_all = amd_pmu_v2_disable_all;
		x86_pmu.enable = amd_pmu_v2_enable_event;
		x86_pmu.handle_irq = amd_pmu_v2_handle_irq;
		static_call_update(amd_pmu_test_overflow, amd_pmu_test_overflow_status);
	}

	/*
	 * AMD Core perfctr has separate MSRs for the NB events, see
	 * the amd/uncore.c driver.
	 */
	x86_pmu.amd_nb_constraints = 0;

	if (boot_cpu_data.x86 == 0x15) {
		pr_cont("Fam15h ");
		x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
	}
	if (boot_cpu_data.x86 >= 0x17) {
		pr_cont("Fam17h+ ");
		/*
		 * Family 17h and compatibles have constraints for Large
		 * Increment per Cycle events: they may only be assigned an
		 * even numbered counter that has a consecutive adjacent odd
		 * numbered counter following it.
		 */
		for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
			even_ctr_mask |= BIT_ULL(i);

		pair_constraint = (struct event_constraint)
				  __EVENT_CONSTRAINT(0, even_ctr_mask, 0,
				  x86_pmu.num_counters / 2, 0,
				  PERF_X86_EVENT_PAIR);
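		/*
		 * Illustrative: with 6 counters the loop above sets
		 * even_ctr_mask to 0b010101 (counters 0, 2 and 4), and the
		 * constraint weight of num_counters / 2 matches those three
		 * usable even/odd pairs.
		 */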
		x86_pmu.get_event_constraints = amd_get_event_constraints_f17h;
		x86_pmu.put_event_constraints = amd_put_event_constraints_f17h;
		x86_pmu.perf_ctr_pair_en = AMD_MERGE_EVENT_ENABLE;
		x86_pmu.flags |= PMU_FL_PAIR;
	}

	/* LBR and BRS are mutually exclusive features */
	if (!amd_pmu_lbr_init()) {
		/* LBR requires flushing on context switch */
		x86_pmu.sched_task = amd_pmu_lbr_sched_task;
		static_call_update(amd_pmu_branch_hw_config, amd_pmu_lbr_hw_config);
		static_call_update(amd_pmu_branch_reset, amd_pmu_lbr_reset);
		static_call_update(amd_pmu_branch_add, amd_pmu_lbr_add);
		static_call_update(amd_pmu_branch_del, amd_pmu_lbr_del);
	} else if (!amd_brs_init()) {
		/*
		 * BRS requires special event constraints and flushing on ctxsw.
		 */
		x86_pmu.get_event_constraints = amd_get_event_constraints_f19h;
		x86_pmu.sched_task = amd_pmu_brs_sched_task;
		x86_pmu.limit_period = amd_pmu_limit_period;

		static_call_update(amd_pmu_branch_hw_config, amd_brs_hw_config);
		static_call_update(amd_pmu_branch_reset, amd_brs_reset);
		static_call_update(amd_pmu_branch_add, amd_pmu_brs_add);
		static_call_update(amd_pmu_branch_del, amd_pmu_brs_del);

		/*
		 * put_event_constraints callback same as Fam17h, set above
		 */

		/* branch sampling must be stopped when entering low power */
		amd_brs_lopwr_init();
	}

	x86_pmu.attr_update = amd_attr_update;

	pr_cont("core perfctr, ");
	return 0;
}
__init int amd_pmu_init(void)
{
	int ret;

	/* Performance-monitoring supported from K7 and later: */
	if (boot_cpu_data.x86 < 6)
		return -ENODEV;

	x86_pmu = amd_pmu;

	ret = amd_core_pmu_init();
	if (ret)
		return ret;

	if (num_possible_cpus() == 1) {
		/*
		 * No point in allocating data structures to serialize
		 * against other CPUs, when there is only the one CPU.
		 */
		x86_pmu.amd_nb_constraints = 0;
	}

	if (boot_cpu_data.x86 >= 0x17)
		memcpy(hw_cache_event_ids, amd_hw_cache_event_ids_f17h, sizeof(hw_cache_event_ids));
	else
		memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids));

	return 0;
}
static inline void amd_pmu_reload_virt(void)
{
	if (x86_pmu.version >= 2) {
		/*
		 * Clear global enable bits, reprogram the PERF_CTL
		 * registers with updated perf_ctr_virt_mask and then
		 * set global enable bits once again
		 */
		amd_pmu_v2_disable_all();
		amd_pmu_enable_all(0);
		amd_pmu_v2_enable_all(0);
		return;
	}

	amd_pmu_disable_all();
	amd_pmu_enable_all(0);
}
void amd_pmu_enable_virt(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	cpuc->perf_ctr_virt_mask = 0;

	/* Reload all events */
	amd_pmu_reload_virt();
}
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
void amd_pmu_disable_virt(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	/*
	 * We only mask out the Host-only bit so that host-only counting works
	 * when SVM is disabled. If someone sets up a guest-only counter when
	 * SVM is disabled the Guest-only bits still get set and the counter
	 * will not count anything.
	 */
	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;

	/* Reload all events */
	amd_pmu_reload_virt();
}
EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);