1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/perf_event.h>
3 #include <linux/jump_label.h>
4 #include <linux/export.h>
5 #include <linux/types.h>
6 #include <linux/init.h>
7 #include <linux/slab.h>
8 #include <linux/delay.h>
9 #include <linux/jiffies.h>
10 #include <asm/apicdef.h>
14 #include "../perf_event.h"
16 static DEFINE_PER_CPU(unsigned long, perf_nmi_tstamp);
17 static unsigned long perf_nmi_window;
19 /* AMD Event 0xFFF: Merge. Used with Large Increment per Cycle events */
20 #define AMD_MERGE_EVENT ((0xFULL << 32) | 0xFFULL)
21 #define AMD_MERGE_EVENT_ENABLE (AMD_MERGE_EVENT | ARCH_PERFMON_EVENTSEL_ENABLE)
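/*
 * Illustrative note (editor's addition): the 12-bit event select 0xFFF is
 * split across PerfEvtSel bits [7:0] and [35:32], which is why the Merge
 * event is encoded as (0xF << 32) | 0xFF. Enabling it only ORs in the usual
 * enable bit:
 *
 *   AMD_MERGE_EVENT        == 0x0000000F000000FFULL
 *   AMD_MERGE_EVENT_ENABLE == AMD_MERGE_EVENT | ARCH_PERFMON_EVENTSEL_ENABLE
 */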
23 /* PMC Enable and Overflow bits for PerfCntrGlobal* registers */
24 static u64 amd_pmu_global_cntr_mask __read_mostly;
26 static __initconst const u64 amd_hw_cache_event_ids
27 [PERF_COUNT_HW_CACHE_MAX]
28 [PERF_COUNT_HW_CACHE_OP_MAX]
29 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
33 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
34 [ C(RESULT_MISS) ] = 0x0141, /* Data Cache Misses */
37 [ C(RESULT_ACCESS) ] = 0,
38 [ C(RESULT_MISS) ] = 0,
40 [ C(OP_PREFETCH) ] = {
41 [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
42 [ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */
47 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
48 [ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */
51 [ C(RESULT_ACCESS) ] = -1,
52 [ C(RESULT_MISS) ] = -1,
54 [ C(OP_PREFETCH) ] = {
55 [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
56 [ C(RESULT_MISS) ] = 0,
61 [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
62 [ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */
65 [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
66 [ C(RESULT_MISS) ] = 0,
68 [ C(OP_PREFETCH) ] = {
69 [ C(RESULT_ACCESS) ] = 0,
70 [ C(RESULT_MISS) ] = 0,
75 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
76 [ C(RESULT_MISS) ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
79 [ C(RESULT_ACCESS) ] = 0,
80 [ C(RESULT_MISS) ] = 0,
82 [ C(OP_PREFETCH) ] = {
83 [ C(RESULT_ACCESS) ] = 0,
84 [ C(RESULT_MISS) ] = 0,
89 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
90 [ C(RESULT_MISS) ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
93 [ C(RESULT_ACCESS) ] = -1,
94 [ C(RESULT_MISS) ] = -1,
96 [ C(OP_PREFETCH) ] = {
97 [ C(RESULT_ACCESS) ] = -1,
98 [ C(RESULT_MISS) ] = -1,
103 [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
104 [ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */
107 [ C(RESULT_ACCESS) ] = -1,
108 [ C(RESULT_MISS) ] = -1,
110 [ C(OP_PREFETCH) ] = {
111 [ C(RESULT_ACCESS) ] = -1,
112 [ C(RESULT_MISS) ] = -1,
117 [ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
118 [ C(RESULT_MISS) ] = 0x98e9, /* CPU Request to Memory, r */
121 [ C(RESULT_ACCESS) ] = -1,
122 [ C(RESULT_MISS) ] = -1,
124 [ C(OP_PREFETCH) ] = {
125 [ C(RESULT_ACCESS) ] = -1,
126 [ C(RESULT_MISS) ] = -1,
131 static __initconst const u64 amd_hw_cache_event_ids_f17h
132 [PERF_COUNT_HW_CACHE_MAX]
133 [PERF_COUNT_HW_CACHE_OP_MAX]
134 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
137 [C(RESULT_ACCESS)] = 0x0040, /* Data Cache Accesses */
138 [C(RESULT_MISS)] = 0xc860, /* L2$ access from DC Miss */
141 [C(RESULT_ACCESS)] = 0,
142 [C(RESULT_MISS)] = 0,
145 [C(RESULT_ACCESS)] = 0xff5a, /* h/w prefetch DC Fills */
146 [C(RESULT_MISS)] = 0,
151 [C(RESULT_ACCESS)] = 0x0080, /* Instruction cache fetches */
152 [C(RESULT_MISS)] = 0x0081, /* Instruction cache misses */
155 [C(RESULT_ACCESS)] = -1,
156 [C(RESULT_MISS)] = -1,
159 [C(RESULT_ACCESS)] = 0,
160 [C(RESULT_MISS)] = 0,
165 [C(RESULT_ACCESS)] = 0,
166 [C(RESULT_MISS)] = 0,
169 [C(RESULT_ACCESS)] = 0,
170 [C(RESULT_MISS)] = 0,
173 [C(RESULT_ACCESS)] = 0,
174 [C(RESULT_MISS)] = 0,
179 [C(RESULT_ACCESS)] = 0xff45, /* All L2 DTLB accesses */
180 [C(RESULT_MISS)] = 0xf045, /* L2 DTLB misses (PT walks) */
183 [C(RESULT_ACCESS)] = 0,
184 [C(RESULT_MISS)] = 0,
187 [C(RESULT_ACCESS)] = 0,
188 [C(RESULT_MISS)] = 0,
193 [C(RESULT_ACCESS)] = 0x0084, /* L1 ITLB misses, L2 ITLB hits */
194 [C(RESULT_MISS)] = 0xff85, /* L1 ITLB misses, L2 misses */
197 [C(RESULT_ACCESS)] = -1,
198 [C(RESULT_MISS)] = -1,
201 [C(RESULT_ACCESS)] = -1,
202 [C(RESULT_MISS)] = -1,
207 [C(RESULT_ACCESS)] = 0x00c2, /* Retired Branch Instr. */
208 [C(RESULT_MISS)] = 0x00c3, /* Retired Mispredicted BI */
211 [C(RESULT_ACCESS)] = -1,
212 [C(RESULT_MISS)] = -1,
215 [C(RESULT_ACCESS)] = -1,
216 [C(RESULT_MISS)] = -1,
221 [C(RESULT_ACCESS)] = 0,
222 [C(RESULT_MISS)] = 0,
225 [C(RESULT_ACCESS)] = -1,
226 [C(RESULT_MISS)] = -1,
229 [C(RESULT_ACCESS)] = -1,
230 [C(RESULT_MISS)] = -1,
236 * AMD Performance Monitor K7 and later, up to and including Family 16h:
238 static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
240 [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
241 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
242 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d,
243 [PERF_COUNT_HW_CACHE_MISSES] = 0x077e,
244 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
245 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
246 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
247 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */
251 * AMD Performance Monitor Family 17h and later:
253 static const u64 amd_zen1_perfmon_event_map[PERF_COUNT_HW_MAX] =
255 [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
256 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
257 [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
258 [PERF_COUNT_HW_CACHE_MISSES] = 0x0964,
259 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
260 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
261 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,
262 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x0187,
265 static const u64 amd_zen2_perfmon_event_map[PERF_COUNT_HW_MAX] =
267 [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
268 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
269 [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
270 [PERF_COUNT_HW_CACHE_MISSES] = 0x0964,
271 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
272 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
273 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00a9,
276 static const u64 amd_zen4_perfmon_event_map[PERF_COUNT_HW_MAX] =
278 [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
279 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
280 [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
281 [PERF_COUNT_HW_CACHE_MISSES] = 0x0964,
282 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
283 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
284 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00a9,
285 [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x100000120,
288 static u64 amd_pmu_event_map(int hw_event)
290 if (cpu_feature_enabled(X86_FEATURE_ZEN4) || boot_cpu_data.x86 >= 0x1a)
291 return amd_zen4_perfmon_event_map[hw_event];
293 if (cpu_feature_enabled(X86_FEATURE_ZEN2) || boot_cpu_data.x86 >= 0x19)
294 return amd_zen2_perfmon_event_map[hw_event];
296 if (cpu_feature_enabled(X86_FEATURE_ZEN1))
297 return amd_zen1_perfmon_event_map[hw_event];
299 return amd_perfmon_event_map[hw_event];
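/*
 * Worked example (editor's sketch, values taken from the tables above):
 *
 *   amd_pmu_event_map(PERF_COUNT_HW_CPU_CYCLES)       -> 0x0076 on all families
 *   amd_pmu_event_map(PERF_COUNT_HW_CACHE_REFERENCES) -> 0x077d on K7..Fam16h
 *                                                        0xff60 on Zen1 and later
 */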
303 * Previously calculated offsets
305 static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
306 static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;
310 * Legacy CPUs: 4 counters starting at 0xc0010000, each offset by 1
312 * CPUs with core performance counter extensions:
313 * 6 counters starting at 0xc0010200 each offset by 2
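 *
 * Worked example (editor's addition) for counter index 3:
 *
 *   legacy:     eventsel = MSR_K7_EVNTSEL0   + 3     = 0xc0010003
 *               perfctr  = MSR_K7_PERFCTR0   + 3     = 0xc0010007
 *   core ext.:  eventsel = MSR_F15H_PERF_CTL + 2 * 3 = 0xc0010206
 *               perfctr  = MSR_F15H_PERF_CTR + 2 * 3 = 0xc0010207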
315 static inline int amd_pmu_addr_offset(int index, bool eventsel)
323 offset = event_offsets[index];
325 offset = count_offsets[index];
330 if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
336 event_offsets[index] = offset;
338 count_offsets[index] = offset;
344 * AMD64 events are detected based on their event codes.
346 static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
348 return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
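/*
 * Editor's worked example: event select bits [35:32] of the config land in
 * bits [11:8] of the code after the shift, so for Fam15h event 0x1D6:
 *
 *   config = (0x1ULL << 32) | 0xD6
 *   code   = ((config >> 24) & 0x0f00) | (config & 0x00ff) = 0x1D6
 */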
351 static inline bool amd_is_pair_event_code(struct hw_perf_event *hwc)
353 if (!(x86_pmu.flags & PMU_FL_PAIR))
356 switch (amd_get_event_code(hwc)) {
357 case 0x003: return true; /* Retired SSE/AVX FLOPs */
358 default: return false;
362 DEFINE_STATIC_CALL_RET0(amd_pmu_branch_hw_config, *x86_pmu.hw_config);
364 static int amd_core_hw_config(struct perf_event *event)
366 if (event->attr.exclude_host && event->attr.exclude_guest)
368 * When HO == GO == 1 the hardware treats that as GO == HO == 0
369 * and will count in both modes. We don't want to count in that
370 * case so we emulate no-counting by setting US = OS = 0.
372 event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
373 ARCH_PERFMON_EVENTSEL_OS);
374 else if (event->attr.exclude_host)
375 event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
376 else if (event->attr.exclude_guest)
377 event->hw.config |= AMD64_EVENTSEL_HOSTONLY;
379 if ((x86_pmu.flags & PMU_FL_PAIR) && amd_is_pair_event_code(&event->hw))
380 event->hw.flags |= PERF_X86_EVENT_PAIR;
382 if (has_branch_stack(event))
383 return static_call(amd_pmu_branch_hw_config)(event);
388 static inline int amd_is_nb_event(struct hw_perf_event *hwc)
390 return (hwc->config & 0xe0) == 0xe0;
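/*
 * Editor's note (interpretation, see the Fam15h table further below): the
 * test above matches events whose low select byte is in 0xE0-0xFF, i.e. the
 * NorthBridge range. For example, "CPU Request to Memory" uses event 0xE9:
 *
 *   (0xe9 & 0xe0) == 0xe0   -> treated as an NB event
 */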
393 static inline int amd_has_nb(struct cpu_hw_events *cpuc)
395 struct amd_nb *nb = cpuc->amd_nb;
397 return nb && nb->nb_id != -1;
400 static int amd_pmu_hw_config(struct perf_event *event)
404 /* pass precise event sampling to ibs: */
405 if (event->attr.precise_ip && get_ibs_caps())
406 return forward_event_to_ibs(event);
408 if (has_branch_stack(event) && !x86_pmu.lbr_nr)
411 ret = x86_pmu_hw_config(event);
415 if (event->attr.type == PERF_TYPE_RAW)
416 event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
418 return amd_core_hw_config(event);
421 static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
422 struct perf_event *event)
424 struct amd_nb *nb = cpuc->amd_nb;
428 * need to scan whole list because event may not have
429 * been assigned during scheduling
431 * no race condition possible because event can only
432 * be removed on one CPU at a time AND PMU is disabled
435 for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
436 struct perf_event *tmp = event;
438 if (try_cmpxchg(nb->owners + i, &tmp, NULL))
444 * AMD64 NorthBridge events need special treatment because
445 * counter access needs to be synchronized across all cores
446 * of a package. Refer to BKDG section 3.12
448 * NB events are events measuring L3 cache and HyperTransport
449 * traffic. They are identified by an event code >= 0xe00.
450 * They measure events on the NorthBridge which is shared
451 * by all cores on a package. NB events are counted on a
452 * shared set of counters. When a NB event is programmed
453 * in a counter, the data actually comes from a shared
454 * counter. Thus, access to those counters needs to be
457 * We implement the synchronization such that no two cores
458 * can be measuring NB events using the same counters. Thus,
459 * we maintain a per-NB allocation table. The available slot
460 * is propagated using the event_constraint structure.
462 * We provide only one choice for each NB event based on
463 * the fact that only NB events have restrictions. Consequently,
464 * if a counter is available, there is a guarantee the NB event
465 * will be assigned to it. If no slot is available, an empty
466 * constraint is returned and scheduling will eventually fail
469 * Note that all cores attached to the same NB compete for the same
470 * counters to host NB events, this is why we use atomic ops. Some
471 * multi-chip CPUs may have more than one NB.
473 * Given that resources are allocated (cmpxchg), they must be
474 * eventually freed for others to use. This is accomplished by
475 * calling __amd_put_nb_event_constraints()
477 * Non-NB events are not impacted by this restriction.
479 static struct event_constraint *
480 __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
481 struct event_constraint *c)
483 struct hw_perf_event *hwc = &event->hw;
484 struct amd_nb *nb = cpuc->amd_nb;
485 struct perf_event *old;
495 * detect if already present, if so reuse
497 * cannot merge with actual allocation
498 * because of possible holes
500 * event can already be present yet not assigned (in hwc->idx)
501 * because of successive calls to x86_schedule_events() from
502 * hw_perf_group_sched_in() without hw_perf_enable()
504 for_each_set_bit(idx, c->idxmsk, x86_pmu_max_num_counters(NULL)) {
505 if (new == -1 || hwc->idx == idx)
506 /* assign free slot, prefer hwc->idx */
507 old = cmpxchg(nb->owners + idx, NULL, event);
508 else if (nb->owners[idx] == event)
509 /* event already present */
514 if (old && old != event)
517 /* reassign to this slot */
519 cmpxchg(nb->owners + new, event, NULL);
522 /* already present, reuse */
528 return &emptyconstraint;
530 return &nb->event_constraints[new];
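/*
 * Editor's sketch of the ownership protocol used above and in
 * __amd_put_nb_event_constraints():
 *
 *   claim:   old = cmpxchg(&nb->owners[idx], NULL, event);
 *            the slot is ours iff old == NULL or old == event
 *   release: tmp = event;
 *            try_cmpxchg(&nb->owners[i], &tmp, NULL);
 */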
533 static struct amd_nb *amd_alloc_nb(int cpu)
538 nb = kzalloc_node(sizeof(struct amd_nb), GFP_KERNEL, cpu_to_node(cpu));
545 * initialize all possible NB constraints
547 for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
548 __set_bit(i, nb->event_constraints[i].idxmsk);
549 nb->event_constraints[i].weight = 1;
554 typedef void (amd_pmu_branch_reset_t)(void);
555 DEFINE_STATIC_CALL_NULL(amd_pmu_branch_reset, amd_pmu_branch_reset_t);
557 static void amd_pmu_cpu_reset(int cpu)
560 static_call(amd_pmu_branch_reset)();
562 if (x86_pmu.version < 2)
565 /* Clear enable bits i.e. PerfCntrGlobalCtl.PerfCntrEn */
566 wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);
569 * Clear freeze and overflow bits i.e. PerfCntrGlobalStatus.LbrFreeze
570 * and PerfCntrGlobalStatus.PerfCntrOvfl
572 wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
573 GLOBAL_STATUS_LBRS_FROZEN | amd_pmu_global_cntr_mask);
576 static int amd_pmu_cpu_prepare(int cpu)
578 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
580 cpuc->lbr_sel = kzalloc_node(sizeof(struct er_account), GFP_KERNEL,
585 WARN_ON_ONCE(cpuc->amd_nb);
587 if (!x86_pmu.amd_nb_constraints)
590 cpuc->amd_nb = amd_alloc_nb(cpu);
594 kfree(cpuc->lbr_sel);
595 cpuc->lbr_sel = NULL;
600 static void amd_pmu_cpu_starting(int cpu)
602 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
603 void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
607 cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
608 amd_pmu_cpu_reset(cpu);
610 if (!x86_pmu.amd_nb_constraints)
613 nb_id = topology_amd_node_id(cpu);
614 WARN_ON_ONCE(nb_id == BAD_APICID);
616 for_each_online_cpu(i) {
617 nb = per_cpu(cpu_hw_events, i).amd_nb;
618 if (WARN_ON_ONCE(!nb))
621 if (nb->nb_id == nb_id) {
622 *onln = cpuc->amd_nb;
628 cpuc->amd_nb->nb_id = nb_id;
629 cpuc->amd_nb->refcnt++;
632 static void amd_pmu_cpu_dead(int cpu)
634 struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
636 kfree(cpuhw->lbr_sel);
637 cpuhw->lbr_sel = NULL;
639 if (!x86_pmu.amd_nb_constraints)
643 struct amd_nb *nb = cpuhw->amd_nb;
645 if (nb->nb_id == -1 || --nb->refcnt == 0)
648 cpuhw->amd_nb = NULL;
652 static __always_inline void amd_pmu_set_global_ctl(u64 ctl)
654 wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, ctl);
657 static inline u64 amd_pmu_get_global_status(void)
661 /* PerfCntrGlobalStatus is read-only */
662 rdmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, status);
667 static inline void amd_pmu_ack_global_status(u64 status)
670 * PerfCntrGlobalStatus is read-only but an overflow acknowledgment
671 * mechanism exists; writing 1 to a bit in PerfCntrGlobalStatusClr
672 * clears the same bit in PerfCntrGlobalStatus
675 wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, status);
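/*
 * Editor's example: acknowledging an overflow of PMC0 together with a frozen
 * LBR amounts to
 *
 *   amd_pmu_ack_global_status(BIT_ULL(0) | GLOBAL_STATUS_LBRS_FROZEN);
 */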
678 static bool amd_pmu_test_overflow_topbit(int idx)
682 rdmsrl(x86_pmu_event_addr(idx), counter);
684 return !(counter & BIT_ULL(x86_pmu.cntval_bits - 1));
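/*
 * Editor's note: counters are 48 bits wide and programmed to -(sample period)
 * with the period capped at 2^47 - 1 (see .max_period below), so bit 47
 * starts out set and only clears once the counter wraps:
 *
 *   (0 - 1000) & ((1ULL << 48) - 1)   -> bit 47 set (still counting)
 *   small value after the wrap        -> bit 47 clear (overflowed)
 */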
687 static bool amd_pmu_test_overflow_status(int idx)
689 return amd_pmu_get_global_status() & BIT_ULL(idx);
692 DEFINE_STATIC_CALL(amd_pmu_test_overflow, amd_pmu_test_overflow_topbit);
695 * When a PMC counter overflows, an NMI is used to process the event and
696 * reset the counter. NMI latency can result in the counter being updated
697 * before the NMI can run, which can result in what appear to be spurious
698 * NMIs. This function is intended to wait for the NMI to run and reset
699 * the counter to avoid possible unhandled NMI messages.
701 #define OVERFLOW_WAIT_COUNT 50
703 static void amd_pmu_wait_on_overflow(int idx)
708 * Wait for the counter to be reset if it has overflowed. This loop
709 * should exit very, very quickly, but just in case, don't wait indefinitely.
712 for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
713 if (!static_call(amd_pmu_test_overflow)(idx))
716 /* Might be in IRQ context, so can't sleep */
721 static void amd_pmu_check_overflow(void)
723 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
727 * This shouldn't be called from NMI context, but add a safeguard here
728 * to return, since if we're in NMI context we can't wait for an NMI
729 * to reset an overflowed counter value.
735 * Check each counter for overflow and wait for it to be reset by the
736 * NMI if it has overflowed. This relies on the fact that all active
737 * counters are always enabled when this function is called and
738 * ARCH_PERFMON_EVENTSEL_INT is always set.
740 for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
741 if (!test_bit(idx, cpuc->active_mask))
744 amd_pmu_wait_on_overflow(idx);
748 static void amd_pmu_enable_event(struct perf_event *event)
750 x86_pmu_enable_event(event);
753 static void amd_pmu_enable_all(int added)
755 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
758 amd_brs_enable_all();
760 for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
761 /* only activate events which are marked as active */
762 if (!test_bit(idx, cpuc->active_mask))
765 amd_pmu_enable_event(cpuc->events[idx]);
769 static void amd_pmu_v2_enable_event(struct perf_event *event)
771 struct hw_perf_event *hwc = &event->hw;
774 * Testing cpu_hw_events.enabled should be skipped in this case unlike
775 * in x86_pmu_enable_event().
777 * Since cpu_hw_events.enabled is set only after returning from
778 * x86_pmu_start(), the PMCs must be programmed and kept ready.
779 * Counting starts only after x86_pmu_enable_all() is called.
781 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
784 static __always_inline void amd_pmu_core_enable_all(void)
786 amd_pmu_set_global_ctl(amd_pmu_global_cntr_mask);
789 static void amd_pmu_v2_enable_all(int added)
791 amd_pmu_lbr_enable_all();
792 amd_pmu_core_enable_all();
795 static void amd_pmu_disable_event(struct perf_event *event)
797 x86_pmu_disable_event(event);
800 * This can be called from NMI context (via x86_pmu_stop). The counter
801 * may have overflowed, but either way, we'll never see it get reset
802 * by the NMI if we're already in the NMI. And the NMI latency support
803 * below will take care of any pending NMI that might have been
804 * generated by the overflow.
809 amd_pmu_wait_on_overflow(event->hw.idx);
812 static void amd_pmu_disable_all(void)
814 amd_brs_disable_all();
815 x86_pmu_disable_all();
816 amd_pmu_check_overflow();
819 static __always_inline void amd_pmu_core_disable_all(void)
821 amd_pmu_set_global_ctl(0);
824 static void amd_pmu_v2_disable_all(void)
826 amd_pmu_core_disable_all();
827 amd_pmu_lbr_disable_all();
828 amd_pmu_check_overflow();
831 DEFINE_STATIC_CALL_NULL(amd_pmu_branch_add, *x86_pmu.add);
833 static void amd_pmu_add_event(struct perf_event *event)
835 if (needs_branch_stack(event))
836 static_call(amd_pmu_branch_add)(event);
839 DEFINE_STATIC_CALL_NULL(amd_pmu_branch_del, *x86_pmu.del);
841 static void amd_pmu_del_event(struct perf_event *event)
843 if (needs_branch_stack(event))
844 static_call(amd_pmu_branch_del)(event);
848 * Because of NMI latency, if multiple PMC counters are active or other sources
849 * of NMIs are received, the perf NMI handler can handle one or more overflowed
850 * PMC counters outside of the NMI associated with the PMC overflow. If the NMI
851 * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
852 * back-to-back NMI support won't be active. This PMC handler needs to take into
853 * account that this can occur, otherwise this could result in unknown NMI
854 * messages being issued. Examples of this are PMC overflow while in the NMI
855 * handler when multiple PMCs are active or PMC overflow while handling some
856 * other source of an NMI.
858 * Attempt to mitigate this by creating an NMI window in which un-handled NMIs
859 * received during this window will be claimed. This prevents extending the
860 * window past when it is possible that latent NMIs should be received. The
861 * per-CPU perf_nmi_tstamp will be set to the window end time whenever perf has
862 * handled a counter. When an un-handled NMI is received, it will be claimed
863 * only if arriving within that window.
865 static inline int amd_pmu_adjust_nmi_window(int handled)
868 * If a counter was handled, record a timestamp such that un-handled
869 * NMIs will be claimed if arriving within that window.
872 this_cpu_write(perf_nmi_tstamp, jiffies + perf_nmi_window);
877 if (time_after(jiffies, this_cpu_read(perf_nmi_tstamp)))
883 static int amd_pmu_handle_irq(struct pt_regs *regs)
885 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
890 * Save the PMU state.
891 * It needs to be restored when leaving the handler.
893 pmu_enabled = cpuc->enabled;
896 amd_brs_disable_all();
898 /* Drain BRS if in use (could be inactive) */
902 /* Process any counter overflows */
903 handled = x86_pmu_handle_irq(regs);
905 cpuc->enabled = pmu_enabled;
907 amd_brs_enable_all();
909 return amd_pmu_adjust_nmi_window(handled);
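/*
 * Editor's worked example (assuming HZ = 250): perf_nmi_window is
 * msecs_to_jiffies(100) = 25 jiffies. If a counter is handled at
 * jiffies = 1000, perf_nmi_tstamp becomes 1025; an otherwise unhandled NMI
 * arriving at jiffies = 1010 is claimed, one arriving at jiffies = 1030 is not.
 */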
913 * AMD-specific callback invoked through perf_snapshot_branch_stack static
914 * call, defined in include/linux/perf_event.h. See its definition for API
915 * details. It's up to the caller to provide enough space in *entries* to fit all
916 * LBR records, otherwise the returned result will be truncated to *cnt* entries.
918 static int amd_pmu_v2_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
920 struct cpu_hw_events *cpuc;
924 * The sequence of steps to freeze LBR should be completely inlined
925 * and contain no branches to minimize contamination of LBR snapshot
927 local_irq_save(flags);
928 amd_pmu_core_disable_all();
929 __amd_pmu_lbr_disable();
931 cpuc = this_cpu_ptr(&cpu_hw_events);
934 cnt = min(cnt, x86_pmu.lbr_nr);
935 memcpy(entries, cpuc->lbr_entries, sizeof(struct perf_branch_entry) * cnt);
937 amd_pmu_v2_enable_all(0);
938 local_irq_restore(flags);
943 static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
945 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
946 struct perf_sample_data data;
947 struct hw_perf_event *hwc;
948 struct perf_event *event;
949 int handled = 0, idx;
950 u64 reserved, status, mask;
954 * Save the PMU state as it needs to be restored when leaving the handler.
957 pmu_enabled = cpuc->enabled;
960 /* Stop counting but do not disable LBR */
961 amd_pmu_core_disable_all();
963 status = amd_pmu_get_global_status();
965 /* Check if any overflows are pending */
969 /* Read branch records */
970 if (x86_pmu.lbr_nr) {
972 status &= ~GLOBAL_STATUS_LBRS_FROZEN;
975 reserved = status & ~amd_pmu_global_cntr_mask;
977 pr_warn_once("Reserved PerfCntrGlobalStatus bits are set (0x%llx), please consider updating microcode\n",
980 /* Clear any reserved bits set by buggy microcode */
981 status &= amd_pmu_global_cntr_mask;
983 for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
984 if (!test_bit(idx, cpuc->active_mask))
987 event = cpuc->events[idx];
989 x86_perf_event_update(event);
992 if (!(status & mask))
998 perf_sample_data_init(&data, 0, hwc->last_period);
1000 if (!x86_perf_event_set_period(event))
1003 if (has_branch_stack(event))
1004 perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL);
1006 if (perf_event_overflow(event, &data, regs))
1007 x86_pmu_stop(event, 0);
1011 * It should never be the case that some overflows are not handled as
1012 * the corresponding PMCs are expected to be inactive according to the active_mask.
1015 WARN_ON(status > 0);
1017 /* Clear overflow and freeze bits */
1018 amd_pmu_ack_global_status(~status);
1021 * Unmasking the LVTPC is not required as the Mask (M) bit of the LVT
1022 * PMI entry is not set by the local APIC when a PMC overflow occurs
1024 inc_irq_stat(apic_perf_irqs);
1027 cpuc->enabled = pmu_enabled;
1029 /* Resume counting only if PMU is active */
1031 amd_pmu_core_enable_all();
1033 return amd_pmu_adjust_nmi_window(handled);
1036 static struct event_constraint *
1037 amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
1038 struct perf_event *event)
1041 * if not NB event or no NB, then no constraints
1043 if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
1044 return &unconstrained;
1046 return __amd_get_nb_event_constraints(cpuc, event, NULL);
1049 static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
1050 struct perf_event *event)
1052 if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
1053 __amd_put_nb_event_constraints(cpuc, event);
1056 PMU_FORMAT_ATTR(event, "config:0-7,32-35");
1057 PMU_FORMAT_ATTR(umask, "config:8-15" );
1058 PMU_FORMAT_ATTR(edge, "config:18" );
1059 PMU_FORMAT_ATTR(inv, "config:23" );
1060 PMU_FORMAT_ATTR(cmask, "config:24-31" );
1062 static struct attribute *amd_format_attr[] = {
1063 &format_attr_event.attr,
1064 &format_attr_umask.attr,
1065 &format_attr_edge.attr,
1066 &format_attr_inv.attr,
1067 &format_attr_cmask.attr,
1071 /* AMD Family 15h */
1073 #define AMD_EVENT_TYPE_MASK 0x000000F0ULL
1075 #define AMD_EVENT_FP 0x00000000ULL ... 0x00000010ULL
1076 #define AMD_EVENT_LS 0x00000020ULL ... 0x00000030ULL
1077 #define AMD_EVENT_DC 0x00000040ULL ... 0x00000050ULL
1078 #define AMD_EVENT_CU 0x00000060ULL ... 0x00000070ULL
1079 #define AMD_EVENT_IC_DE 0x00000080ULL ... 0x00000090ULL
1080 #define AMD_EVENT_EX_LS 0x000000C0ULL
1081 #define AMD_EVENT_DE 0x000000D0ULL
1082 #define AMD_EVENT_NB 0x000000E0ULL ... 0x000000F0ULL
1085 * AMD family 15h event code/PMC mappings:
1087 * type = event_code & 0x0F0:
1089 * 0x000 FP PERF_CTL[5:3]
1090 * 0x010 FP PERF_CTL[5:3]
1091 * 0x020 LS PERF_CTL[5:0]
1092 * 0x030 LS PERF_CTL[5:0]
1093 * 0x040 DC PERF_CTL[5:0]
1094 * 0x050 DC PERF_CTL[5:0]
1095 * 0x060 CU PERF_CTL[2:0]
1096 * 0x070 CU PERF_CTL[2:0]
1097 * 0x080 IC/DE PERF_CTL[2:0]
1098 * 0x090 IC/DE PERF_CTL[2:0]
1101 * 0x0C0 EX/LS PERF_CTL[5:0]
1102 * 0x0D0 DE PERF_CTL[2:0]
1103 * 0x0E0 NB NB_PERF_CTL[3:0]
1104 * 0x0F0 NB NB_PERF_CTL[3:0]
1108 * 0x000 FP PERF_CTL[3], PERF_CTL[5:3] (*)
1109 * 0x003 FP PERF_CTL[3]
1110 * 0x004 FP PERF_CTL[3], PERF_CTL[5:3] (*)
1111 * 0x00B FP PERF_CTL[3]
1112 * 0x00D FP PERF_CTL[3]
1113 * 0x023 DE PERF_CTL[2:0]
1114 * 0x02D LS PERF_CTL[3]
1115 * 0x02E LS PERF_CTL[3,0]
1116 * 0x031 LS PERF_CTL[2:0] (**)
1117 * 0x043 CU PERF_CTL[2:0]
1118 * 0x045 CU PERF_CTL[2:0]
1119 * 0x046 CU PERF_CTL[2:0]
1120 * 0x054 CU PERF_CTL[2:0]
1121 * 0x055 CU PERF_CTL[2:0]
1122 * 0x08F IC PERF_CTL[0]
1123 * 0x187 DE PERF_CTL[0]
1124 * 0x188 DE PERF_CTL[0]
1125 * 0x0DB EX PERF_CTL[5:0]
1126 * 0x0DC LS PERF_CTL[5:0]
1127 * 0x0DD LS PERF_CTL[5:0]
1128 * 0x0DE LS PERF_CTL[5:0]
1129 * 0x0DF LS PERF_CTL[5:0]
1130 * 0x1C0 EX PERF_CTL[5:3]
1131 * 0x1D6 EX PERF_CTL[5:0]
1132 * 0x1D8 EX PERF_CTL[5:0]
1134 * (*) depending on the umask all FPU counters may be used
1135 * (**) only one unitmask enabled at a time
1138 static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0);
1139 static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
1140 static struct event_constraint amd_f15_PMC3 = EVENT_CONSTRAINT(0, 0x08, 0);
1141 static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
1142 static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
1143 static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
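/*
 * Editor's note: the second EVENT_CONSTRAINT() argument is the counter index
 * mask, matching the PERF_CTL ranges in the table above:
 *
 *   0x01 -> PMC0            0x07 -> PMC[2:0]    0x08 -> PMC3
 *   0x09 -> PMC3 or PMC0    0x3F -> PMC[5:0]    0x38 -> PMC[5:3]
 */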
1145 static struct event_constraint *
1146 amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
1147 struct perf_event *event)
1149 struct hw_perf_event *hwc = &event->hw;
1150 unsigned int event_code = amd_get_event_code(hwc);
1152 switch (event_code & AMD_EVENT_TYPE_MASK) {
1154 switch (event_code) {
1156 if (!(hwc->config & 0x0000F000ULL))
1158 if (!(hwc->config & 0x00000F00ULL))
1160 return &amd_f15_PMC3;
1162 if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
1164 return &amd_f15_PMC3;
1168 return &amd_f15_PMC3;
1170 return &amd_f15_PMC53;
1173 case AMD_EVENT_EX_LS:
1174 switch (event_code) {
1181 return &amd_f15_PMC20;
1183 return &amd_f15_PMC3;
1185 return &amd_f15_PMC30;
1187 if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
1188 return &amd_f15_PMC20;
1189 return &emptyconstraint;
1191 return &amd_f15_PMC53;
1193 return &amd_f15_PMC50;
1196 case AMD_EVENT_IC_DE:
1198 switch (event_code) {
1202 return &amd_f15_PMC0;
1203 case 0x0DB ... 0x0DF:
1206 return &amd_f15_PMC50;
1208 return &amd_f15_PMC20;
1211 /* moved to uncore.c */
1212 return &emptyconstraint;
1214 return &emptyconstraint;
1218 static struct event_constraint pair_constraint;
1220 static struct event_constraint *
1221 amd_get_event_constraints_f17h(struct cpu_hw_events *cpuc, int idx,
1222 struct perf_event *event)
1224 struct hw_perf_event *hwc = &event->hw;
1226 if (amd_is_pair_event_code(hwc))
1227 return &pair_constraint;
1229 return &unconstrained;
1232 static void amd_put_event_constraints_f17h(struct cpu_hw_events *cpuc,
1233 struct perf_event *event)
1235 struct hw_perf_event *hwc = &event->hw;
1237 if (is_counter_pair(hwc))
1242 * Because of the way BRS operates, with inactive and active phases and a
1243 * link to one counter, it is not possible to have two events using BRS
1244 * scheduled at the same time. There would be an issue with enforcing the
1245 * period of each one and given that the BRS saturates, it would not be possible
1246 * to guarantee correlated content for all events. Therefore, in situations
1247 * where multiple events want to use BRS, the kernel enforces mutual exclusion.
1248 * Exclusion is enforced by choosing only one counter for events using BRS.
1249 * The event scheduling logic will then automatically multiplex the
1250 * events and ensure that at most one event is actively using BRS.
1252 * The BRS counter could be any counter, but there is no constraint on Fam19h,
1253 * therefore all counters are equal and thus we pick the first one: PMC0
1255 static struct event_constraint amd_fam19h_brs_cntr0_constraint =
1256 EVENT_CONSTRAINT(0, 0x1, AMD64_RAW_EVENT_MASK);
1258 static struct event_constraint amd_fam19h_brs_pair_cntr0_constraint =
1259 __EVENT_CONSTRAINT(0, 0x1, AMD64_RAW_EVENT_MASK, 1, 0, PERF_X86_EVENT_PAIR);
1261 static struct event_constraint *
1262 amd_get_event_constraints_f19h(struct cpu_hw_events *cpuc, int idx,
1263 struct perf_event *event)
1265 struct hw_perf_event *hwc = &event->hw;
1266 bool has_brs = has_amd_brs(hwc);
1269 * In case BRS is used with an event requiring a counter pair,
1270 * the kernel allows it but only on counter 0 & 1 to enforce
1271 * multiplexing, which is required to protect BRS in case of multiple BRS users.
1274 if (amd_is_pair_event_code(hwc)) {
1275 return has_brs ? &amd_fam19h_brs_pair_cntr0_constraint
1280 return &amd_fam19h_brs_cntr0_constraint;
1282 return &unconstrained;
1286 static ssize_t amd_event_sysfs_show(char *page, u64 config)
1288 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) |
1289 (config & AMD64_EVENTSEL_EVENT) >> 24;
1291 return x86_event_sysfs_show(page, config, event);
1294 static void amd_pmu_limit_period(struct perf_event *event, s64 *left)
1297 * Decrease period by the depth of the BRS feature to get the last N
1298 * taken branches and approximate the desired period
1300 if (has_branch_stack(event) && *left > x86_pmu.lbr_nr)
1301 *left -= x86_pmu.lbr_nr;
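/*
 * Editor's example (assuming a 16-entry branch sampling buffer): a requested
 * period of 1000 is trimmed to 984, so the last ~16 taken branches before the
 * intended sample boundary are the ones captured.
 */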
1304 static __initconst const struct x86_pmu amd_pmu = {
1306 .handle_irq = amd_pmu_handle_irq,
1307 .disable_all = amd_pmu_disable_all,
1308 .enable_all = amd_pmu_enable_all,
1309 .enable = amd_pmu_enable_event,
1310 .disable = amd_pmu_disable_event,
1311 .hw_config = amd_pmu_hw_config,
1312 .schedule_events = x86_schedule_events,
1313 .eventsel = MSR_K7_EVNTSEL0,
1314 .perfctr = MSR_K7_PERFCTR0,
1315 .addr_offset = amd_pmu_addr_offset,
1316 .event_map = amd_pmu_event_map,
1317 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
1318 .cntr_mask64 = GENMASK_ULL(AMD64_NUM_COUNTERS - 1, 0),
1319 .add = amd_pmu_add_event,
1320 .del = amd_pmu_del_event,
1322 .cntval_mask = (1ULL << 48) - 1,
1324 /* use highest bit to detect overflow */
1325 .max_period = (1ULL << 47) - 1,
1326 .get_event_constraints = amd_get_event_constraints,
1327 .put_event_constraints = amd_put_event_constraints,
1329 .format_attrs = amd_format_attr,
1330 .events_sysfs_show = amd_event_sysfs_show,
1332 .cpu_prepare = amd_pmu_cpu_prepare,
1333 .cpu_starting = amd_pmu_cpu_starting,
1334 .cpu_dead = amd_pmu_cpu_dead,
1336 .amd_nb_constraints = 1,
1339 static ssize_t branches_show(struct device *cdev,
1340 struct device_attribute *attr,
1343 return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
1346 static DEVICE_ATTR_RO(branches);
1348 static struct attribute *amd_pmu_branches_attrs[] = {
1349 &dev_attr_branches.attr,
1354 amd_branches_is_visible(struct kobject *kobj, struct attribute *attr, int i)
1356 return x86_pmu.lbr_nr ? attr->mode : 0;
1359 static struct attribute_group group_caps_amd_branches = {
1361 .attrs = amd_pmu_branches_attrs,
1362 .is_visible = amd_branches_is_visible,
1365 #ifdef CONFIG_PERF_EVENTS_AMD_BRS
1367 EVENT_ATTR_STR(branch-brs, amd_branch_brs,
1368 "event=" __stringify(AMD_FAM19H_BRS_EVENT)"\n");
1370 static struct attribute *amd_brs_events_attrs[] = {
1371 EVENT_PTR(amd_branch_brs),
1376 amd_brs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
1378 return static_cpu_has(X86_FEATURE_BRS) && x86_pmu.lbr_nr ?
1382 static struct attribute_group group_events_amd_brs = {
1384 .attrs = amd_brs_events_attrs,
1385 .is_visible = amd_brs_is_visible,
1388 #endif /* CONFIG_PERF_EVENTS_AMD_BRS */
1390 static const struct attribute_group *amd_attr_update[] = {
1391 &group_caps_amd_branches,
1392 #ifdef CONFIG_PERF_EVENTS_AMD_BRS
1393 &group_events_amd_brs,
1398 static int __init amd_core_pmu_init(void)
1400 union cpuid_0x80000022_ebx ebx;
1401 u64 even_ctr_mask = 0ULL;
1404 if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
1407 /* Avoid calculating the value each time in the NMI handler */
1408 perf_nmi_window = msecs_to_jiffies(100);
1411 * If core performance counter extensions exist, we must use
1412 * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also
1413 * amd_pmu_addr_offset().
1415 x86_pmu.eventsel = MSR_F15H_PERF_CTL;
1416 x86_pmu.perfctr = MSR_F15H_PERF_CTR;
1417 x86_pmu.cntr_mask64 = GENMASK_ULL(AMD64_NUM_COUNTERS_CORE - 1, 0);
1419 /* Check for Performance Monitoring v2 support */
1420 if (boot_cpu_has(X86_FEATURE_PERFMON_V2)) {
1421 ebx.full = cpuid_ebx(EXT_PERFMON_DEBUG_FEATURES);
1423 /* Update PMU version for later usage */
1424 x86_pmu.version = 2;
1426 /* Find the number of available Core PMCs */
1427 x86_pmu.cntr_mask64 = GENMASK_ULL(ebx.split.num_core_pmc - 1, 0);
1429 amd_pmu_global_cntr_mask = x86_pmu.cntr_mask64;
1431 /* Update PMC handling functions */
1432 x86_pmu.enable_all = amd_pmu_v2_enable_all;
1433 x86_pmu.disable_all = amd_pmu_v2_disable_all;
1434 x86_pmu.enable = amd_pmu_v2_enable_event;
1435 x86_pmu.handle_irq = amd_pmu_v2_handle_irq;
1436 static_call_update(amd_pmu_test_overflow, amd_pmu_test_overflow_status);
1440 * AMD Core perfctr has separate MSRs for the NB events, see
1441 * the amd/uncore.c driver.
1443 x86_pmu.amd_nb_constraints = 0;
1445 if (boot_cpu_data.x86 == 0x15) {
1447 x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
1449 if (boot_cpu_data.x86 >= 0x17) {
1450 pr_cont("Fam17h+ ");
1452 * Family 17h and compatibles have constraints for Large
1453 * Increment per Cycle events: they may only be assigned an
1454 * even numbered counter that has a consecutive adjacent odd
1455 * numbered counter following it.
1457 for (i = 0; i < x86_pmu_max_num_counters(NULL) - 1; i += 2)
1458 even_ctr_mask |= BIT_ULL(i);
1460 pair_constraint = (struct event_constraint)
1461 __EVENT_CONSTRAINT(0, even_ctr_mask, 0,
1462 x86_pmu_max_num_counters(NULL) / 2, 0,
1463 PERF_X86_EVENT_PAIR);
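/*
 * Editor's worked example: with the 6 core counters set up above, the loop
 * yields even_ctr_mask = BIT(0) | BIT(2) | BIT(4) = 0x15, so a Large
 * Increment per Cycle event may only land on PMC0/2/4 and the constraint
 * weight is 6 / 2 = 3, one slot per even/odd counter pair.
 */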
1465 x86_pmu.get_event_constraints = amd_get_event_constraints_f17h;
1466 x86_pmu.put_event_constraints = amd_put_event_constraints_f17h;
1467 x86_pmu.perf_ctr_pair_en = AMD_MERGE_EVENT_ENABLE;
1468 x86_pmu.flags |= PMU_FL_PAIR;
1471 /* LBR and BRS are mutually exclusive features */
1472 if (!amd_pmu_lbr_init()) {
1473 /* LBR requires flushing on context switch */
1474 x86_pmu.sched_task = amd_pmu_lbr_sched_task;
1475 static_call_update(amd_pmu_branch_hw_config, amd_pmu_lbr_hw_config);
1476 static_call_update(amd_pmu_branch_reset, amd_pmu_lbr_reset);
1477 static_call_update(amd_pmu_branch_add, amd_pmu_lbr_add);
1478 static_call_update(amd_pmu_branch_del, amd_pmu_lbr_del);
1480 /* Only support branch_stack snapshot on perfmon v2 */
1481 if (x86_pmu.handle_irq == amd_pmu_v2_handle_irq)
1482 static_call_update(perf_snapshot_branch_stack, amd_pmu_v2_snapshot_branch_stack);
1483 } else if (!amd_brs_init()) {
1485 * BRS requires special event constraints and flushing on ctxsw.
1487 x86_pmu.get_event_constraints = amd_get_event_constraints_f19h;
1488 x86_pmu.sched_task = amd_pmu_brs_sched_task;
1489 x86_pmu.limit_period = amd_pmu_limit_period;
1491 static_call_update(amd_pmu_branch_hw_config, amd_brs_hw_config);
1492 static_call_update(amd_pmu_branch_reset, amd_brs_reset);
1493 static_call_update(amd_pmu_branch_add, amd_pmu_brs_add);
1494 static_call_update(amd_pmu_branch_del, amd_pmu_brs_del);
1497 * put_event_constraints callback same as Fam17h, set above
1500 /* branch sampling must be stopped when entering low power */
1501 amd_brs_lopwr_init();
1504 x86_pmu.attr_update = amd_attr_update;
1506 pr_cont("core perfctr, ");
1510 __init int amd_pmu_init(void)
1514 /* Performance-monitoring supported from K7 and later: */
1515 if (boot_cpu_data.x86 < 6)
1520 ret = amd_core_pmu_init();
1524 if (num_possible_cpus() == 1) {
1526 * No point in allocating data structures to serialize
1527 * against other CPUs, when there is only the one CPU.
1529 x86_pmu.amd_nb_constraints = 0;
1532 if (boot_cpu_data.x86 >= 0x17)
1533 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids_f17h, sizeof(hw_cache_event_ids));
1535 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids));
1540 static inline void amd_pmu_reload_virt(void)
1542 if (x86_pmu.version >= 2) {
1544 * Clear global enable bits, reprogram the PERF_CTL
1545 * registers with updated perf_ctr_virt_mask and then
1546 * set global enable bits once again
1548 amd_pmu_v2_disable_all();
1549 amd_pmu_enable_all(0);
1550 amd_pmu_v2_enable_all(0);
1554 amd_pmu_disable_all();
1555 amd_pmu_enable_all(0);
1558 void amd_pmu_enable_virt(void)
1560 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1562 cpuc->perf_ctr_virt_mask = 0;
1564 /* Reload all events */
1565 amd_pmu_reload_virt();
1567 EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
1569 void amd_pmu_disable_virt(void)
1571 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1574 * We only mask out the Host-only bit so that host-only counting works
1575 * when SVM is disabled. If someone sets up a guest-only counter when
1576 * SVM is disabled, the Guest-only bit still gets set and the counter
1577 * will not count anything.
1579 cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
1581 /* Reload all events */
1582 amd_pmu_reload_virt();
1584 EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);