/*
 * Performance events x86 architecture header
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

#include <asm/fpu/xstate.h>
#include <asm/intel_ds.h>
#include <asm/cpu.h>

/* To enable MSR tracing please use the generic trace points. */

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
        EXTRA_REG_NONE          = -1, /* not used */

        EXTRA_REG_RSP_0         = 0,  /* offcore_response_0 */
        EXTRA_REG_RSP_1         = 1,  /* offcore_response_1 */
        EXTRA_REG_LBR           = 2,  /* lbr_select */
        EXTRA_REG_LDLAT         = 3,  /* ld_lat_threshold */
        EXTRA_REG_FE            = 4,  /* fe_* */
        EXTRA_REG_SNOOP_0       = 5,  /* snoop response 0 */
        EXTRA_REG_SNOOP_1       = 6,  /* snoop response 1 */

        EXTRA_REG_MAX                 /* number of entries needed */
};

struct event_constraint {
        union {
                unsigned long   idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
                u64             idxmsk64;
        };
        u64             code;
        u64             cmask;
        int             weight;
        int             overlap;
        int             flags;
        unsigned int    size;
};

static inline bool constraint_match(struct event_constraint *c, u64 ecode)
{
        return ((ecode & c->cmask) - c->code) <= (u64)c->size;
}
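
/*
 * Illustrative example (not in the original header): for a range
 * constraint built with EVENT_CONSTRAINT_RANGE(0xc0, 0xc4, ...) below,
 * ->code is 0xc0, ->size is 0x4, and constraint_match() accepts any
 * masked event code in [0xc0, 0xc4]; the unsigned subtraction makes
 * values below ->code wrap around and fail the <= ->size test.
 */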

#define PERF_ARCH(name, val)    \
        PERF_X86_EVENT_##name = val,

/*
 * struct hw_perf_event.flags flags
 */
enum {
#include "perf_event_flags.h"
};

#undef PERF_ARCH

#define PERF_ARCH(name, val)                                            \
        static_assert((PERF_X86_EVENT_##name & PERF_EVENT_FLAG_ARCH) == \
                      PERF_X86_EVENT_##name);

#include "perf_event_flags.h"

#undef PERF_ARCH

static inline bool is_topdown_count(struct perf_event *event)
{
        return event->hw.flags & PERF_X86_EVENT_TOPDOWN;
}

static inline bool is_metric_event(struct perf_event *event)
{
        u64 config = event->attr.config;

        return ((config & ARCH_PERFMON_EVENTSEL_EVENT) == 0) &&
                ((config & INTEL_ARCH_EVENT_MASK) >= INTEL_TD_METRIC_RETIRING) &&
                ((config & INTEL_ARCH_EVENT_MASK) <= INTEL_TD_METRIC_MAX);
}

static inline bool is_slots_event(struct perf_event *event)
{
        return (event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_TD_SLOTS;
}

static inline bool is_topdown_event(struct perf_event *event)
{
        return is_metric_event(event) || is_slots_event(event);
}
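
/*
 * Illustrative note (not in the original header): a metric event is
 * encoded with a zero event-select byte and only a pseudo umask.
 * Assuming INTEL_TD_METRIC_RETIRING is 0x8000, a config of 0x8000 has
 * (config & ARCH_PERFMON_EVENTSEL_EVENT) == 0 and falls inside the
 * [INTEL_TD_METRIC_RETIRING, INTEL_TD_METRIC_MAX] window tested above.
 */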

struct amd_nb {
        int nb_id;  /* NorthBridge id */
        int refcnt; /* reference count */
        struct perf_event *owners[X86_PMC_IDX_MAX];
        struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

#define PEBS_COUNTER_MASK       ((1ULL << MAX_PEBS_EVENTS) - 1)
#define PEBS_PMI_AFTER_EACH_RECORD BIT_ULL(60)
#define PEBS_OUTPUT_OFFSET      61
#define PEBS_OUTPUT_MASK        (3ull << PEBS_OUTPUT_OFFSET)
#define PEBS_OUTPUT_PT          (1ull << PEBS_OUTPUT_OFFSET)
#define PEBS_VIA_PT_MASK        (PEBS_OUTPUT_PT | PEBS_PMI_AFTER_EACH_RECORD)

/*
 * Flags PEBS can handle without a PMI.
 *
 * TID can only be handled by flushing at context switch.
 * REGS_USER can be handled for events limited to ring 3.
 */
#define LARGE_PEBS_FLAGS \
        (PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
        PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
        PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
        PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
        PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
        PERF_SAMPLE_PERIOD | PERF_SAMPLE_CODE_PAGE_SIZE | \
        PERF_SAMPLE_WEIGHT_TYPE)

#define PEBS_GP_REGS                    \
        ((1ULL << PERF_REG_X86_AX)    | \
         (1ULL << PERF_REG_X86_BX)    | \
         (1ULL << PERF_REG_X86_CX)    | \
         (1ULL << PERF_REG_X86_DX)    | \
         (1ULL << PERF_REG_X86_DI)    | \
         (1ULL << PERF_REG_X86_SI)    | \
         (1ULL << PERF_REG_X86_SP)    | \
         (1ULL << PERF_REG_X86_BP)    | \
         (1ULL << PERF_REG_X86_IP)    | \
         (1ULL << PERF_REG_X86_FLAGS) | \
         (1ULL << PERF_REG_X86_R8)    | \
         (1ULL << PERF_REG_X86_R9)    | \
         (1ULL << PERF_REG_X86_R10)   | \
         (1ULL << PERF_REG_X86_R11)   | \
         (1ULL << PERF_REG_X86_R12)   | \
         (1ULL << PERF_REG_X86_R13)   | \
         (1ULL << PERF_REG_X86_R14)   | \
         (1ULL << PERF_REG_X86_R15))

/*
 * Per register state.
 */
struct er_account {
        raw_spinlock_t  lock;   /* per-core: protect structure */
        u64             config; /* extra MSR config */
        u64             reg;    /* extra MSR number */
        atomic_t        ref;    /* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
        struct er_account regs[EXTRA_REG_MAX];
        int               refcnt;  /* per-core: #HT threads */
        unsigned          core_id; /* per-core: core id */
};

enum intel_excl_state_type {
        INTEL_EXCL_UNUSED    = 0, /* counter is unused */
        INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
        INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};

struct intel_excl_states {
        enum intel_excl_state_type state[X86_PMC_IDX_MAX];
        bool sched_started; /* true if scheduling has started */
};

struct intel_excl_cntrs {
        raw_spinlock_t  lock;

        struct intel_excl_states states[2];

        union {
                u16     has_exclusive[2];
                u32     exclusive_present;
        };

        int             refcnt;  /* per-core: #HT threads */
        unsigned        core_id; /* per-core: core id */
};

struct x86_perf_task_context;
#define MAX_LBR_ENTRIES 32

enum {
        LBR_FORMAT_32           = 0x00,
        LBR_FORMAT_LIP          = 0x01,
        LBR_FORMAT_EIP          = 0x02,
        LBR_FORMAT_EIP_FLAGS    = 0x03,
        LBR_FORMAT_EIP_FLAGS2   = 0x04,
        LBR_FORMAT_INFO         = 0x05,
        LBR_FORMAT_TIME         = 0x06,
        LBR_FORMAT_INFO2        = 0x07,
        LBR_FORMAT_MAX_KNOWN    = LBR_FORMAT_INFO2,
};

enum {
        X86_PERF_KFREE_SHARED = 0,
        X86_PERF_KFREE_EXCL   = 1,
        X86_PERF_KFREE_MAX
};

struct cpu_hw_events {
        /*
         * Generic x86 PMC bits
         */
        struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
        unsigned long     active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        unsigned long     dirty[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        int               enabled;

        int               n_events; /* the # of events in the below arrays */
        int               n_added;  /* the # of last events in the below arrays;
                                       they've never been enabled yet */
        int               n_txn;    /* the # of last events in the below arrays;
                                       added in the current transaction */
        int               n_txn_pair;
        int               n_txn_metric;
        int               assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
        u64               tags[X86_PMC_IDX_MAX];

        struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
        struct event_constraint *event_constraint[X86_PMC_IDX_MAX];

        int               n_excl; /* the number of exclusive events */

        unsigned int      txn_flags;
        int               is_fake;

        /*
         * Intel DebugStore bits
         */
        struct debug_store *ds;
        void              *ds_pebs_vaddr;
        void              *ds_bts_vaddr;
        u64               pebs_enabled;
        int               n_pebs;
        int               n_large_pebs;
        int               n_pebs_via_pt;
        int               pebs_output;

        /* Current superset of the events' hardware configuration */
        u64               pebs_data_cfg;
        u64               active_pebs_data_cfg;
        int               pebs_record_size;

        /* Intel Fixed counter configuration */
        u64               fixed_ctrl_val;
        u64               active_fixed_ctrl_val;

        /*
         * Intel LBR bits
         */
        int               lbr_users;
        int               lbr_pebs_users;
        struct perf_branch_stack lbr_stack;
        struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
        union {
                struct er_account *lbr_sel;
                struct er_account *lbr_ctl;
        };
        u64               br_sel;
        void              *last_task_ctx;
        int               last_log_id;
        int               lbr_select;
        void              *lbr_xsave;

        /*
         * Intel host/guest exclude bits
         */
        u64               intel_ctrl_guest_mask;
        u64               intel_ctrl_host_mask;
        struct perf_guest_switch_msr guest_switch_msrs[X86_PMC_IDX_MAX];

        /*
         * Intel checkpoint mask
         */
        u64               intel_cp_status;

        /*
         * manage shared (per-core, per-cpu) registers
         * used on Intel NHM/WSM/SNB
         */
        struct intel_shared_regs *shared_regs;
        /*
         * manage exclusive counter access between hyperthreads
         */
        struct event_constraint *constraint_list; /* in enable order */
        struct intel_excl_cntrs *excl_cntrs;
        int excl_thread_id; /* 0 or 1 */

        /*
         * SKL TSX_FORCE_ABORT shadow
         */
        u64               tfa_shadow;

        /*
         * Perf Metrics
         */
        /* number of accepted metrics events */
        int               n_metric;

        /*
         * AMD specific bits
         */
        struct amd_nb     *amd_nb;
        int               brs_active; /* BRS is enabled */

        /* Inverted mask of bits to clear in the perf_ctr ctrl registers */
        u64               perf_ctr_virt_mask;
        int               n_pair; /* Large increment events */

        void              *kfree_on_online[X86_PERF_KFREE_MAX];

        struct pmu        *pmu;
};

#define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) { \
        { .idxmsk64 = (n) },            \
        .code = (c),                    \
        .size = (e) - (c),              \
        .cmask = (m),                   \
        .weight = (w),                  \
        .overlap = (o),                 \
        .flags = f,                     \
}

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) \
        __EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f)

#define EVENT_CONSTRAINT(c, n, m) \
        __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

/*
 * The constraint_match() function only works for 'simple' event codes
 * and not for extended (AMD64_EVENTSEL_EVENT) event codes.
 */
#define EVENT_CONSTRAINT_RANGE(c, e, n, m) \
        __EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0)

#define INTEL_EXCLEVT_CONSTRAINT(c, n)  \
        __EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
                           0, PERF_X86_EVENT_EXCL)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events then. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!) which
 * will increase scheduling cycles for an over-committed system
 * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros
 * and their counter masks must be kept at a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m) \
        __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n) \
        EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on a range of Event codes
 */
#define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n) \
        EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * Filter mask to validate fixed counter events.
 * The following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n) \
        EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
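
/*
 * Illustrative example (not in the original header): the fixed counters
 * live above the general-purpose counters in the index bitmask, so a
 * definition such as FIXED_EVENT_CONSTRAINT(0x00c0, 0) would pin the
 * architectural INST_RETIRED.ANY encoding to fixed counter 0 via
 * idxmsk64 bit 32 (i.e. 1ULL << (32 + 0)).
 */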

/*
 * The special metric counters do not actually exist. They are calculated from
 * the combination of the FxCtr3 + MSR_PERF_METRICS.
 *
 * The special metric counters are mapped to a dummy offset for the scheduler.
 * Sharing the same metric between multiple users without multiplexing is not
 * allowed, even though the hardware supports that in principle.
 */
#define METRIC_EVENT_CONSTRAINT(c, n)                                   \
        EVENT_CONSTRAINT(c, (1ULL << (INTEL_PMC_IDX_METRIC_BASE + n)),  \
                         INTEL_ARCH_EVENT_MASK)

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n) \
        EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Constraint on specific umask bit only + event */
#define INTEL_UBIT_EVENT_CONSTRAINT(c, n) \
        EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n) \
        EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n) \
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n) \
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PSD_CONSTRAINT(c, n) \
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_STLAT)

#define INTEL_PST_CONSTRAINT(c, n) \
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

#define INTEL_HYBRID_LAT_CONSTRAINT(c, n) \
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LAT_HYBRID)

/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
        EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)

#define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n) \
        EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n) \
        EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                           ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                           ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \
        __EVENT_CONSTRAINT_RANGE(code, end, n,          \
                           ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                           ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, \
                           PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                           INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                           INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, \
                           PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                           INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                           INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, \
                           PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                           INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)


/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c) \
        for ((e) = (c); (e)->weight != -1; (e)++)
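
/*
 * Illustrative usage (not in the original header): a model's constraint
 * table is a plain array terminated with EVENT_CONSTRAINT_END and is
 * walked with the iterator above, e.g. for a hypothetical table:
 *
 *      static struct event_constraint sample_constraints[] = {
 *              INTEL_EVENT_CONSTRAINT(0xc0, 0x3),      // counters 0-1 only
 *              EVENT_CONSTRAINT_END
 *      };
 *
 *      struct event_constraint *c;
 *      for_each_event_constraint(c, sample_constraints)
 *              pr_info("code=0x%llx\n", c->code);
 */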

/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMU of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_event.
 */
struct extra_reg {
        unsigned int    event;
        unsigned int    msr;
        u64             config_mask;
        u64             valid_mask;
        int             idx;  /* per_xxx->regs[] reg index */
        bool            extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {      \
        .event = (e),                   \
        .msr = (ms),                    \
        .config_mask = (m),             \
        .valid_mask = (vm),             \
        .idx = EXTRA_REG_##i,           \
        .extra_msr_access = true,       \
        }

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)      \
        EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx)     \
        EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
                        ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
        INTEL_UEVENT_EXTRA_REG(c, \
                               MSR_PEBS_LD_LAT_THRESHOLD, \
                               0xffff, \
                               LDLAT)
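
/*
 * Illustrative usage (not in the original header): model code describes
 * its extra registers in a table terminated by EVENT_EXTRA_END (defined
 * below). For example, the load-latency threshold MSR for the Intel
 * event 0x01cd (MEM_TRANS_RETIRED.LOAD_LATENCY) could be declared in a
 * hypothetical table as:
 *
 *      static struct extra_reg sample_extra_regs[] = {
 *              INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 *              EVENT_EXTRA_END
 *      };
 */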

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)

union perf_capabilities {
        struct {
                u64     lbr_format:6;
                u64     pebs_trap:1;
                u64     pebs_arch_reg:1;
                u64     pebs_format:4;
                u64     smm_freeze:1;
                /*
                 * PMU supports separate counter range for writing
                 * values > 32bit.
                 */
                u64     full_width_write:1;
                u64     pebs_baseline:1;
                u64     perf_metrics:1;
                u64     pebs_output_pt_available:1;
                u64     pebs_timing_info:1;
                u64     anythread_deprecated:1;
        };
        u64     capabilities;
};

struct x86_pmu_quirk {
        struct x86_pmu_quirk *next;
        void (*func)(void);
};

union x86_pmu_config {
        struct {
                u64 event:8,
                    umask:8,
                    usr:1,
                    os:1,
                    edge:1,
                    pc:1,
                    interrupt:1,
                    __reserved1:1,
                    en:1,
                    inv:1,
                    cmask:8,
                    event2:4,
                    __reserved2:4,
                    go:1,
                    ho:1;
        } bits;
        u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
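
/*
 * Illustrative example (not in the original header): X86_CONFIG() uses a
 * compound literal with designated initializers to build a raw config
 * value, so something like
 *
 *      X86_CONFIG(.event=0xc0, .umask=0x01, .inv=0, .cmask=0)
 *
 * packs the event-select and umask bytes into the low 16 bits of the
 * returned u64.
 */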

enum {
        x86_lbr_exclusive_lbr,
        x86_lbr_exclusive_bts,
        x86_lbr_exclusive_pt,
        x86_lbr_exclusive_max,
};

#define PERF_PEBS_DATA_SOURCE_MAX       0x10
#define PERF_PEBS_DATA_SOURCE_MASK      (PERF_PEBS_DATA_SOURCE_MAX - 1)

struct x86_hybrid_pmu {
        struct pmu                      pmu;
        const char                      *name;
        u8                              cpu_type;
        cpumask_t                       supported_cpus;
        union perf_capabilities         intel_cap;
        u64                             intel_ctrl;
        int                             max_pebs_events;
        int                             num_counters;
        int                             num_counters_fixed;
        struct event_constraint         unconstrained;

        u64                             hw_cache_event_ids
                                        [PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_OP_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX];
        u64                             hw_cache_extra_regs
                                        [PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_OP_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX];
        struct event_constraint         *event_constraints;
        struct event_constraint         *pebs_constraints;
        struct extra_reg                *extra_regs;

        unsigned int                    late_ack    :1,
                                        mid_ack     :1,
                                        enabled_ack :1;

        u64                             pebs_data_source[PERF_PEBS_DATA_SOURCE_MAX];
};

static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
{
        return container_of(pmu, struct x86_hybrid_pmu, pmu);
}

extern struct static_key_false perf_is_hybrid;
#define is_hybrid() static_branch_unlikely(&perf_is_hybrid)

#define hybrid(_pmu, _field)                            \
(*({                                                    \
        typeof(&x86_pmu._field) __Fp = &x86_pmu._field; \
                                                        \
        if (is_hybrid() && (_pmu))                      \
                __Fp = &hybrid_pmu(_pmu)->_field;       \
                                                        \
        __Fp;                                           \
}))
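
/*
 * Illustrative usage (not in the original header): hybrid() transparently
 * redirects a field access to the calling PMU's x86_hybrid_pmu copy on
 * hybrid parts and to the global x86_pmu otherwise, e.g.:
 *
 *      u64 intel_ctrl = hybrid(event->pmu, intel_ctrl);
 *
 * The statement expression yields a pointer which the outer (*...)
 * dereferences, so the same expression also works as an lvalue.
 */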

#define hybrid_var(_pmu, _var)                          \
(*({                                                    \
        typeof(&_var) __Fp = &_var;                     \
                                                        \
        if (is_hybrid() && (_pmu))                      \
                __Fp = &hybrid_pmu(_pmu)->_var;         \
                                                        \
        __Fp;                                           \
}))

#define hybrid_bit(_pmu, _field)                        \
({                                                      \
        bool __Fp = x86_pmu._field;                     \
                                                        \
        if (is_hybrid() && (_pmu))                      \
                __Fp = hybrid_pmu(_pmu)->_field;        \
                                                        \
        __Fp;                                           \
})

enum hybrid_pmu_type {
        hybrid_big              = 0x40,
        hybrid_small            = 0x20,

        hybrid_big_small        = hybrid_big | hybrid_small,
};

#define X86_HYBRID_PMU_ATOM_IDX         0
#define X86_HYBRID_PMU_CORE_IDX         1

#define X86_HYBRID_NUM_PMUS             2

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
        /*
         * Generic x86 PMC bits
         */
        const char      *name;
        int             version;
        int             (*handle_irq)(struct pt_regs *);
        void            (*disable_all)(void);
        void            (*enable_all)(int added);
        void            (*enable)(struct perf_event *);
        void            (*disable)(struct perf_event *);
        void            (*assign)(struct perf_event *event, int idx);
        void            (*add)(struct perf_event *);
        void            (*del)(struct perf_event *);
        void            (*read)(struct perf_event *event);
        int             (*set_period)(struct perf_event *event);
        u64             (*update)(struct perf_event *event);
        int             (*hw_config)(struct perf_event *event);
        int             (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
        unsigned        eventsel;
        unsigned        perfctr;
        int             (*addr_offset)(int index, bool eventsel);
        int             (*rdpmc_index)(int index);
        u64             (*event_map)(int);
        int             max_events;
        int             num_counters;
        int             num_counters_fixed;
        int             cntval_bits;
        u64             cntval_mask;
        union {
                unsigned long events_maskl;
                unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
        };
        int             events_mask_len;
        int             apic;
        u64             max_period;
        struct event_constraint *
                        (*get_event_constraints)(struct cpu_hw_events *cpuc,
                                                 int idx,
                                                 struct perf_event *event);

        void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
                                                 struct perf_event *event);

        void            (*start_scheduling)(struct cpu_hw_events *cpuc);

        void            (*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

        void            (*stop_scheduling)(struct cpu_hw_events *cpuc);

        struct event_constraint *event_constraints;
        struct x86_pmu_quirk *quirks;
        void            (*limit_period)(struct perf_event *event, s64 *l);

        /* PMI handler bits */
        unsigned int    late_ack        :1,
                        mid_ack         :1,
                        enabled_ack     :1;
        /*
         * sysfs attrs
         */
        int             attr_rdpmc_broken;
        int             attr_rdpmc;
        struct attribute **format_attrs;

        ssize_t         (*events_sysfs_show)(char *page, u64 config);
        const struct attribute_group **attr_update;

        unsigned long   attr_freeze_on_smi;

        /*
         * CPU Hotplug hooks
         */
        int             (*cpu_prepare)(int cpu);
        void            (*cpu_starting)(int cpu);
        void            (*cpu_dying)(int cpu);
        void            (*cpu_dead)(int cpu);

        void            (*check_microcode)(void);
        void            (*sched_task)(struct perf_event_pmu_context *pmu_ctx,
                                      bool sched_in);

        /*
         * Intel Arch Perfmon v2+
         */
        u64             intel_ctrl;
        union perf_capabilities intel_cap;

        /*
         * Intel DebugStore bits
         */
        unsigned int    bts                     :1,
                        bts_active              :1,
                        pebs                    :1,
                        pebs_active             :1,
                        pebs_broken             :1,
                        pebs_prec_dist          :1,
                        pebs_no_tlb             :1,
                        pebs_no_isolation       :1,
                        pebs_block              :1,
                        pebs_ept                :1;
        int             pebs_record_size;
        int             pebs_buffer_size;
        int             max_pebs_events;
        void            (*drain_pebs)(struct pt_regs *regs, struct perf_sample_data *data);
        struct event_constraint *pebs_constraints;
        void            (*pebs_aliases)(struct perf_event *event);
        u64             (*pebs_latency_data)(struct perf_event *event, u64 status);
        unsigned long   large_pebs_flags;
        u64             rtm_abort_event;
        u64             pebs_capable;

        /*
         * Intel LBR
         */
        unsigned int    lbr_tos, lbr_from, lbr_to,
                        lbr_info, lbr_nr;       /* LBR base regs and size */
        union {
                u64     lbr_sel_mask;           /* LBR_SELECT valid bits */
                u64     lbr_ctl_mask;           /* LBR_CTL valid bits */
        };
        union {
                const int *lbr_sel_map;         /* lbr_select mappings */
                int       *lbr_ctl_map;         /* LBR_CTL mappings */
        };
        bool            lbr_double_abort;       /* duplicated lbr aborts */
        bool            lbr_pt_coexist;         /* (LBR|BTS) may coexist with PT */

        unsigned int    lbr_has_info:1;
        unsigned int    lbr_has_tsx:1;
        unsigned int    lbr_from_flags:1;
        unsigned int    lbr_to_cycles:1;

        /*
         * Intel Architectural LBR CPUID Enumeration
         */
        unsigned int    lbr_depth_mask:8;
        unsigned int    lbr_deep_c_reset:1;
        unsigned int    lbr_lip:1;
        unsigned int    lbr_cpl:1;
        unsigned int    lbr_filter:1;
        unsigned int    lbr_call_stack:1;
        unsigned int    lbr_mispred:1;
        unsigned int    lbr_timed_lbr:1;
        unsigned int    lbr_br_type:1;

        void            (*lbr_reset)(void);
        void            (*lbr_read)(struct cpu_hw_events *cpuc);
        void            (*lbr_save)(void *ctx);
        void            (*lbr_restore)(void *ctx);

        /*
         * Intel PT/LBR/BTS are exclusive
         */
        atomic_t        lbr_exclusive[x86_lbr_exclusive_max];

        /*
         * Intel perf metrics
         */
        int             num_topdown_events;

        /*
         * perf task context (i.e. struct perf_event_pmu_context::task_ctx_data)
         * switch helper to bridge calls from perf/core to perf/x86.
         * See struct pmu::swap_task_ctx() usage for examples.
         */
        void            (*swap_task_ctx)(struct perf_event_pmu_context *prev_epc,
                                         struct perf_event_pmu_context *next_epc);

        /*
         * AMD bits
         */
        unsigned int    amd_nb_constraints : 1;
        u64             perf_ctr_pair_en;

        /*
         * Extra registers for events
         */
        struct extra_reg *extra_regs;
        unsigned int    flags;

        /*
         * Intel host/guest support (KVM)
         */
        struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr, void *data);

        /*
         * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
         */
        int             (*check_period)(struct perf_event *event, u64 period);

        int             (*aux_output_match)(struct perf_event *event);

        void            (*filter)(struct pmu *pmu, int cpu, bool *ret);
        /*
         * Hybrid support
         *
         * Most PMU capabilities are the same among different hybrid PMUs.
         * The global x86_pmu saves the architecture capabilities, which
         * are available for all PMUs. The hybrid_pmu only includes the
         * unique capabilities.
         */
        int             num_hybrid_pmus;
        struct x86_hybrid_pmu *hybrid_pmu;
        u8              (*get_hybrid_cpu_type)(void);
};

struct x86_perf_task_context_opt {
        int lbr_callstack_users;
        int lbr_stack_state;
        int log_id;
};

struct x86_perf_task_context {
        u64 lbr_sel;
        int tos;
        int valid_lbrs;
        struct x86_perf_task_context_opt opt;
        struct lbr_entry lbr[MAX_LBR_ENTRIES];
};

struct x86_perf_task_context_arch_lbr {
        struct x86_perf_task_context_opt opt;
        struct lbr_entry entries[];
};

/*
 * Add padding to guarantee the 64-byte alignment of the state buffer.
 *
 * The structure is dynamically allocated. The size of the LBR state may vary
 * based on the number of LBR registers.
 *
 * Do not put anything after the LBR state.
 */
struct x86_perf_task_context_arch_lbr_xsave {
        struct x86_perf_task_context_opt        opt;

        union {
                struct xregs_state              xsave;
                struct {
                        struct fxregs_state     i387;
                        struct xstate_header    header;
                        struct arch_lbr_state   lbr;
                } __attribute__ ((packed, aligned (XSAVE_ALIGNMENT)));
        };
};

#define x86_add_quirk(func_)                                            \
do {                                                                    \
        static struct x86_pmu_quirk __quirk __initdata = {              \
                .func = func_,                                          \
        };                                                              \
        __quirk.next = x86_pmu.quirks;                                  \
        x86_pmu.quirks = &__quirk;                                      \
} while (0)

/*
 * x86_pmu flags
 */
#define PMU_FL_NO_HT_SHARING    0x1   /* no hyper-threading resource sharing */
#define PMU_FL_HAS_RSP_1        0x2   /* has 2 equivalent offcore_rsp regs */
#define PMU_FL_EXCL_CNTRS       0x4   /* has exclusive counter requirements */
#define PMU_FL_EXCL_ENABLED     0x8   /* exclusive counter active */
#define PMU_FL_PEBS_ALL         0x10  /* all events are valid PEBS events */
#define PMU_FL_TFA              0x20  /* deal with TSX force abort */
#define PMU_FL_PAIR             0x40  /* merge counters for large incr. events */
#define PMU_FL_INSTR_LATENCY    0x80  /* Support Instruction Latency in PEBS Memory Info Record */
#define PMU_FL_MEM_LOADS_AUX    0x100 /* Require an auxiliary event for the complete memory info */
#define PMU_FL_RETIRE_LATENCY   0x200 /* Support Retire Latency in PEBS */

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id)  &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)                                          \
static struct perf_pmu_events_attr EVENT_VAR(_id) = {                   \
        .attr           = __ATTR(_name, 0444, events_sysfs_show, NULL), \
        .id             = PERF_COUNT_HW_##_id,                          \
        .event_str      = NULL,                                         \
};

#define EVENT_ATTR_STR(_name, v, str)                                   \
static struct perf_pmu_events_attr event_attr_##v = {                   \
        .attr           = __ATTR(_name, 0444, events_sysfs_show, NULL), \
        .id             = 0,                                            \
        .event_str      = str,                                          \
};

#define EVENT_ATTR_STR_HT(_name, v, noht, ht)                           \
static struct perf_pmu_events_ht_attr event_attr_##v = {                \
        .attr           = __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
        .id             = 0,                                            \
        .event_str_noht = noht,                                         \
        .event_str_ht   = ht,                                           \
}

#define EVENT_ATTR_STR_HYBRID(_name, v, str, _pmu)                      \
static struct perf_pmu_events_hybrid_attr event_attr_##v = {            \
        .attr           = __ATTR(_name, 0444, events_hybrid_sysfs_show, NULL),\
        .id             = 0,                                            \
        .event_str      = str,                                          \
        .pmu_type       = _pmu,                                         \
}

#define FORMAT_HYBRID_PTR(_id) (&format_attr_hybrid_##_id.attr.attr)

#define FORMAT_ATTR_HYBRID(_name, _pmu)                                 \
static struct perf_pmu_format_hybrid_attr format_attr_hybrid_##_name = {\
        .attr           = __ATTR_RO(_name),                             \
        .pmu_type       = _pmu,                                         \
}

struct pmu *x86_get_pmu(unsigned int cpu);
extern struct x86_pmu x86_pmu __read_mostly;

DECLARE_STATIC_CALL(x86_pmu_set_period, *x86_pmu.set_period);
DECLARE_STATIC_CALL(x86_pmu_update,     *x86_pmu.update);

static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
{
        if (static_cpu_has(X86_FEATURE_ARCH_LBR))
                return &((struct x86_perf_task_context_arch_lbr *)ctx)->opt;

        return &((struct x86_perf_task_context *)ctx)->opt;
}

static inline bool x86_pmu_has_lbr_callstack(void)
{
        return x86_pmu.lbr_sel_map &&
               x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
DECLARE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];

u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
        return x86_pmu.eventsel + (x86_pmu.addr_offset ?
                                   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
        return x86_pmu.perfctr + (x86_pmu.addr_offset ?
                                  x86_pmu.addr_offset(index, false) : index);
}

static inline int x86_pmu_rdpmc_index(int index)
{
        return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}
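
/*
 * Illustrative example (not in the original header): with no
 * ->addr_offset() callback the counter MSRs are assumed to be laid out
 * contiguously, so if ->eventsel were MSR_ARCH_PERFMON_EVENTSEL0 then
 * x86_pmu_config_addr(2) would return MSR_ARCH_PERFMON_EVENTSEL0 + 2.
 * PMUs whose eventsel/perfctr pairs are interleaved rather than
 * contiguous (e.g. AMD family 15h+) install ->addr_offset() instead.
 */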

bool check_hw_exists(struct pmu *pmu, int num_counters,
                     int num_counters_fixed);

int x86_add_exclusive(unsigned int what);

void x86_del_exclusive(unsigned int what);

int x86_reserve_hardware(void);

void x86_release_hardware(void);

int x86_pmu_max_precise(void);

void hw_perf_lbr_event_destroy(struct perf_event *event);

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline bool has_amd_brs(struct hw_perf_event *hwc)
{
        return hwc->flags & PERF_X86_EVENT_AMD_BRS;
}

static inline bool is_counter_pair(struct hw_perf_event *hwc)
{
        return hwc->flags & PERF_X86_EVENT_PAIR;
}

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
                                          u64 enable_mask)
{
        u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

        if (hwc->extra_reg.reg)
                wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);

        /*
         * Add an enabled Merge event on the next counter if a large
         * increment event is being enabled on this counter.
         */
        if (is_counter_pair(hwc))
                wrmsrl(x86_pmu_config_addr(hwc->idx + 1), x86_pmu.perf_ctr_pair_en);

        wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}

void x86_pmu_enable_all(int added);

int perf_assign_events(struct event_constraint **constraints, int n,
                       int wmin, int wmax, int gpmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
        u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
        struct hw_perf_event *hwc = &event->hw;

        wrmsrl(hwc->config_base, hwc->config & ~disable_mask);

        if (is_counter_pair(hwc))
                wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

void x86_pmu_show_pmu_cap(int num_counters, int num_counters_fixed,
                          u64 intel_ctrl);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
        return ip > PAGE_OFFSET;
#else
        return (long)ip < 0;
#endif
}

/*
 * Not all PMUs provide the right context information to place the reported IP
 * into full context. Specifically segment registers are typically not
 * supplied.
 *
 * Assuming the address is a linear address (it is for IBS), we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
 * to reflect this.
 *
 * Intel PEBS/LBR appear to typically provide the effective address, nothing
 * much we can do about that but pray and treat it like a linear address.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
        regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
        if (regs->flags & X86_VM_MASK)
                regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
        regs->ip = ip;
}

/*
 * x86 control flow change classification:
 * x86 control flow changes include branches, interrupts, traps, faults
 */
enum {
        X86_BR_NONE             = 0,      /* unknown */

        X86_BR_USER             = 1 << 0, /* branch target is user */
        X86_BR_KERNEL           = 1 << 1, /* branch target is kernel */

        X86_BR_CALL             = 1 << 2, /* call */
        X86_BR_RET              = 1 << 3, /* return */
        X86_BR_SYSCALL          = 1 << 4, /* syscall */
        X86_BR_SYSRET           = 1 << 5, /* syscall return */
        X86_BR_INT              = 1 << 6, /* sw interrupt */
        X86_BR_IRET             = 1 << 7, /* return from interrupt */
        X86_BR_JCC              = 1 << 8, /* conditional */
        X86_BR_JMP              = 1 << 9, /* jump */
        X86_BR_IRQ              = 1 << 10,/* hw interrupt or trap or fault */
        X86_BR_IND_CALL         = 1 << 11,/* indirect calls */
        X86_BR_ABORT            = 1 << 12,/* transaction abort */
        X86_BR_IN_TX            = 1 << 13,/* in transaction */
        X86_BR_NO_TX            = 1 << 14,/* not in transaction */
        X86_BR_ZERO_CALL        = 1 << 15,/* zero length call */
        X86_BR_CALL_STACK       = 1 << 16,/* call stack */
        X86_BR_IND_JMP          = 1 << 17,/* indirect jump */

        X86_BR_TYPE_SAVE        = 1 << 18,/* indicate to save branch type */
};

#define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
#define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX)

#define X86_BR_ANY       \
        (X86_BR_CALL    |\
         X86_BR_RET     |\
         X86_BR_SYSCALL |\
         X86_BR_SYSRET  |\
         X86_BR_INT     |\
         X86_BR_IRET    |\
         X86_BR_JCC     |\
         X86_BR_JMP     |\
         X86_BR_IRQ     |\
         X86_BR_ABORT   |\
         X86_BR_IND_CALL|\
         X86_BR_IND_JMP |\
         X86_BR_ZERO_CALL)

#define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)

#define X86_BR_ANY_CALL          \
        (X86_BR_CALL            |\
         X86_BR_IND_CALL        |\
         X86_BR_ZERO_CALL       |\
         X86_BR_SYSCALL         |\
         X86_BR_IRQ             |\
         X86_BR_INT)

int common_branch_type(int type);
int branch_type(unsigned long from, unsigned long to, int abort);
int branch_type_fused(unsigned long from, unsigned long to, int abort,
                      int *offset);

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
                          char *page);
ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
                             char *page);
ssize_t events_hybrid_sysfs_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *page);

static inline bool fixed_counter_disabled(int i, struct pmu *pmu)
{
        u64 intel_ctrl = hybrid(pmu, intel_ctrl);

        return !(intel_ctrl >> (i + INTEL_PMC_IDX_FIXED));
}

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

int amd_pmu_lbr_init(void);
void amd_pmu_lbr_reset(void);
void amd_pmu_lbr_read(void);
void amd_pmu_lbr_add(struct perf_event *event);
void amd_pmu_lbr_del(struct perf_event *event);
void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
void amd_pmu_lbr_enable_all(void);
void amd_pmu_lbr_disable_all(void);
int amd_pmu_lbr_hw_config(struct perf_event *event);

#ifdef CONFIG_PERF_EVENTS_AMD_BRS

#define AMD_FAM19H_BRS_EVENT 0xc4 /* RETIRED_TAKEN_BRANCH_INSTRUCTIONS */

int amd_brs_init(void);
void amd_brs_disable(void);
void amd_brs_enable(void);
void amd_brs_enable_all(void);
void amd_brs_disable_all(void);
void amd_brs_drain(void);
void amd_brs_lopwr_init(void);
int amd_brs_hw_config(struct perf_event *event);
void amd_brs_reset(void);

static inline void amd_pmu_brs_add(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        perf_sched_cb_inc(event->pmu);
        cpuc->lbr_users++;
        /*
         * No need to reset BRS because it is reset
         * on brs_enable() and it is saturating
         */
}

static inline void amd_pmu_brs_del(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        cpuc->lbr_users--;
        WARN_ON_ONCE(cpuc->lbr_users < 0);

        perf_sched_cb_dec(event->pmu);
}

void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
#else
static inline int amd_brs_init(void)
{
        return 0;
}
static inline void amd_brs_disable(void) {}
static inline void amd_brs_enable(void) {}
static inline void amd_brs_drain(void) {}
static inline void amd_brs_lopwr_init(void) {}
static inline void amd_brs_disable_all(void) {}
static inline int amd_brs_hw_config(struct perf_event *event)
{
        return 0;
}
static inline void amd_brs_reset(void) {}

static inline void amd_pmu_brs_add(struct perf_event *event)
{
}

static inline void amd_pmu_brs_del(struct perf_event *event)
{
}

static inline void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{
}

static inline void amd_brs_enable_all(void)
{
}

#endif

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
        return 0;
}

static inline int amd_brs_init(void)
{
        return -EOPNOTSUPP;
}

static inline void amd_brs_drain(void)
{
}

static inline void amd_brs_enable_all(void)
{
}

static inline void amd_brs_disable_all(void)
{
}
#endif /* CONFIG_CPU_SUP_AMD */

static inline int is_pebs_pt(struct perf_event *event)
{
        return !!(event->hw.flags & PERF_X86_EVENT_PEBS_VIA_PT);
}
de0428a7 KW |
1418 | #ifdef CONFIG_CPU_SUP_INTEL |
1419 | ||
81ec3f3c | 1420 | static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period) |
48070342 | 1421 | { |
67266c10 JO |
1422 | struct hw_perf_event *hwc = &event->hw; |
1423 | unsigned int hw_event, bts_event; | |
1424 | ||
1425 | if (event->attr.freq) | |
1426 | return false; | |
1427 | ||
1428 | hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; | |
1429 | bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); | |
48070342 | 1430 | |
81ec3f3c JO |
1431 | return hw_event == bts_event && period == 1; |
1432 | } | |
1433 | ||
1434 | static inline bool intel_pmu_has_bts(struct perf_event *event) | |
1435 | { | |
1436 | struct hw_perf_event *hwc = &event->hw; | |
1437 | ||
1438 | return intel_pmu_has_bts_period(event, hwc->sample_period); | |
48070342 AS |
1439 | } |
1440 | ||
c22ac2a3 SL |
1441 | static __always_inline void __intel_pmu_pebs_disable_all(void) |
1442 | { | |
1443 | wrmsrl(MSR_IA32_PEBS_ENABLE, 0); | |
1444 | } | |
1445 | ||
1446 | static __always_inline void __intel_pmu_arch_lbr_disable(void) | |
1447 | { | |
1448 | wrmsrl(MSR_ARCH_LBR_CTL, 0); | |
1449 | } | |
1450 | ||
1451 | static __always_inline void __intel_pmu_lbr_disable(void) | |
1452 | { | |
1453 | u64 debugctl; | |
1454 | ||
1455 | rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); | |
1456 | debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); | |
1457 | wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); | |
1458 | } | |
1459 | ||
int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event);

extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

void release_lbr_buffers(void);

void reserve_lbr_buffers(void);

extern struct event_constraint bts_constraint;
extern struct event_constraint vlbr_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

u64 adl_latency_data_small(struct perf_event *event, u64 status);

u64 mtl_latency_data_small(struct perf_event *event, u64 status);

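/*
 * PEBS event constraint tables, one per supported microarchitecture;
 * the matching table is selected during intel_pmu_init() model setup.
 */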
extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_slm_pebs_event_constraints[];

extern struct event_constraint intel_glm_pebs_event_constraints[];

extern struct event_constraint intel_glp_pebs_event_constraints[];

extern struct event_constraint intel_grt_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

extern struct event_constraint intel_ivb_pebs_event_constraints[];

extern struct event_constraint intel_hsw_pebs_event_constraints[];

extern struct event_constraint intel_bdw_pebs_event_constraints[];

extern struct event_constraint intel_skl_pebs_event_constraints[];

extern struct event_constraint intel_icl_pebs_event_constraints[];

extern struct event_constraint intel_spr_pebs_event_constraints[];

1526 | struct event_constraint *intel_pebs_constraints(struct perf_event *event); |
1527 | ||
68f7082f PZ |
1528 | void intel_pmu_pebs_add(struct perf_event *event); |
1529 | ||
1530 | void intel_pmu_pebs_del(struct perf_event *event); | |
1531 | ||
de0428a7 KW |
1532 | void intel_pmu_pebs_enable(struct perf_event *event); |
1533 | ||
1534 | void intel_pmu_pebs_disable(struct perf_event *event); | |
1535 | ||
1536 | void intel_pmu_pebs_enable_all(void); | |
1537 | ||
1538 | void intel_pmu_pebs_disable_all(void); | |
1539 | ||
bd275681 | 1540 | void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in); |
9c964efa | 1541 | |
5bee2cc6 KL |
1542 | void intel_pmu_auto_reload_read(struct perf_event *event); |
1543 | ||
5624986d | 1544 | void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr); |
c22497f5 | 1545 | |
de0428a7 KW |
1546 | void intel_ds_init(void); |
1547 | ||
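/*
 * LBR management: reset/read/save/restore of the branch stack,
 * per-model init routines, and hooks invoked on task context switch.
 */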
void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
				 struct perf_event_pmu_context *next_epc);

void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);

u64 lbr_from_signext_quirk_wr(u64 val);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_reset_32(void);

void intel_pmu_lbr_reset_64(void);

void intel_pmu_lbr_add(struct perf_event *event);

void intel_pmu_lbr_del(struct perf_event *event);

void intel_pmu_lbr_enable_all(bool pmi);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc);

void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc);

void intel_pmu_lbr_save(void *ctx);

void intel_pmu_lbr_restore(void *ctx);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_slm(void);

void intel_pmu_lbr_init_snb(void);

void intel_pmu_lbr_init_hsw(void);

void intel_pmu_lbr_init_skl(void);

void intel_pmu_lbr_init_knl(void);

void intel_pmu_lbr_init(void);

void intel_pmu_arch_lbr_init(void);

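/*
 * Per-model initializers for the table that maps the PEBS data-source
 * field onto the generic perf_mem_data_src encoding.
 */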
void intel_pmu_pebs_data_source_nhm(void);

void intel_pmu_pebs_data_source_skl(bool pmem);

void intel_pmu_pebs_data_source_adl(void);

void intel_pmu_pebs_data_source_grt(void);

void intel_pmu_pebs_data_source_mtl(void);

void intel_pmu_pebs_data_source_cmt(void);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

void intel_pt_interrupt(void);

int intel_bts_interrupt(void);

void intel_bts_enable_local(void);

void intel_bts_disable_local(void);

int p4_pmu_init(void);

int p6_pmu_init(void);

int knc_pmu_init(void);

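/*
 * True when the cross-hyperthread counter corruption workaround
 * (exclusive counter scheduling, PMU_FL_EXCL_ENABLED) is active.
 */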
static inline int is_ht_workaround_enabled(void)
{
	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
}

#else /* CONFIG_CPU_SUP_INTEL */

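/*
 * Intel support is compiled out: the same no-op fallback pattern as
 * the AMD stubs above.
 */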
static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline void release_lbr_buffers(void)
{
}

static inline void reserve_lbr_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
	return 0;
}

static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
{
	return 0;
}

static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
{
}

static inline int is_ht_workaround_enabled(void)
{
	return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */
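/* Zhaoxin and Centaur CPUs share one PMU implementation. */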
#if ((defined CONFIG_CPU_SUP_CENTAUR) || (defined CONFIG_CPU_SUP_ZHAOXIN))
int zhaoxin_pmu_init(void);
#else
static inline int zhaoxin_pmu_init(void)
{
	return 0;
}
#endif /* CONFIG_CPU_SUP_CENTAUR or CONFIG_CPU_SUP_ZHAOXIN */