/*
 * Performance events x86 architecture header
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

#include <asm/intel_ds.h>

/* To enable MSR tracing please use the generic trace points. */

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
	EXTRA_REG_NONE  = -1,	/* not used */

	EXTRA_REG_RSP_0 = 0,	/* offcore_response_0 */
	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
	EXTRA_REG_LBR   = 2,	/* lbr_select */
	EXTRA_REG_LDLAT = 3,	/* ld_lat_threshold */
	EXTRA_REG_FE    = 4,	/* fe_* */

	EXTRA_REG_MAX		/* number of entries needed */
};

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64		code;
	u64		cmask;
	int		weight;
	int		overlap;
	int		flags;
	unsigned int	size;
};

static inline bool constraint_match(struct event_constraint *c, u64 ecode)
{
	return ((ecode & c->cmask) - c->code) <= (u64)c->size;
}

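/*
 * Illustrative sketch (not part of the original header): constraint_match()
 * uses unsigned wraparound to turn a range check into a single compare.
 * Hypothetical values, assuming a cmask that keeps only the event-code byte:
 *
 *	struct event_constraint c = { .code = 0xd0, .cmask = 0xff, .size = 0x4 };
 *	constraint_match(&c, 0xd2);	// true:  0xd2 - 0xd0 = 2 <= 4
 *	constraint_match(&c, 0xcf);	// false: 0xcf - 0xd0 wraps to a huge u64
 */
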
/*
 * struct hw_perf_event.flags flags
 */
#define PERF_X86_EVENT_PEBS_LDLAT	0x0001 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST		0x0002 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW	0x0004 /* haswell style datala, store */
#define PERF_X86_EVENT_PEBS_LD_HSW	0x0008 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW	0x0010 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL		0x0020 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC		0x0040 /* dynamic alloc'd constraint */
#define PERF_X86_EVENT_RDPMC_ALLOWED	0x0080 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT	0x0100 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD	0x0200 /* use PEBS auto-reload */
#define PERF_X86_EVENT_LARGE_PEBS	0x0400 /* use large PEBS */

struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

#define PEBS_COUNTER_MASK	((1ULL << MAX_PEBS_EVENTS) - 1)

/*
 * Flags PEBS can handle without a PMI.
 *
 * TID can only be handled by flushing at context switch.
 * REGS_USER can be handled for events limited to ring 3.
 */
#define LARGE_PEBS_FLAGS \
	(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
	PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
	PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
	PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
	PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
	PERF_SAMPLE_PERIOD)

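/*
 * Illustrative sketch (an assumption, not a statement from this header): the
 * PEBS setup code can decide that multi-record ("large") PEBS is usable only
 * when a sampling event requests nothing outside the set above, roughly:
 *
 *	if (!(event->attr.sample_type & ~LARGE_PEBS_FLAGS))
 *		// safe to buffer many PEBS records before raising a PMI
 */
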
#define PEBS_GP_REGS			\
	((1ULL << PERF_REG_X86_AX)    | \
	 (1ULL << PERF_REG_X86_BX)    | \
	 (1ULL << PERF_REG_X86_CX)    | \
	 (1ULL << PERF_REG_X86_DX)    | \
	 (1ULL << PERF_REG_X86_DI)    | \
	 (1ULL << PERF_REG_X86_SI)    | \
	 (1ULL << PERF_REG_X86_SP)    | \
	 (1ULL << PERF_REG_X86_BP)    | \
	 (1ULL << PERF_REG_X86_IP)    | \
	 (1ULL << PERF_REG_X86_FLAGS) | \
	 (1ULL << PERF_REG_X86_R8)    | \
	 (1ULL << PERF_REG_X86_R9)    | \
	 (1ULL << PERF_REG_X86_R10)   | \
	 (1ULL << PERF_REG_X86_R11)   | \
	 (1ULL << PERF_REG_X86_R12)   | \
	 (1ULL << PERF_REG_X86_R13)   | \
	 (1ULL << PERF_REG_X86_R14)   | \
	 (1ULL << PERF_REG_X86_R15))

/*
 * Per register state.
 */
struct er_account {
	raw_spinlock_t	lock;	/* per-core: protect structure */
	u64		config;	/* extra MSR config */
	u64		reg;	/* extra MSR number */
	atomic_t	ref;	/* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account regs[EXTRA_REG_MAX];
	int		refcnt;		/* per-core: #HT threads */
	unsigned	core_id;	/* per-core: core id */
};

enum intel_excl_state_type {
	INTEL_EXCL_UNUSED    = 0, /* counter is unused */
	INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
	INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};

struct intel_excl_states {
	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
	bool sched_started; /* true if scheduling has started */
};

struct intel_excl_cntrs {
	raw_spinlock_t	lock;

	struct intel_excl_states states[2];

	union {
		u16	has_exclusive[2];
		u32	exclusive_present;
	};

	int		refcnt;		/* per-core: #HT threads */
	unsigned	core_id;	/* per-core: core id */
};

struct x86_perf_task_context;
#define MAX_LBR_ENTRIES		32

enum {
	X86_PERF_KFREE_SHARED = 0,
	X86_PERF_KFREE_EXCL   = 1,
	X86_PERF_KFREE_MAX
};

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events; /* the # of events in the below arrays */
	int			n_added;  /* the # last events in the below arrays;
					     they've never been enabled yet */
	int			n_txn;    /* the # last events in the below arrays;
					     added in the current transaction */
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];

	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
	struct event_constraint	*event_constraint[X86_PMC_IDX_MAX];

	int			n_excl; /* the number of exclusive events */

	unsigned int		txn_flags;
	int			is_fake;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	void			*ds_pebs_vaddr;
	void			*ds_bts_vaddr;
	u64			pebs_enabled;
	int			n_pebs;
	int			n_large_pebs;

	/* Current super set of events hardware configuration */
	u64			pebs_data_cfg;
	u64			active_pebs_data_cfg;
	int			pebs_record_size;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	int				lbr_pebs_users;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
	struct er_account		*lbr_sel;
	u64				br_sel;
	struct x86_perf_task_context	*last_task_ctx;
	int				last_log_id;

	/*
	 * Intel host/guest exclude bits
	 */
	u64				intel_ctrl_guest_mask;
	u64				intel_ctrl_host_mask;
	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];

	/*
	 * Intel checkpoint mask
	 */
	u64				intel_cp_status;

	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;
	/*
	 * manage exclusive counter access between hyperthread
	 */
	struct event_constraint *constraint_list; /* in enable order */
	struct intel_excl_cntrs	*excl_cntrs;
	int excl_thread_id; /* 0 or 1 */

	/*
	 * SKL TSX_FORCE_ABORT shadow
	 */
	u64				tfa_shadow;

	/*
	 * AMD specific bits
	 */
	struct amd_nb			*amd_nb;
	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
	u64				perf_ctr_virt_mask;

	void				*kfree_on_online[X86_PERF_KFREE_MAX];
};

#define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) {	\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.size = (e) - (c),		\
	.cmask = (m),			\
	.weight = (w),			\
	.overlap = (o),			\
	.flags = f,			\
}

#define __EVENT_CONSTRAINT(c, n, m, w, o, f)	\
	__EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f)

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

/*
 * The constraint_match() function only works for 'simple' event codes
 * and not for extended (AMD64_EVENTSEL_EVENT) event codes.
 */
#define EVENT_CONSTRAINT_RANGE(c, e, n, m)	\
	__EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0)

#define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
			   0, PERF_X86_EVENT_EXCL)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events then. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!) which
 * will increase scheduling cycles for an over-committed system
 * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros
 * and their counter masks must be kept at a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on a range of Event codes
 */
#define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n)			\
	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)

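/*
 * Illustrative usage (a sketch of the pattern used by the Intel constraint
 * tables; the pairings shown are the conventional architectural ones): fixed
 * counters sit above bit 32 of the index mask, so counter n is (1ULL << (32+n)):
 *
 *	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	// INST_RETIRED.ANY
 *	FIXED_EVENT_CONSTRAINT(0x003c, 1),	// CPU_CLK_UNHALTED.CORE
 *	FIXED_EVENT_CONSTRAINT(0x0300, 2),	// CPU_CLK_UNHALTED.REF
 */
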
/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Constraint on specific umask bit only + event */
#define INTEL_UBIT_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PST_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)

#define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n)			\
	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \
	__EVENT_CONSTRAINT_RANGE(code, end, n,				\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
	__EVENT_CONSTRAINT(code, n, \
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)

/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight != -1; (e)++)

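/*
 * Illustrative sketch (hypothetical table): constraint arrays end with
 * EVENT_CONSTRAINT_END and are walked with for_each_event_constraint():
 *
 *	static struct event_constraint my_constraints[] = {
 *		INTEL_EVENT_CONSTRAINT(0xc0, 0x3),	// event 0xc0: counters 0-1 only
 *		EVENT_CONSTRAINT_END
 *	};
 *
 *	struct event_constraint *c;
 *	for_each_event_constraint(c, my_constraints)
 *		if (constraint_match(c, code))
 *			return c;
 */
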
/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMU of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_event.
 */
struct extra_reg {
	unsigned int		event;
	unsigned int		msr;
	u64			config_mask;
	u64			valid_mask;
	int			idx;  /* per_xxx->regs[] reg index */
	bool			extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),			\
	.msr = (ms),			\
	.config_mask = (m),		\
	.valid_mask = (vm),		\
	.idx = EXTRA_REG_##i,		\
	.extra_msr_access = true,	\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
	INTEL_UEVENT_EXTRA_REG(c, \
			       MSR_PEBS_LD_LAT_THRESHOLD, \
			       0xffff, \
			       LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)

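/*
 * Illustrative usage (hypothetical table; this is the pattern used by the
 * model-specific extra_regs arrays): tie the load-latency event to its
 * threshold MSR, with EVENT_EXTRA_END as the terminator:
 *
 *	static struct extra_reg my_extra_regs[] = {
 *		INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),  // mem loads w/ latency threshold
 *		EVENT_EXTRA_END
 *	};
 */
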
union perf_capabilities {
	struct {
		u64	lbr_format:6;
		u64	pebs_trap:1;
		u64	pebs_arch_reg:1;
		u64	pebs_format:4;
		u64	smm_freeze:1;
		/*
		 * PMU supports separate counter range for writing
		 * values > 32bit.
		 */
		u64	full_width_write:1;
		u64	pebs_baseline:1;
	};
	u64	capabilities;
};

struct x86_pmu_quirk {
	struct x86_pmu_quirk *next;
	void (*func)(void);
};

union x86_pmu_config {
	struct {
		u64 event:8,
		    umask:8,
		    usr:1,
		    os:1,
		    edge:1,
		    pc:1,
		    interrupt:1,
		    __reserved1:1,
		    en:1,
		    inv:1,
		    cmask:8,
		    event2:4,
		    __reserved2:4,
		    go:1,
		    ho:1;
	} bits;
	u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value

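/*
 * Illustrative usage (hypothetical values): X86_CONFIG() assembles a raw
 * event-select word from named bitfields, e.g. event 0xc0 with an inverted
 * counter-mask of 16:
 *
 *	u64 cfg = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
 */
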
enum {
	x86_lbr_exclusive_lbr,
	x86_lbr_exclusive_bts,
	x86_lbr_exclusive_pt,
	x86_lbr_exclusive_max,
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	void		(*add)(struct perf_event *);
	void		(*del)(struct perf_event *);
	void		(*read)(struct perf_event *event);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	int		(*addr_offset)(int index, bool eventsel);
	int		(*rdpmc_index)(int index);
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	union {
			unsigned long events_maskl;
			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
	};
	int		events_mask_len;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 int idx,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*start_scheduling)(struct cpu_hw_events *cpuc);

	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);

	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	int		perfctr_second_write;
	u64		(*limit_period)(struct perf_event *event, u64 l);

	/* PMI handler bits */
	unsigned int	late_ack		:1,
			counter_freezing	:1;
	/*
	 * sysfs attrs
	 */
	int		attr_rdpmc_broken;
	int		attr_rdpmc;
	struct attribute **format_attrs;
	struct attribute **event_attrs;
	struct attribute **caps_attrs;

	ssize_t		(*events_sysfs_show)(char *page, u64 config);
	struct attribute **cpu_events;

	unsigned long	attr_freeze_on_smi;
	struct attribute **attrs;

	/*
	 * CPU Hotplug hooks
	 */
	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	void		(*check_microcode)(void);
	void		(*sched_task)(struct perf_event_context *ctx,
				      bool sched_in);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64		intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	unsigned int	bts			:1,
			bts_active		:1,
			pebs			:1,
			pebs_active		:1,
			pebs_broken		:1,
			pebs_prec_dist		:1,
			pebs_no_tlb		:1,
			pebs_no_isolation	:1;
	int		pebs_record_size;
	int		pebs_buffer_size;
	int		max_pebs_events;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;
	void		(*pebs_aliases)(struct perf_event *event);
	unsigned long	large_pebs_flags;
	u64		rtm_abort_event;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs */
	int		lbr_nr;			   /* hardware stack size */
	u64		lbr_sel_mask;		   /* LBR_SELECT valid bits */
	const int	*lbr_sel_map;		   /* lbr_select mappings */
	bool		lbr_double_abort;	   /* duplicated lbr aborts */
	bool		lbr_pt_coexist;		   /* (LBR|BTS) may coexist with PT */

	/*
	 * Intel PT/LBR/BTS are exclusive
	 */
	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];

	/*
	 * AMD bits
	 */
	unsigned int	amd_nb_constraints : 1;

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int flags;

	/*
	 * Intel host/guest support (KVM)
	 */
	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);

	/*
	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
	 */
	int (*check_period) (struct perf_event *event, u64 period);
};

struct x86_perf_task_context {
	u64 lbr_from[MAX_LBR_ENTRIES];
	u64 lbr_to[MAX_LBR_ENTRIES];
	u64 lbr_info[MAX_LBR_ENTRIES];
	int tos;
	int valid_lbrs;
	int lbr_callstack_users;
	int lbr_stack_state;
	int log_id;
};

#define x86_add_quirk(func_)						\
do {									\
	static struct x86_pmu_quirk __quirk __initdata = {		\
		.func = func_,						\
	};								\
	__quirk.next = x86_pmu.quirks;					\
	x86_pmu.quirks = &__quirk;					\
} while (0)

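/*
 * Illustrative usage (hypothetical quirk; real ones live in the model init
 * code): quirks are queued at init time and run once during PMU setup:
 *
 *	static __init void my_model_quirk(void)
 *	{
 *		pr_info("applying model specific PMU quirk\n");
 *	}
 *	...
 *	x86_add_quirk(my_model_quirk);
 */
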
/*
 * x86_pmu flags
 */
#define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
#define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs */
#define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements */
#define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */
#define PMU_FL_PEBS_ALL		0x10 /* all events are valid PEBS events */
#define PMU_FL_TFA		0x20 /* deal with TSX force abort */

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)						\
static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= PERF_COUNT_HW_##_id,				\
	.event_str	= NULL,						\
};

#define EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= 0,						\
	.event_str	= str,						\
};

#define EVENT_ATTR_STR_HT(_name, v, noht, ht)				\
static struct perf_pmu_events_ht_attr event_attr_##v = {		\
	.attr		= __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
	.id		= 0,						\
	.event_str_noht	= noht,						\
	.event_str_ht	= ht,						\
}

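/*
 * Illustrative usage (values hypothetical; this is the pattern used by the
 * model-specific event lists): publish a named event string through sysfs:
 *
 *	EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
 */
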
struct pmu *x86_get_pmu(void);
extern struct x86_pmu x86_pmu __read_mostly;

static inline bool x86_pmu_has_lbr_callstack(void)
{
	return  x86_pmu.lbr_sel_map &&
		x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
				   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
				  x86_pmu.addr_offset(index, false) : index);
}

static inline int x86_pmu_rdpmc_index(int index)
{
	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}

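/*
 * Illustrative note (an assumption about typical layouts, not a statement
 * from this header): with no addr_offset callback the control/counter MSRs
 * are contiguous, so x86_pmu_config_addr(1) is simply eventsel + 1; PMUs
 * whose MSRs are interleaved (e.g. AMD Fam15h core counters) supply an
 * addr_offset callback to stride the index instead.
 */
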
int x86_add_exclusive(unsigned int what);

void x86_del_exclusive(unsigned int what);

int x86_reserve_hardware(void);

void x86_release_hardware(void);

int x86_pmu_max_precise(void);

void hw_perf_lbr_event_destroy(struct perf_event *event);

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}

void x86_pmu_enable_all(int added);

int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int gpmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}

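/*
 * Illustrative note: on 64-bit kernels the address space is split at the
 * sign bit, so every kernel address has the top bit set and is negative
 * when viewed as a signed long; hence the (long)ip < 0 test above.
 */
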
/*
 * Not all PMUs provide the right context information to place the reported IP
 * into full context. Specifically segment registers are typically not
 * supplied.
 *
 * Assuming the address is a linear address (it is for IBS), we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
 * to reflect this.
 *
 * Intel PEBS/LBR appear to typically provide the effective address, nothing
 * much we can do about that but pray and treat it like a linear address.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
	if (regs->flags & X86_VM_MASK)
		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
	regs->ip = ip;
}

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);

struct attribute **merge_attr(struct attribute **a, struct attribute **b);

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);
ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

910 | ||
81ec3f3c | 911 | static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period) |
48070342 | 912 | { |
67266c10 JO |
913 | struct hw_perf_event *hwc = &event->hw; |
914 | unsigned int hw_event, bts_event; | |
915 | ||
916 | if (event->attr.freq) | |
917 | return false; | |
918 | ||
919 | hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; | |
920 | bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); | |
48070342 | 921 | |
81ec3f3c JO |
922 | return hw_event == bts_event && period == 1; |
923 | } | |
924 | ||
925 | static inline bool intel_pmu_has_bts(struct perf_event *event) | |
926 | { | |
927 | struct hw_perf_event *hwc = &event->hw; | |
928 | ||
929 | return intel_pmu_has_bts_period(event, hwc->sample_period); | |
48070342 AS |
930 | } |
931 | ||
int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event);

extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_slm_pebs_event_constraints[];

extern struct event_constraint intel_glm_pebs_event_constraints[];

extern struct event_constraint intel_glp_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

extern struct event_constraint intel_ivb_pebs_event_constraints[];

extern struct event_constraint intel_hsw_pebs_event_constraints[];

extern struct event_constraint intel_bdw_pebs_event_constraints[];

extern struct event_constraint intel_skl_pebs_event_constraints[];

extern struct event_constraint intel_icl_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_add(struct perf_event *event);

void intel_pmu_pebs_del(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_pmu_auto_reload_read(struct perf_event *event);

void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr);

void intel_ds_init(void);

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);

u64 lbr_from_signext_quirk_wr(u64 val);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_add(struct perf_event *event);

void intel_pmu_lbr_del(struct perf_event *event);

void intel_pmu_lbr_enable_all(bool pmi);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_slm(void);

void intel_pmu_lbr_init_snb(void);

void intel_pmu_lbr_init_hsw(void);

void intel_pmu_lbr_init_skl(void);

void intel_pmu_lbr_init_knl(void);

void intel_pmu_pebs_data_source_nhm(void);

void intel_pmu_pebs_data_source_skl(bool pmem);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

void intel_pt_interrupt(void);

int intel_bts_interrupt(void);

void intel_bts_enable_local(void);

void intel_bts_disable_local(void);

int p4_pmu_init(void);

int p6_pmu_init(void);

int knc_pmu_init(void);

static inline int is_ht_workaround_enabled(void)
{
	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
}

#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
	return 0;
}

static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
{
	return 0;
}

static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
{
}

static inline int is_ht_workaround_enabled(void)
{
	return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */