/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

#include <linux/static_call.h>

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC 32
#define INTEL_PMC_MAX_FIXED 16
#define INTEL_PMC_IDX_FIXED 32

#define X86_PMC_IDX_MAX 64

#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
#define MSR_ARCH_PERFMON_PERFCTR1 0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
#define MSR_ARCH_PERFMON_EVENTSEL1 0x187

#define ARCH_PERFMON_EVENTSEL_EVENT 0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK 0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR (1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS (1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE (1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL (1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT (1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY (1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL
#define ARCH_PERFMON_EVENTSEL_BR_CNTR (1ULL << 35)
#define ARCH_PERFMON_EVENTSEL_EQ (1ULL << 36)
#define ARCH_PERFMON_EVENTSEL_UMASK2 (0xFFULL << 40)
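/*
 * Illustrative sketch, not part of the kernel header: composing a PERFEVTSEL
 * value from the bits above for the architectural "unhalted core cycles"
 * event (event 0x3c, umask 0x00), counting in both user and kernel mode with
 * the overflow interrupt enabled. The helper name is hypothetical.
 */
static inline u64 example_core_cycles_evtsel(void)
{
        return (0x3cULL & ARCH_PERFMON_EVENTSEL_EVENT) |
               ((0x00ULL << 8) & ARCH_PERFMON_EVENTSEL_UMASK) |
               ARCH_PERFMON_EVENTSEL_USR |
               ARCH_PERFMON_EVENTSEL_OS |
               ARCH_PERFMON_EVENTSEL_INT |
               ARCH_PERFMON_EVENTSEL_ENABLE;
}
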
#define INTEL_FIXED_BITS_MASK 0xFULL
#define INTEL_FIXED_BITS_STRIDE 4
#define INTEL_FIXED_0_KERNEL (1ULL << 0)
#define INTEL_FIXED_0_USER (1ULL << 1)
#define INTEL_FIXED_0_ANYTHREAD (1ULL << 2)
#define INTEL_FIXED_0_ENABLE_PMI (1ULL << 3)
#define INTEL_FIXED_3_METRICS_CLEAR (1ULL << 2)

#define HSW_IN_TX (1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED (1ULL << 33)
#define ICL_EVENTSEL_ADAPTIVE (1ULL << 34)
#define ICL_FIXED_0_ADAPTIVE (1ULL << 32)

#define intel_fixed_bits_by_idx(_idx, _bits) \
        ((_bits) << ((_idx) * INTEL_FIXED_BITS_STRIDE))
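/*
 * Illustrative sketch, not part of the kernel header: building the per-counter
 * control bits that would be written into MSR_ARCH_PERFMON_FIXED_CTR_CTRL
 * (defined further below). Each fixed counter owns a 4-bit field, so the bits
 * for fixed counter 1 land at bit offset 4. The helper name is hypothetical.
 */
static inline u64 example_fixed1_ctrl_bits(void)
{
        return intel_fixed_bits_by_idx(1, INTEL_FIXED_0_KERNEL |
                                           INTEL_FIXED_0_USER |
                                           INTEL_FIXED_0_ENABLE_PMI);
}
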
#define AMD64_EVENTSEL_INT_CORE_ENABLE (1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY (1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY (1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT 37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK \
        (0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT \
        (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK \
        (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define AMD64_L3_SLICE_SHIFT 48
#define AMD64_L3_SLICE_MASK \
        (0xFULL << AMD64_L3_SLICE_SHIFT)
#define AMD64_L3_SLICEID_MASK \
        (0x7ULL << AMD64_L3_SLICE_SHIFT)

#define AMD64_L3_THREAD_SHIFT 56
#define AMD64_L3_THREAD_MASK \
        (0xFFULL << AMD64_L3_THREAD_SHIFT)
#define AMD64_L3_F19H_THREAD_MASK \
        (0x3ULL << AMD64_L3_THREAD_SHIFT)

#define AMD64_L3_EN_ALL_CORES BIT_ULL(47)
#define AMD64_L3_EN_ALL_SLICES BIT_ULL(46)

#define AMD64_L3_COREID_SHIFT 42
#define AMD64_L3_COREID_MASK \
        (0x7ULL << AMD64_L3_COREID_SHIFT)

#define X86_RAW_EVENT_MASK \
        (ARCH_PERFMON_EVENTSEL_EVENT | \
         ARCH_PERFMON_EVENTSEL_UMASK | \
         ARCH_PERFMON_EVENTSEL_EDGE | \
         ARCH_PERFMON_EVENTSEL_INV | \
         ARCH_PERFMON_EVENTSEL_CMASK)
#define X86_ALL_EVENT_FLAGS \
        (ARCH_PERFMON_EVENTSEL_EDGE | \
         ARCH_PERFMON_EVENTSEL_INV | \
         ARCH_PERFMON_EVENTSEL_CMASK | \
         ARCH_PERFMON_EVENTSEL_ANY | \
         ARCH_PERFMON_EVENTSEL_PIN_CONTROL | \
         HSW_IN_TX | \
         HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK \
        (X86_RAW_EVENT_MASK | \
         AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB \
        (AMD64_EVENTSEL_EVENT | \
         ARCH_PERFMON_EVENTSEL_UMASK)

#define AMD64_PERFMON_V2_EVENTSEL_EVENT_NB \
        (AMD64_EVENTSEL_EVENT | \
         GENMASK_ULL(37, 36))

#define AMD64_PERFMON_V2_EVENTSEL_UMASK_NB \
        (ARCH_PERFMON_EVENTSEL_UMASK | \
         GENMASK_ULL(27, 24))

#define AMD64_PERFMON_V2_RAW_EVENT_MASK_NB \
        (AMD64_PERFMON_V2_EVENTSEL_EVENT_NB | \
         AMD64_PERFMON_V2_EVENTSEL_UMASK_NB)
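/*
 * Illustrative sketch, not part of the kernel header: the *_RAW_EVENT_MASK
 * constants above are used to strip reserved bits from a user-supplied raw
 * config before it is programmed into a counter. This is a hypothetical,
 * simplified version of that filtering step.
 */
static inline u64 example_sanitize_raw_config(u64 config, bool amd)
{
        return config & (amd ? AMD64_RAW_EVENT_MASK : X86_RAW_EVENT_MASK);
}
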
#define AMD64_PERFMON_V2_ENABLE_UMC BIT_ULL(31)
#define AMD64_PERFMON_V2_EVENTSEL_EVENT_UMC GENMASK_ULL(7, 0)
#define AMD64_PERFMON_V2_EVENTSEL_RDWRMASK_UMC GENMASK_ULL(9, 8)
#define AMD64_PERFMON_V2_RAW_EVENT_MASK_UMC \
        (AMD64_PERFMON_V2_EVENTSEL_EVENT_UMC | \
         AMD64_PERFMON_V2_EVENTSEL_RDWRMASK_UMC)

#define AMD64_NUM_COUNTERS 4
#define AMD64_NUM_COUNTERS_CORE 6
#define AMD64_NUM_COUNTERS_NB 4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
        (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6
#define ARCH_PERFMON_EVENTS_COUNT 7

#define PEBS_DATACFG_MEMINFO BIT_ULL(0)
#define PEBS_DATACFG_GP BIT_ULL(1)
#define PEBS_DATACFG_XMMS BIT_ULL(2)
#define PEBS_DATACFG_LBRS BIT_ULL(3)
#define PEBS_DATACFG_LBR_SHIFT 24
#define PEBS_DATACFG_CNTR BIT_ULL(4)
#define PEBS_DATACFG_CNTR_SHIFT 32
#define PEBS_DATACFG_CNTR_MASK GENMASK_ULL(15, 0)
#define PEBS_DATACFG_FIX_SHIFT 48
#define PEBS_DATACFG_FIX_MASK GENMASK_ULL(7, 0)
#define PEBS_DATACFG_METRICS BIT_ULL(5)

/* Steal the highest bit of pebs_data_cfg for SW usage */
#define PEBS_UPDATE_DS_SW BIT_ULL(63)
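/*
 * Illustrative sketch, not part of the kernel header: composing an adaptive
 * PEBS data configuration that requests memory info, GPRs and LBR entries in
 * each record. The LBR entry count is shifted into the field at
 * PEBS_DATACFG_LBR_SHIFT; the exact encoding of that count field is defined
 * by the SDM, so the value 16 here is purely illustrative, as is the helper
 * name.
 */
static inline u64 example_pebs_data_cfg(void)
{
        return PEBS_DATACFG_MEMINFO |
               PEBS_DATACFG_GP |
               PEBS_DATACFG_LBRS |
               ((u64)16 << PEBS_DATACFG_LBR_SHIFT);
}
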
/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
        struct {
                unsigned int version_id:8;
                unsigned int num_counters:8;
                unsigned int bit_width:8;
                unsigned int mask_length:8;
        } split;
        unsigned int full;
};

union cpuid10_ebx {
        struct {
                unsigned int no_unhalted_core_cycles:1;
                unsigned int no_instructions_retired:1;
                unsigned int no_unhalted_reference_cycles:1;
                unsigned int no_llc_reference:1;
                unsigned int no_llc_misses:1;
                unsigned int no_branch_instruction_retired:1;
                unsigned int no_branch_misses_retired:1;
        } split;
        unsigned int full;
};

union cpuid10_edx {
        struct {
                unsigned int num_counters_fixed:5;
                unsigned int bit_width_fixed:8;
                unsigned int reserved1:2;
                unsigned int anythread_deprecated:1;
                unsigned int reserved2:16;
        } split;
        unsigned int full;
};
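/*
 * Illustrative sketch, not part of the kernel header: decoding CPUID leaf 0xA
 * with the unions above, roughly the way the perf core enumerates the PMU.
 * The function name is hypothetical and cpuid() is assumed to come from
 * <asm/processor.h>.
 */
static inline void example_enumerate_arch_perfmon(void)
{
        union cpuid10_eax eax;
        union cpuid10_ebx ebx;
        union cpuid10_edx edx;
        unsigned int unused;

        cpuid(0xa, &eax.full, &ebx.full, &unused, &edx.full);

        if (eax.split.version_id == 0)
                return; /* no architectural PMU */

        /*
         * eax.split.num_counters       - general-purpose counters per core
         * eax.split.bit_width          - width of those counters
         * edx.split.num_counters_fixed - fixed-function counters
         * ebx.split.no_*               - enumerated-but-unavailable events
         */
}
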
/*
 * Intel "Architectural Performance Monitoring extension" CPUID
 * detection/enumeration details:
 */
#define ARCH_PERFMON_EXT_LEAF 0x00000023
#define ARCH_PERFMON_NUM_COUNTER_LEAF 0x1
#define ARCH_PERFMON_ACR_LEAF 0x2

union cpuid35_eax {
        struct {
                unsigned int leaf0:1;
                /* Counters Sub-Leaf */
                unsigned int cntr_subleaf:1;
                /* Auto Counter Reload Sub-Leaf */
                unsigned int acr_subleaf:1;
                /* Events Sub-Leaf */
                unsigned int events_subleaf:1;
                unsigned int reserved:28;
        } split;
        unsigned int full;
};

union cpuid35_ebx {
        struct {
                /* UnitMask2 Supported */
                unsigned int umask2:1;
                /* EQ-bit Supported */
                unsigned int eq:1;
                unsigned int reserved:30;
        } split;
        unsigned int full;
};

/*
 * Intel Architectural LBR CPUID detection/enumeration details:
 */
union cpuid28_eax {
        struct {
                /* Supported LBR depth values */
                unsigned int lbr_depth_mask:8;
                unsigned int reserved:22;
                /* Deep C-state Reset */
                unsigned int lbr_deep_c_reset:1;
                /* IP values contain LIP */
                unsigned int lbr_lip:1;
        } split;
        unsigned int full;
};

union cpuid28_ebx {
        struct {
                /* CPL Filtering Supported */
                unsigned int lbr_cpl:1;
                /* Branch Filtering Supported */
                unsigned int lbr_filter:1;
                /* Call-stack Mode Supported */
                unsigned int lbr_call_stack:1;
        } split;
        unsigned int full;
};

union cpuid28_ecx {
        struct {
                /* Mispredict Bit Supported */
                unsigned int lbr_mispred:1;
                /* Timed LBRs Supported */
                unsigned int lbr_timed_lbr:1;
                /* Branch Type Field Supported */
                unsigned int lbr_br_type:1;
                unsigned int reserved:13;
                /* Branch counters (Event Logging) Supported */
                unsigned int lbr_counters:4;
        } split;
        unsigned int full;
};
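/*
 * Illustrative sketch, not part of the kernel header: bit N of lbr_depth_mask
 * advertises support for an architectural LBR depth of 8 * (N + 1) entries,
 * so the deepest supported configuration can be derived as below. The helper
 * name is hypothetical.
 */
static inline unsigned int example_arch_lbr_max_depth(union cpuid28_eax eax)
{
        unsigned int depth = 0;
        int i;

        for (i = 0; i < 8; i++) {
                if (eax.split.lbr_depth_mask & (1U << i))
                        depth = 8 * (i + 1);
        }

        return depth;
}
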

/*
 * AMD "Extended Performance Monitoring and Debug" CPUID
 * detection/enumeration details:
 */
union cpuid_0x80000022_ebx {
        struct {
                /* Number of Core Performance Counters */
                unsigned int num_core_pmc:4;
                /* Number of available LBR Stack Entries */
                unsigned int lbr_v2_stack_sz:6;
                /* Number of Data Fabric Counters */
                unsigned int num_df_pmc:6;
                /* Number of Unified Memory Controller Counters */
                unsigned int num_umc_pmc:6;
        } split;
        unsigned int full;
};

struct x86_pmu_capability {
        int version;
        int num_counters_gp;
        int num_counters_fixed;
        int bit_width_gp;
        int bit_width_fixed;
        unsigned int events_mask;
        int events_mask_len;
        unsigned int pebs_ept :1;
};

/*
 * Fixed-purpose performance events:
 */

/* RDPMC offset for Fixed PMCs */
#define INTEL_PMC_FIXED_RDPMC_BASE (1 << 30)
#define INTEL_PMC_FIXED_RDPMC_METRICS (1 << 29)
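/*
 * Illustrative sketch, not part of the kernel header: the RDPMC index for a
 * fixed counter is formed by setting bit 30 on top of the counter index, so
 * fixed counter 1 (CPU_CLK_Unhalted.Core) is read with ECX = 0x40000001.
 * This hypothetical helper only computes the index value that would be
 * handed to the RDPMC instruction.
 */
static inline u32 example_fixed_rdpmc_index(unsigned int idx)
{
        return INTEL_PMC_FIXED_RDPMC_BASE | idx;
}
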
/*
 * All the fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d

/*
 * There is no event-code assigned to the fixed-mode PMCs.
 *
 * For a fixed-mode PMC, which has an equivalent event on a general-purpose
 * PMC, the event-code of the equivalent event is used for the fixed-mode PMC,
 * e.g., Instr_Retired.Any and CPU_CLK_Unhalted.Core.
 *
 * For a fixed-mode PMC, which doesn't have an equivalent event, a
 * pseudo-encoding is used, e.g., CPU_CLK_Unhalted.Ref and TOPDOWN.SLOTS.
 * The pseudo event-code for a fixed-mode PMC must be 0x00.
 * The pseudo umask-code is 0xX. The X equals the index of the fixed
 * counter + 1, e.g., the fixed counter 2 has the pseudo-encoding 0x0300.
 *
 * The counts are available in separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS (INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES (INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: event=0x00,umask=0x3 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES (INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES (1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)

/* TOPDOWN.SLOTS: event=0x00,umask=0x4 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR3 0x30c
#define INTEL_PMC_IDX_FIXED_SLOTS (INTEL_PMC_IDX_FIXED + 3)
#define INTEL_PMC_MSK_FIXED_SLOTS (1ULL << INTEL_PMC_IDX_FIXED_SLOTS)

/* TOPDOWN_BAD_SPECULATION.ALL: fixed counter 4 (Atom only) */
/* TOPDOWN_FE_BOUND.ALL: fixed counter 5 (Atom only) */
/* TOPDOWN_RETIRING.ALL: fixed counter 6 (Atom only) */

static inline bool use_fixed_pseudo_encoding(u64 code)
{
        return !(code & 0xff);
}
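/*
 * Illustrative sketch, not part of the kernel header: following the comment
 * above, the pseudo-encoding for a fixed counter N that has no
 * general-purpose equivalent is event=0x00, umask=N+1, e.g. 0x0300 for
 * CPU_CLK_Unhalted.Ref on fixed counter 2 and 0x0400 for TOPDOWN.SLOTS on
 * fixed counter 3. Such a config has an all-zero event field, which is what
 * use_fixed_pseudo_encoding() tests for. The helper name is hypothetical.
 */
static inline u64 example_fixed_pseudo_encoding(unsigned int fixed_idx)
{
        return (u64)(fixed_idx + 1) << 8;       /* event=0x00, umask=idx+1 */
}
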
/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose the value 47 for the fixed index of BTS, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS (INTEL_PMC_IDX_FIXED + 15)

/*
 * The PERF_METRICS MSR is modeled as several magic fixed-mode PMCs, one for
 * each TopDown metric event.
 *
 * Internally the TopDown metric events are mapped to the FxCtr 3 (SLOTS).
 */
#define INTEL_PMC_IDX_METRIC_BASE (INTEL_PMC_IDX_FIXED + 16)
#define INTEL_PMC_IDX_TD_RETIRING (INTEL_PMC_IDX_METRIC_BASE + 0)
#define INTEL_PMC_IDX_TD_BAD_SPEC (INTEL_PMC_IDX_METRIC_BASE + 1)
#define INTEL_PMC_IDX_TD_FE_BOUND (INTEL_PMC_IDX_METRIC_BASE + 2)
#define INTEL_PMC_IDX_TD_BE_BOUND (INTEL_PMC_IDX_METRIC_BASE + 3)
#define INTEL_PMC_IDX_TD_HEAVY_OPS (INTEL_PMC_IDX_METRIC_BASE + 4)
#define INTEL_PMC_IDX_TD_BR_MISPREDICT (INTEL_PMC_IDX_METRIC_BASE + 5)
#define INTEL_PMC_IDX_TD_FETCH_LAT (INTEL_PMC_IDX_METRIC_BASE + 6)
#define INTEL_PMC_IDX_TD_MEM_BOUND (INTEL_PMC_IDX_METRIC_BASE + 7)
#define INTEL_PMC_IDX_METRIC_END INTEL_PMC_IDX_TD_MEM_BOUND
#define INTEL_PMC_MSK_TOPDOWN ((0xffull << INTEL_PMC_IDX_METRIC_BASE) | \
                                INTEL_PMC_MSK_FIXED_SLOTS)

/*
 * There is no event-code assigned to the TopDown events.
 *
 * For the slots event, use the pseudo code of the fixed counter 3.
 *
 * For the metric events, the pseudo event-code is 0x00.
 * The pseudo umask-code starts from the middle of the pseudo event
 * space, 0x80.
 */
#define INTEL_TD_SLOTS 0x0400 /* TOPDOWN.SLOTS */
/* Level 1 metrics */
#define INTEL_TD_METRIC_RETIRING 0x8000 /* Retiring metric */
#define INTEL_TD_METRIC_BAD_SPEC 0x8100 /* Bad speculation metric */
#define INTEL_TD_METRIC_FE_BOUND 0x8200 /* FE bound metric */
#define INTEL_TD_METRIC_BE_BOUND 0x8300 /* BE bound metric */
/* Level 2 metrics */
#define INTEL_TD_METRIC_HEAVY_OPS 0x8400 /* Heavy Operations metric */
#define INTEL_TD_METRIC_BR_MISPREDICT 0x8500 /* Branch Mispredict metric */
#define INTEL_TD_METRIC_FETCH_LAT 0x8600 /* Fetch Latency metric */
#define INTEL_TD_METRIC_MEM_BOUND 0x8700 /* Memory bound metric */

#define INTEL_TD_METRIC_MAX INTEL_TD_METRIC_MEM_BOUND
#define INTEL_TD_METRIC_NUM 8
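/*
 * Illustrative sketch, not part of the kernel header: per the comment above,
 * the TopDown metric pseudo-encodings are event=0x00 with umask 0x80..0x87,
 * so the metric number and its fake counter index can be recovered from a
 * config value as below. The helper names are hypothetical.
 */
static inline unsigned int example_td_metric_num(u64 config)
{
        return (config - INTEL_TD_METRIC_RETIRING) >> 8;
}

static inline unsigned int example_td_metric_pmc_idx(u64 config)
{
        return INTEL_PMC_IDX_METRIC_BASE + example_td_metric_num(config);
}
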
#define INTEL_TD_CFG_METRIC_CLEAR_BIT 0
#define INTEL_TD_CFG_METRIC_CLEAR BIT_ULL(INTEL_TD_CFG_METRIC_CLEAR_BIT)

static inline bool is_metric_idx(int idx)
{
        return (unsigned)(idx - INTEL_PMC_IDX_METRIC_BASE) < INTEL_TD_METRIC_NUM;
}

static inline bool is_topdown_idx(int idx)
{
        return is_metric_idx(idx) || idx == INTEL_PMC_IDX_FIXED_SLOTS;
}

#define INTEL_PMC_OTHER_TOPDOWN_BITS(bit) \
        (~(0x1ull << bit) & INTEL_PMC_MSK_TOPDOWN)

#define GLOBAL_STATUS_COND_CHG BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF_BIT 62
#define GLOBAL_STATUS_BUFFER_OVF BIT_ULL(GLOBAL_STATUS_BUFFER_OVF_BIT)
#define GLOBAL_STATUS_UNC_OVF BIT_ULL(61)
#define GLOBAL_STATUS_ASIF BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN_BIT 58
#define GLOBAL_STATUS_LBRS_FROZEN BIT_ULL(GLOBAL_STATUS_LBRS_FROZEN_BIT)
#define GLOBAL_STATUS_TRACE_TOPAPMI_BIT 55
#define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(GLOBAL_STATUS_TRACE_TOPAPMI_BIT)
#define GLOBAL_STATUS_PERF_METRICS_OVF_BIT 48

#define GLOBAL_CTRL_EN_PERF_METRICS 48
/*
 * We model guest LBR event tracing as another fixed-mode PMC like BTS.
 *
 * We choose bit 58 because it's used to indicate LBR stack frozen state
 * for architectural perfmon v4, also we unconditionally mask that bit in
 * the handle_pmi_common(), so it'll never be set in the overflow handling.
 *
 * With this fake counter assigned, the guest LBR event user (such as KVM)
 * can program the LBR registers on its own, and we don't actually do anything
 * with them in the host context.
 */
#define INTEL_PMC_IDX_FIXED_VLBR (GLOBAL_STATUS_LBRS_FROZEN_BIT)

/*
 * Pseudo-encoding the guest LBR event as event=0x00,umask=0x1b,
 * since it would claim bit 58 which is effectively Fixed26.
 */
#define INTEL_FIXED_VLBR_EVENT 0x1b00

/*
 * Adaptive PEBS v4
 */

struct pebs_basic {
        u64 format_group:32,
            retire_latency:16,
            format_size:16;
        u64 ip;
        u64 applicable_counters;
        u64 tsc;
};

struct pebs_meminfo {
        u64 address;
        u64 aux;
        union {
                /* pre Alder Lake */
                u64 mem_latency;
                /* Alder Lake and later */
                struct {
                        u64 instr_latency:16;
                        u64 pad2:16;
                        u64 cache_latency:16;
                        u64 pad3:16;
                };
        };
        u64 tsx_tuning;
};

struct pebs_gprs {
        u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
        u64 r8, r9, r10, r11, r12, r13, r14, r15;
};

struct pebs_xmm {
        u64 xmm[16*2]; /* two entries for each register */
};

struct pebs_cntr_header {
        u32 cntr;
        u32 fixed;
        u32 metrics;
        u32 reserved;
};

#define INTEL_CNTR_METRICS 0x3

/*
 * AMD Extended Performance Monitoring and Debug cpuid feature detection
 */
#define EXT_PERFMON_DEBUG_FEATURES 0x80000022

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES 0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL (1U<<0)
#define IBS_CAPS_FETCHSAM (1U<<1)
#define IBS_CAPS_OPSAM (1U<<2)
#define IBS_CAPS_RDWROPCNT (1U<<3)
#define IBS_CAPS_OPCNT (1U<<4)
#define IBS_CAPS_BRNTRGT (1U<<5)
#define IBS_CAPS_OPCNTEXT (1U<<6)
#define IBS_CAPS_RIPINVALIDCHK (1U<<7)
#define IBS_CAPS_OPBRNFUSE (1U<<8)
#define IBS_CAPS_FETCHCTLEXTD (1U<<9)
#define IBS_CAPS_OPDATA4 (1U<<10)
#define IBS_CAPS_ZEN4 (1U<<11)
#define IBS_CAPS_OPLDLAT (1U<<12)
#define IBS_CAPS_OPDTLBPGSIZE (1U<<19)

#define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \
                          | IBS_CAPS_FETCHSAM \
                          | IBS_CAPS_OPSAM)

/*
 * IBS APIC setup
 */
#define IBSCTL 0x1cc
#define IBSCTL_LVT_OFFSET_VALID (1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK 0x0F

/* IBS fetch bits/masks */
#define IBS_FETCH_L3MISSONLY (1ULL<<59)
#define IBS_FETCH_RAND_EN (1ULL<<57)
#define IBS_FETCH_VAL (1ULL<<49)
#define IBS_FETCH_ENABLE (1ULL<<48)
#define IBS_FETCH_CNT 0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT 0x0000FFFFULL

/*
 * IBS op bits/masks
 * The lower 7 bits of the current count are random bits
 * preloaded by hardware and ignored in software
 */
#define IBS_OP_LDLAT_EN (1ULL<<63)
#define IBS_OP_LDLAT_THRSH (0xFULL<<59)
#define IBS_OP_CUR_CNT (0xFFF80ULL<<32)
#define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32)
#define IBS_OP_CUR_CNT_EXT_MASK (0x7FULL<<52)
#define IBS_OP_CNT_CTL (1ULL<<19)
#define IBS_OP_VAL (1ULL<<18)
#define IBS_OP_ENABLE (1ULL<<17)
#define IBS_OP_L3MISSONLY (1ULL<<16)
#define IBS_OP_MAX_CNT 0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT 0x007FFFFFULL /* not a register bit mask */
#define IBS_OP_MAX_CNT_EXT_MASK (0x7FULL<<20) /* separate upper 7 bits */
#define IBS_RIP_INVALID (1ULL<<38)

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
extern int forward_event_to_ibs(struct perf_event *event);
#else
static inline u32 get_ibs_caps(void) { return 0; }
static inline int forward_event_to_ibs(struct perf_event *event) { return -ENOENT; }
#endif
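/*
 * Illustrative sketch, not part of the kernel header: a driver-style check of
 * the IBS capability word before relying on the op-counting mode. The helper
 * name is hypothetical; get_ibs_caps() is the declaration just above.
 */
static inline bool example_ibs_has_op_cnt(void)
{
        u32 caps = get_ibs_caps();

        return (caps & IBS_CAPS_AVAIL) && (caps & IBS_CAPS_OPCNT);
}
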
#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT (1UL << 3)
#define PERF_EFLAGS_VM (1UL << 5)
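/*
 * Illustrative sketch, not part of the kernel header: perf_arch_misc_flags()
 * (declared below) translates the borrowed eflags bit into the generic sample
 * flag, conceptually along the lines of this hypothetical helper.
 * PERF_RECORD_MISC_EXACT_IP is assumed to come from <uapi/linux/perf_event.h>.
 */
static inline unsigned long example_misc_flags(unsigned long eflags)
{
        return (eflags & PERF_EFLAGS_EXACT) ? PERF_RECORD_MISC_EXACT_IP : 0;
}
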
struct pt_regs;
struct x86_perf_regs {
        struct pt_regs regs;
        u64 *xmm_regs;
};

extern unsigned long perf_arch_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_arch_misc_flags(struct pt_regs *regs);
extern unsigned long perf_arch_guest_misc_flags(struct pt_regs *regs);
#define perf_arch_misc_flags(regs) perf_arch_misc_flags(regs)
#define perf_arch_guest_misc_flags(regs) perf_arch_guest_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see
 * perf_arch_misc_flags() and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip) { \
        (regs)->ip = (__ip); \
        (regs)->sp = (unsigned long)__builtin_frame_address(0); \
        (regs)->cs = __KERNEL_CS; \
        regs->flags = 0; \
}

struct perf_guest_switch_msr {
        unsigned msr;
        u64 host, guest;
};

struct x86_pmu_lbr {
        unsigned int nr;
        unsigned int from;
        unsigned int to;
        unsigned int info;
        bool has_callstack;
};

extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern u64 perf_get_hw_event_config(int hw_event);
extern void perf_check_microcode(void);
extern void perf_clear_dirty_counters(void);
extern int x86_perf_rdpmc_index(struct perf_event *event);
#else
static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
        memset(cap, 0, sizeof(*cap));
}

static inline u64 perf_get_hw_event_config(int hw_event)
{
        return 0;
}

static inline void perf_events_lapic_init(void) { }
static inline void perf_check_microcode(void) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
extern void x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
#else
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
static inline void x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
{
        memset(lbr, 0, sizeof(*lbr));
}
#endif

#ifdef CONFIG_CPU_SUP_INTEL
extern void intel_pt_handle_vmx(int on);
#else
static inline void intel_pt_handle_vmx(int on)
{

}
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);

#if defined(CONFIG_PERF_EVENTS_AMD_BRS)

#define PERF_NEEDS_LOPWR_CB 1

/*
 * architectural low power callback impacts
 * drivers/acpi/processor_idle.c
 * drivers/acpi/acpi_pad.c
 */
extern void perf_amd_brs_lopwr_cb(bool lopwr_in);

DECLARE_STATIC_CALL(perf_lopwr_cb, perf_amd_brs_lopwr_cb);

static __always_inline void perf_lopwr_cb(bool lopwr_in)
{
        static_call_mod(perf_lopwr_cb)(lopwr_in);
}

#endif /* PERF_NEEDS_LOPWR_CB */

#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */