4 * Used to coordinate shared registers between HT threads or
5 * among events on a single PMU.
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 #include <linux/stddef.h>
11 #include <linux/types.h>
12 #include <linux/init.h>
13 #include <linux/slab.h>
14 #include <linux/export.h>
16 #include <asm/cpufeature.h>
17 #include <asm/hardirq.h>
20 #include "perf_event.h"
23 * Intel PerfMon, used on Core and later.
25 static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
27 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
28 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
29 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
30 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
31 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
32 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
33 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
34 [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
37 static struct event_constraint intel_core_event_constraints[] __read_mostly =
39 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
40 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
41 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
42 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
43 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
44 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
48 static struct event_constraint intel_core2_event_constraints[] __read_mostly =
50 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
51 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
52 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
53 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
54 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
55 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
56 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
57 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
58 INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
59 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
60 INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
61 INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
62 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
66 static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
68 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
69 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
70 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
71 INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
72 INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
73 INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
74 INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
75 INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
76 INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
77 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
78 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
82 static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
84 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
85 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
86 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
90 static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
92 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
93 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
94 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
95 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
96 INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
97 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
98 INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
102 static struct event_constraint intel_snb_event_constraints[] __read_mostly =
104 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
105 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
106 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
107 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
108 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
109 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
110 INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
111 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
112 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
113 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
114 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
115 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
119 static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
121 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
122 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
123 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
124 INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
125 INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
126 INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
127 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
128 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
129 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
130 INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
131 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
132 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
133 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
135 * Errata BV98 -- MEM_*_RETIRED events can leak between counters of SMT
136 * siblings; disable these events because they can corrupt unrelated counters.
139 INTEL_EVENT_CONSTRAINT(0xd0, 0x0), /* MEM_UOPS_RETIRED.* */
140 INTEL_EVENT_CONSTRAINT(0xd1, 0x0), /* MEM_LOAD_UOPS_RETIRED.* */
141 INTEL_EVENT_CONSTRAINT(0xd2, 0x0), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
142 INTEL_EVENT_CONSTRAINT(0xd3, 0x0), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
146 static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
148 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
149 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
150 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
151 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
155 static struct event_constraint intel_v1_event_constraints[] __read_mostly =
160 static struct event_constraint intel_gen_event_constraints[] __read_mostly =
162 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
163 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
164 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
168 static struct event_constraint intel_slm_event_constraints[] __read_mostly =
170 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
171 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
172 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
176 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
177 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
178 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
179 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
180 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
184 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
185 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
186 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
187 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
188 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
192 EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
193 EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
194 EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
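/*
 * Usage note (illustrative): the event strings above are exported via
 * sysfs (under /sys/bus/event_source/devices/cpu/events/) and can be
 * selected by name from userspace, e.g. by "perf mem record", which
 * programs mem-loads/mem-stores including the ldlat threshold.
 */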
196 struct attribute *nhm_events_attrs[] = {
197 EVENT_PTR(mem_ld_nhm),
201 struct attribute *snb_events_attrs[] = {
202 EVENT_PTR(mem_ld_snb),
203 EVENT_PTR(mem_st_snb),
207 static struct event_constraint intel_hsw_event_constraints[] = {
208 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
209 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
210 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
211 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
212 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
213 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
214 /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
215 INTEL_EVENT_CONSTRAINT(0x08a3, 0x4),
216 /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
217 INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4),
218 /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
219 INTEL_EVENT_CONSTRAINT(0x04a3, 0xf),
223 struct event_constraint intel_bdw_event_constraints[] = {
224 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
225 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
226 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
227 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
228 INTEL_EVENT_CONSTRAINT(0xa3, 0x4), /* CYCLE_ACTIVITY.* */
232 static u64 intel_pmu_event_map(int hw_event)
234 return intel_perfmon_event_map[hw_event];
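/*
 * Example (illustrative): intel_pmu_event_map(PERF_COUNT_HW_CPU_CYCLES)
 * returns 0x003c, the architectural UnHalted Core Cycles encoding from
 * the table above.
 */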
237 #define SNB_DMND_DATA_RD (1ULL << 0)
238 #define SNB_DMND_RFO (1ULL << 1)
239 #define SNB_DMND_IFETCH (1ULL << 2)
240 #define SNB_DMND_WB (1ULL << 3)
241 #define SNB_PF_DATA_RD (1ULL << 4)
242 #define SNB_PF_RFO (1ULL << 5)
243 #define SNB_PF_IFETCH (1ULL << 6)
244 #define SNB_LLC_DATA_RD (1ULL << 7)
245 #define SNB_LLC_RFO (1ULL << 8)
246 #define SNB_LLC_IFETCH (1ULL << 9)
247 #define SNB_BUS_LOCKS (1ULL << 10)
248 #define SNB_STRM_ST (1ULL << 11)
249 #define SNB_OTHER (1ULL << 15)
250 #define SNB_RESP_ANY (1ULL << 16)
251 #define SNB_NO_SUPP (1ULL << 17)
252 #define SNB_LLC_HITM (1ULL << 18)
253 #define SNB_LLC_HITE (1ULL << 19)
254 #define SNB_LLC_HITS (1ULL << 20)
255 #define SNB_LLC_HITF (1ULL << 21)
256 #define SNB_LOCAL (1ULL << 22)
257 #define SNB_REMOTE (0xffULL << 23)
258 #define SNB_SNP_NONE (1ULL << 31)
259 #define SNB_SNP_NOT_NEEDED (1ULL << 32)
260 #define SNB_SNP_MISS (1ULL << 33)
261 #define SNB_NO_FWD (1ULL << 34)
262 #define SNB_SNP_FWD (1ULL << 35)
263 #define SNB_HITM (1ULL << 36)
264 #define SNB_NON_DRAM (1ULL << 37)
266 #define SNB_DMND_READ (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
267 #define SNB_DMND_WRITE (SNB_DMND_RFO|SNB_LLC_RFO)
268 #define SNB_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
270 #define SNB_SNP_ANY (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
271 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM)
274 #define SNB_DRAM_ANY (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
275 #define SNB_DRAM_REMOTE (SNB_REMOTE|SNB_SNP_ANY)
277 #define SNB_L3_ACCESS SNB_RESP_ANY
278 #define SNB_L3_MISS (SNB_DRAM_ANY|SNB_NON_DRAM)
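/*
 * Example (illustrative): with the bits above, an LLC demand-read miss
 * is encoded as SNB_DMND_READ|SNB_L3_MISS, i.e. the demand/LLC data-read
 * request bits qualified by the "any DRAM or non-DRAM response" mask;
 * see the extra-regs table below.
 */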
280 static __initconst const u64 snb_hw_cache_extra_regs
281 [PERF_COUNT_HW_CACHE_MAX]
282 [PERF_COUNT_HW_CACHE_OP_MAX]
283 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
287 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
288 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS,
291 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
292 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS,
294 [ C(OP_PREFETCH) ] = {
295 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
296 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
301 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
302 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
305 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
306 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
308 [ C(OP_PREFETCH) ] = {
309 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
310 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
315 static __initconst const u64 snb_hw_cache_event_ids
316 [PERF_COUNT_HW_CACHE_MAX]
317 [PERF_COUNT_HW_CACHE_OP_MAX]
318 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
322 [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
323 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
326 [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
327 [ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
329 [ C(OP_PREFETCH) ] = {
330 [ C(RESULT_ACCESS) ] = 0x0,
331 [ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
336 [ C(RESULT_ACCESS) ] = 0x0,
337 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
340 [ C(RESULT_ACCESS) ] = -1,
341 [ C(RESULT_MISS) ] = -1,
343 [ C(OP_PREFETCH) ] = {
344 [ C(RESULT_ACCESS) ] = 0x0,
345 [ C(RESULT_MISS) ] = 0x0,
350 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
351 [ C(RESULT_ACCESS) ] = 0x01b7,
352 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
353 [ C(RESULT_MISS) ] = 0x01b7,
356 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
357 [ C(RESULT_ACCESS) ] = 0x01b7,
358 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
359 [ C(RESULT_MISS) ] = 0x01b7,
361 [ C(OP_PREFETCH) ] = {
362 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
363 [ C(RESULT_ACCESS) ] = 0x01b7,
364 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
365 [ C(RESULT_MISS) ] = 0x01b7,
370 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
371 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
374 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
375 [ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
377 [ C(OP_PREFETCH) ] = {
378 [ C(RESULT_ACCESS) ] = 0x0,
379 [ C(RESULT_MISS) ] = 0x0,
384 [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
385 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
388 [ C(RESULT_ACCESS) ] = -1,
389 [ C(RESULT_MISS) ] = -1,
391 [ C(OP_PREFETCH) ] = {
392 [ C(RESULT_ACCESS) ] = -1,
393 [ C(RESULT_MISS) ] = -1,
398 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
399 [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
402 [ C(RESULT_ACCESS) ] = -1,
403 [ C(RESULT_MISS) ] = -1,
405 [ C(OP_PREFETCH) ] = {
406 [ C(RESULT_ACCESS) ] = -1,
407 [ C(RESULT_MISS) ] = -1,
412 [ C(RESULT_ACCESS) ] = 0x01b7,
413 [ C(RESULT_MISS) ] = 0x01b7,
416 [ C(RESULT_ACCESS) ] = 0x01b7,
417 [ C(RESULT_MISS) ] = 0x01b7,
419 [ C(OP_PREFETCH) ] = {
420 [ C(RESULT_ACCESS) ] = 0x01b7,
421 [ C(RESULT_MISS) ] = 0x01b7,
428 * Notes on the events:
429 * - data reads do not include code reads (comparable to earlier tables)
430 * - data counts include speculative execution (except L1 write, dtlb, bpu)
431 * - remote node access includes remote memory, remote cache, remote mmio.
432 * - prefetches are not included in the counts because they are not reliable.
436 #define HSW_DEMAND_DATA_RD BIT_ULL(0)
437 #define HSW_DEMAND_RFO BIT_ULL(1)
438 #define HSW_ANY_RESPONSE BIT_ULL(16)
439 #define HSW_SUPPLIER_NONE BIT_ULL(17)
440 #define HSW_L3_MISS_LOCAL_DRAM BIT_ULL(22)
441 #define HSW_L3_MISS_REMOTE_HOP0 BIT_ULL(27)
442 #define HSW_L3_MISS_REMOTE_HOP1 BIT_ULL(28)
443 #define HSW_L3_MISS_REMOTE_HOP2P BIT_ULL(29)
444 #define HSW_L3_MISS (HSW_L3_MISS_LOCAL_DRAM| \
445 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
446 HSW_L3_MISS_REMOTE_HOP2P)
447 #define HSW_SNOOP_NONE BIT_ULL(31)
448 #define HSW_SNOOP_NOT_NEEDED BIT_ULL(32)
449 #define HSW_SNOOP_MISS BIT_ULL(33)
450 #define HSW_SNOOP_HIT_NO_FWD BIT_ULL(34)
451 #define HSW_SNOOP_HIT_WITH_FWD BIT_ULL(35)
452 #define HSW_SNOOP_HITM BIT_ULL(36)
453 #define HSW_SNOOP_NON_DRAM BIT_ULL(37)
454 #define HSW_ANY_SNOOP (HSW_SNOOP_NONE| \
455 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
456 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
457 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
458 #define HSW_SNOOP_DRAM (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
459 #define HSW_DEMAND_READ HSW_DEMAND_DATA_RD
460 #define HSW_DEMAND_WRITE HSW_DEMAND_RFO
461 #define HSW_L3_MISS_REMOTE (HSW_L3_MISS_REMOTE_HOP0|\
462 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
463 #define HSW_LLC_ACCESS HSW_ANY_RESPONSE
465 #define BDW_L3_MISS_LOCAL BIT(26)
466 #define BDW_L3_MISS (BDW_L3_MISS_LOCAL| \
467 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
468 HSW_L3_MISS_REMOTE_HOP2P)
471 static __initconst const u64 hsw_hw_cache_event_ids
472 [PERF_COUNT_HW_CACHE_MAX]
473 [PERF_COUNT_HW_CACHE_OP_MAX]
474 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
478 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
479 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
482 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
483 [ C(RESULT_MISS) ] = 0x0,
485 [ C(OP_PREFETCH) ] = {
486 [ C(RESULT_ACCESS) ] = 0x0,
487 [ C(RESULT_MISS) ] = 0x0,
492 [ C(RESULT_ACCESS) ] = 0x0,
493 [ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */
496 [ C(RESULT_ACCESS) ] = -1,
497 [ C(RESULT_MISS) ] = -1,
499 [ C(OP_PREFETCH) ] = {
500 [ C(RESULT_ACCESS) ] = 0x0,
501 [ C(RESULT_MISS) ] = 0x0,
506 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
507 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
510 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
511 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
513 [ C(OP_PREFETCH) ] = {
514 [ C(RESULT_ACCESS) ] = 0x0,
515 [ C(RESULT_MISS) ] = 0x0,
520 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
521 [ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
524 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
525 [ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
527 [ C(OP_PREFETCH) ] = {
528 [ C(RESULT_ACCESS) ] = 0x0,
529 [ C(RESULT_MISS) ] = 0x0,
534 [ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
535 [ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */
538 [ C(RESULT_ACCESS) ] = -1,
539 [ C(RESULT_MISS) ] = -1,
541 [ C(OP_PREFETCH) ] = {
542 [ C(RESULT_ACCESS) ] = -1,
543 [ C(RESULT_MISS) ] = -1,
548 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
549 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
552 [ C(RESULT_ACCESS) ] = -1,
553 [ C(RESULT_MISS) ] = -1,
555 [ C(OP_PREFETCH) ] = {
556 [ C(RESULT_ACCESS) ] = -1,
557 [ C(RESULT_MISS) ] = -1,
562 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
563 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
566 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
567 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
569 [ C(OP_PREFETCH) ] = {
570 [ C(RESULT_ACCESS) ] = 0x0,
571 [ C(RESULT_MISS) ] = 0x0,
576 static __initconst const u64 hsw_hw_cache_extra_regs
577 [PERF_COUNT_HW_CACHE_MAX]
578 [PERF_COUNT_HW_CACHE_OP_MAX]
579 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
583 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|HSW_LLC_ACCESS,
585 [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
586 HSW_L3_MISS|HSW_ANY_SNOOP,
589 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|HSW_LLC_ACCESS,
591 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
592 HSW_L3_MISS|HSW_ANY_SNOOP,
594 [ C(OP_PREFETCH) ] = {
595 [ C(RESULT_ACCESS) ] = 0x0,
596 [ C(RESULT_MISS) ] = 0x0,
601 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
602 HSW_L3_MISS_LOCAL_DRAM|HSW_SNOOP_DRAM,
604 [ C(RESULT_MISS) ] = HSW_DEMAND_READ|HSW_L3_MISS_REMOTE|HSW_SNOOP_DRAM,
609 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
610 HSW_L3_MISS_LOCAL_DRAM|HSW_SNOOP_DRAM,
612 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|HSW_L3_MISS_REMOTE|HSW_SNOOP_DRAM,
616 [ C(OP_PREFETCH) ] = {
617 [ C(RESULT_ACCESS) ] = 0x0,
618 [ C(RESULT_MISS) ] = 0x0,
623 static __initconst const u64 westmere_hw_cache_event_ids
624 [PERF_COUNT_HW_CACHE_MAX]
625 [PERF_COUNT_HW_CACHE_OP_MAX]
626 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
630 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
631 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
634 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
635 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
637 [ C(OP_PREFETCH) ] = {
638 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
639 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
644 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
645 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
648 [ C(RESULT_ACCESS) ] = -1,
649 [ C(RESULT_MISS) ] = -1,
651 [ C(OP_PREFETCH) ] = {
652 [ C(RESULT_ACCESS) ] = 0x0,
653 [ C(RESULT_MISS) ] = 0x0,
658 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
659 [ C(RESULT_ACCESS) ] = 0x01b7,
660 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
661 [ C(RESULT_MISS) ] = 0x01b7,
664 * Use RFO, not WRITEBACK, because a write miss would typically occur on RFO.
668 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
669 [ C(RESULT_ACCESS) ] = 0x01b7,
670 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
671 [ C(RESULT_MISS) ] = 0x01b7,
673 [ C(OP_PREFETCH) ] = {
674 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
675 [ C(RESULT_ACCESS) ] = 0x01b7,
676 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
677 [ C(RESULT_MISS) ] = 0x01b7,
682 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
683 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
686 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
687 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
689 [ C(OP_PREFETCH) ] = {
690 [ C(RESULT_ACCESS) ] = 0x0,
691 [ C(RESULT_MISS) ] = 0x0,
696 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
697 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
700 [ C(RESULT_ACCESS) ] = -1,
701 [ C(RESULT_MISS) ] = -1,
703 [ C(OP_PREFETCH) ] = {
704 [ C(RESULT_ACCESS) ] = -1,
705 [ C(RESULT_MISS) ] = -1,
710 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
711 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
714 [ C(RESULT_ACCESS) ] = -1,
715 [ C(RESULT_MISS) ] = -1,
717 [ C(OP_PREFETCH) ] = {
718 [ C(RESULT_ACCESS) ] = -1,
719 [ C(RESULT_MISS) ] = -1,
724 [ C(RESULT_ACCESS) ] = 0x01b7,
725 [ C(RESULT_MISS) ] = 0x01b7,
728 [ C(RESULT_ACCESS) ] = 0x01b7,
729 [ C(RESULT_MISS) ] = 0x01b7,
731 [ C(OP_PREFETCH) ] = {
732 [ C(RESULT_ACCESS) ] = 0x01b7,
733 [ C(RESULT_MISS) ] = 0x01b7,
739 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
740 * See IA32 SDM Vol 3B 30.6.1.3
743 #define NHM_DMND_DATA_RD (1 << 0)
744 #define NHM_DMND_RFO (1 << 1)
745 #define NHM_DMND_IFETCH (1 << 2)
746 #define NHM_DMND_WB (1 << 3)
747 #define NHM_PF_DATA_RD (1 << 4)
748 #define NHM_PF_DATA_RFO (1 << 5)
749 #define NHM_PF_IFETCH (1 << 6)
750 #define NHM_OFFCORE_OTHER (1 << 7)
751 #define NHM_UNCORE_HIT (1 << 8)
752 #define NHM_OTHER_CORE_HIT_SNP (1 << 9)
753 #define NHM_OTHER_CORE_HITM (1 << 10)
755 #define NHM_REMOTE_CACHE_FWD (1 << 12)
756 #define NHM_REMOTE_DRAM (1 << 13)
757 #define NHM_LOCAL_DRAM (1 << 14)
758 #define NHM_NON_DRAM (1 << 15)
760 #define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
761 #define NHM_REMOTE (NHM_REMOTE_DRAM)
763 #define NHM_DMND_READ (NHM_DMND_DATA_RD)
764 #define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
765 #define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
767 #define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
768 #define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
769 #define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
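/*
 * Example (illustrative): an L3 demand-read miss is likewise encoded as
 * NHM_DMND_READ|NHM_L3_MISS, matching the extra-regs table below.
 */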
771 static __initconst const u64 nehalem_hw_cache_extra_regs
772 [PERF_COUNT_HW_CACHE_MAX]
773 [PERF_COUNT_HW_CACHE_OP_MAX]
774 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
778 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
779 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
782 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
783 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
785 [ C(OP_PREFETCH) ] = {
786 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
787 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
792 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
793 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
796 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
797 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
799 [ C(OP_PREFETCH) ] = {
800 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
801 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
806 static __initconst const u64 nehalem_hw_cache_event_ids
807 [PERF_COUNT_HW_CACHE_MAX]
808 [PERF_COUNT_HW_CACHE_OP_MAX]
809 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
813 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
814 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
817 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
818 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
820 [ C(OP_PREFETCH) ] = {
821 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
822 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
827 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
828 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
831 [ C(RESULT_ACCESS) ] = -1,
832 [ C(RESULT_MISS) ] = -1,
834 [ C(OP_PREFETCH) ] = {
835 [ C(RESULT_ACCESS) ] = 0x0,
836 [ C(RESULT_MISS) ] = 0x0,
841 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
842 [ C(RESULT_ACCESS) ] = 0x01b7,
843 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
844 [ C(RESULT_MISS) ] = 0x01b7,
847 * Use RFO, not WRITEBACK, because a write miss would typically occur on RFO.
851 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
852 [ C(RESULT_ACCESS) ] = 0x01b7,
853 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
854 [ C(RESULT_MISS) ] = 0x01b7,
856 [ C(OP_PREFETCH) ] = {
857 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
858 [ C(RESULT_ACCESS) ] = 0x01b7,
859 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
860 [ C(RESULT_MISS) ] = 0x01b7,
865 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
866 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
869 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
870 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
872 [ C(OP_PREFETCH) ] = {
873 [ C(RESULT_ACCESS) ] = 0x0,
874 [ C(RESULT_MISS) ] = 0x0,
879 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
880 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
883 [ C(RESULT_ACCESS) ] = -1,
884 [ C(RESULT_MISS) ] = -1,
886 [ C(OP_PREFETCH) ] = {
887 [ C(RESULT_ACCESS) ] = -1,
888 [ C(RESULT_MISS) ] = -1,
893 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
894 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
897 [ C(RESULT_ACCESS) ] = -1,
898 [ C(RESULT_MISS) ] = -1,
900 [ C(OP_PREFETCH) ] = {
901 [ C(RESULT_ACCESS) ] = -1,
902 [ C(RESULT_MISS) ] = -1,
907 [ C(RESULT_ACCESS) ] = 0x01b7,
908 [ C(RESULT_MISS) ] = 0x01b7,
911 [ C(RESULT_ACCESS) ] = 0x01b7,
912 [ C(RESULT_MISS) ] = 0x01b7,
914 [ C(OP_PREFETCH) ] = {
915 [ C(RESULT_ACCESS) ] = 0x01b7,
916 [ C(RESULT_MISS) ] = 0x01b7,
921 static __initconst const u64 core2_hw_cache_event_ids
922 [PERF_COUNT_HW_CACHE_MAX]
923 [PERF_COUNT_HW_CACHE_OP_MAX]
924 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
928 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
929 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
932 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
933 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
935 [ C(OP_PREFETCH) ] = {
936 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
937 [ C(RESULT_MISS) ] = 0,
942 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
943 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
946 [ C(RESULT_ACCESS) ] = -1,
947 [ C(RESULT_MISS) ] = -1,
949 [ C(OP_PREFETCH) ] = {
950 [ C(RESULT_ACCESS) ] = 0,
951 [ C(RESULT_MISS) ] = 0,
956 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
957 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
960 [ C(RESULT_ACCESS) ] = 0x4f2a, /* L2_ST.MESI */
961 [ C(RESULT_MISS) ] = 0x412a, /* L2_ST.ISTATE */
963 [ C(OP_PREFETCH) ] = {
964 [ C(RESULT_ACCESS) ] = 0,
965 [ C(RESULT_MISS) ] = 0,
970 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
971 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
974 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
975 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
977 [ C(OP_PREFETCH) ] = {
978 [ C(RESULT_ACCESS) ] = 0,
979 [ C(RESULT_MISS) ] = 0,
984 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
985 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
988 [ C(RESULT_ACCESS) ] = -1,
989 [ C(RESULT_MISS) ] = -1,
991 [ C(OP_PREFETCH) ] = {
992 [ C(RESULT_ACCESS) ] = -1,
993 [ C(RESULT_MISS) ] = -1,
998 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
999 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1002 [ C(RESULT_ACCESS) ] = -1,
1003 [ C(RESULT_MISS) ] = -1,
1005 [ C(OP_PREFETCH) ] = {
1006 [ C(RESULT_ACCESS) ] = -1,
1007 [ C(RESULT_MISS) ] = -1,
1012 static __initconst const u64 atom_hw_cache_event_ids
1013 [PERF_COUNT_HW_CACHE_MAX]
1014 [PERF_COUNT_HW_CACHE_OP_MAX]
1015 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1019 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
1020 [ C(RESULT_MISS) ] = 0,
1023 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
1024 [ C(RESULT_MISS) ] = 0,
1026 [ C(OP_PREFETCH) ] = {
1027 [ C(RESULT_ACCESS) ] = 0x0,
1028 [ C(RESULT_MISS) ] = 0,
1033 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1034 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1037 [ C(RESULT_ACCESS) ] = -1,
1038 [ C(RESULT_MISS) ] = -1,
1040 [ C(OP_PREFETCH) ] = {
1041 [ C(RESULT_ACCESS) ] = 0,
1042 [ C(RESULT_MISS) ] = 0,
1047 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1048 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1051 [ C(RESULT_ACCESS) ] = 0x4f2a, /* L2_ST.MESI */
1052 [ C(RESULT_MISS) ] = 0x412a, /* L2_ST.ISTATE */
1054 [ C(OP_PREFETCH) ] = {
1055 [ C(RESULT_ACCESS) ] = 0,
1056 [ C(RESULT_MISS) ] = 0,
1061 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
1062 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
1065 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
1066 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
1068 [ C(OP_PREFETCH) ] = {
1069 [ C(RESULT_ACCESS) ] = 0,
1070 [ C(RESULT_MISS) ] = 0,
1075 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1076 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
1079 [ C(RESULT_ACCESS) ] = -1,
1080 [ C(RESULT_MISS) ] = -1,
1082 [ C(OP_PREFETCH) ] = {
1083 [ C(RESULT_ACCESS) ] = -1,
1084 [ C(RESULT_MISS) ] = -1,
1089 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1090 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1093 [ C(RESULT_ACCESS) ] = -1,
1094 [ C(RESULT_MISS) ] = -1,
1096 [ C(OP_PREFETCH) ] = {
1097 [ C(RESULT_ACCESS) ] = -1,
1098 [ C(RESULT_MISS) ] = -1,
1103 static struct extra_reg intel_slm_extra_regs[] __read_mostly =
1105 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1106 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
1107 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x768005ffffull, RSP_1),
1111 #define SLM_DMND_READ SNB_DMND_DATA_RD
1112 #define SLM_DMND_WRITE SNB_DMND_RFO
1113 #define SLM_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1115 #define SLM_SNP_ANY (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
1116 #define SLM_LLC_ACCESS SNB_RESP_ANY
1117 #define SLM_LLC_MISS (SLM_SNP_ANY|SNB_NON_DRAM)
1119 static __initconst const u64 slm_hw_cache_extra_regs
1120 [PERF_COUNT_HW_CACHE_MAX]
1121 [PERF_COUNT_HW_CACHE_OP_MAX]
1122 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1126 [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
1127 [ C(RESULT_MISS) ] = SLM_DMND_READ|SLM_LLC_MISS,
1130 [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
1131 [ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS,
1133 [ C(OP_PREFETCH) ] = {
1134 [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
1135 [ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
1140 static __initconst const u64 slm_hw_cache_event_ids
1141 [PERF_COUNT_HW_CACHE_MAX]
1142 [PERF_COUNT_HW_CACHE_OP_MAX]
1143 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1147 [ C(RESULT_ACCESS) ] = 0,
1148 [ C(RESULT_MISS) ] = 0x0104, /* LD_DCU_MISS */
1151 [ C(RESULT_ACCESS) ] = 0,
1152 [ C(RESULT_MISS) ] = 0,
1154 [ C(OP_PREFETCH) ] = {
1155 [ C(RESULT_ACCESS) ] = 0,
1156 [ C(RESULT_MISS) ] = 0,
1161 [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
1162 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
1165 [ C(RESULT_ACCESS) ] = -1,
1166 [ C(RESULT_MISS) ] = -1,
1168 [ C(OP_PREFETCH) ] = {
1169 [ C(RESULT_ACCESS) ] = 0,
1170 [ C(RESULT_MISS) ] = 0,
1175 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1176 [ C(RESULT_ACCESS) ] = 0x01b7,
1177 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1178 [ C(RESULT_MISS) ] = 0x01b7,
1181 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1182 [ C(RESULT_ACCESS) ] = 0x01b7,
1183 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1184 [ C(RESULT_MISS) ] = 0x01b7,
1186 [ C(OP_PREFETCH) ] = {
1187 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1188 [ C(RESULT_ACCESS) ] = 0x01b7,
1189 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1190 [ C(RESULT_MISS) ] = 0x01b7,
1195 [ C(RESULT_ACCESS) ] = 0,
1196 [ C(RESULT_MISS) ] = 0x0804, /* LD_DTLB_MISS */
1199 [ C(RESULT_ACCESS) ] = 0,
1200 [ C(RESULT_MISS) ] = 0,
1202 [ C(OP_PREFETCH) ] = {
1203 [ C(RESULT_ACCESS) ] = 0,
1204 [ C(RESULT_MISS) ] = 0,
1209 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1210 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
1213 [ C(RESULT_ACCESS) ] = -1,
1214 [ C(RESULT_MISS) ] = -1,
1216 [ C(OP_PREFETCH) ] = {
1217 [ C(RESULT_ACCESS) ] = -1,
1218 [ C(RESULT_MISS) ] = -1,
1223 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1224 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1227 [ C(RESULT_ACCESS) ] = -1,
1228 [ C(RESULT_MISS) ] = -1,
1230 [ C(OP_PREFETCH) ] = {
1231 [ C(RESULT_ACCESS) ] = -1,
1232 [ C(RESULT_MISS) ] = -1,
1237 static void intel_pmu_disable_all(void)
1239 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1241 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1243 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1244 intel_pmu_disable_bts();
1246 intel_pmu_pebs_disable_all();
1247 intel_pmu_lbr_disable_all();
1250 static void intel_pmu_enable_all(int added)
1252 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1254 intel_pmu_pebs_enable_all();
1255 intel_pmu_lbr_enable_all();
1256 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
1257 x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
1259 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1260 struct perf_event *event =
1261 cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
1263 if (WARN_ON_ONCE(!event))
1266 intel_pmu_enable_bts(event->hw.config);
1272 * Intel Errata AAK100 (model 26)
1273 * Intel Errata AAP53 (model 30)
1274 * Intel Errata BD53 (model 44)
1276 * The official story:
1277 * These chips need to be 'reset' when adding counters by programming the
1278 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
1279 * in sequence on the same PMC or on different PMCs.
1281 * In practice it appears some of these events do in fact count, and
1282 * we need to program all 4 events.
1284 static void intel_pmu_nhm_workaround(void)
1286 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1287 static const unsigned long nhm_magic[4] = {
1293 struct perf_event *event;
1297 * The errata require the following steps:
1298 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
1299 * 2) Configure 4 PERFEVTSELx with the magic events and clear
1300 * the corresponding PMCx;
1301 * 3) Set bits 0-3 of MSR_CORE_PERF_GLOBAL_CTRL;
1302 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
1303 * 5) Clear the 4 pairs of PERFEVTSELx and PMCx;
1307 * The steps we actually take differ slightly from the above:
1308 * A) To reduce MSR operations, we don't run step 1), as those MSRs
1309 * are already cleared before this function is called;
1310 * B) Call x86_perf_event_update to save PMCx before configuring
1311 * PERFEVTSELx with the magic number;
1312 * C) For step 5), we only clear a PERFEVTSELx when it is not
1313 * currently in use.
1314 * D) Call x86_perf_event_set_period to restore PMCx;
1317 /* We always operate on the 4 pairs of PERFEVTSELx/PMCx registers */
1318 for (i = 0; i < 4; i++) {
1319 event = cpuc->events[i];
1321 x86_perf_event_update(event);
1324 for (i = 0; i < 4; i++) {
1325 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
1326 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
1329 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
1330 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
1332 for (i = 0; i < 4; i++) {
1333 event = cpuc->events[i];
1336 x86_perf_event_set_period(event);
1337 __x86_pmu_enable_event(&event->hw,
1338 ARCH_PERFMON_EVENTSEL_ENABLE);
1340 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
1344 static void intel_pmu_nhm_enable_all(int added)
1347 intel_pmu_nhm_workaround();
1348 intel_pmu_enable_all(added);
1351 static inline u64 intel_pmu_get_status(void)
1355 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1360 static inline void intel_pmu_ack_status(u64 ack)
1362 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1365 static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
1367 int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
1370 mask = 0xfULL << (idx * 4);
1372 rdmsrl(hwc->config_base, ctrl_val);
1374 wrmsrl(hwc->config_base, ctrl_val);
1377 static inline bool event_is_checkpointed(struct perf_event *event)
1379 return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
1382 static void intel_pmu_disable_event(struct perf_event *event)
1384 struct hw_perf_event *hwc = &event->hw;
1385 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1387 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
1388 intel_pmu_disable_bts();
1389 intel_pmu_drain_bts_buffer();
1393 cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
1394 cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
1395 cpuc->intel_cp_status &= ~(1ull << hwc->idx);
1398 * The LBR must be disabled before disabling any actual event,
1399 * because any event may be combined with LBR.
1401 if (needs_branch_stack(event))
1402 intel_pmu_lbr_disable(event);
1404 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1405 intel_pmu_disable_fixed(hwc);
1409 x86_pmu_disable_event(event);
1411 if (unlikely(event->attr.precise_ip))
1412 intel_pmu_pebs_disable(event);
1415 static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
1417 int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
1418 u64 ctrl_val, bits, mask;
1421 * Enable IRQ generation (0x8),
1422 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1426 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1428 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1432 * ANY bit is supported in v3 and up
1434 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
1438 mask = 0xfULL << (idx * 4);
1440 rdmsrl(hwc->config_base, ctrl_val);
1443 wrmsrl(hwc->config_base, ctrl_val);
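/*
 * Example (illustrative): for fixed counter 1 (CPU_CLK_UNHALTED.CORE)
 * counting in both ring 0 and ring 3 with PMI enabled, bits becomes
 * 0x8 | 0x2 | 0x1 = 0xb, shifted into the per-counter nibble at bits
 * 4-7 (idx * 4) of MSR_ARCH_PERFMON_FIXED_CTR_CTRL.
 */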
1446 static void intel_pmu_enable_event(struct perf_event *event)
1448 struct hw_perf_event *hwc = &event->hw;
1449 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1451 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
1452 if (!__this_cpu_read(cpu_hw_events.enabled))
1455 intel_pmu_enable_bts(hwc->config);
1459 * The LBR must be enabled before enabling any actual event,
1460 * because any event may be combined with LBR.
1462 if (needs_branch_stack(event))
1463 intel_pmu_lbr_enable(event);
1465 if (event->attr.exclude_host)
1466 cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
1467 if (event->attr.exclude_guest)
1468 cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
1470 if (unlikely(event_is_checkpointed(event)))
1471 cpuc->intel_cp_status |= (1ull << hwc->idx);
1473 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1474 intel_pmu_enable_fixed(hwc);
1478 if (unlikely(event->attr.precise_ip))
1479 intel_pmu_pebs_enable(event);
1481 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
1485 * Save and restart an expired event. Called from NMI context,
1486 * so it has to be careful about preempting normal event ops:
1488 int intel_pmu_save_and_restart(struct perf_event *event)
1490 x86_perf_event_update(event);
1492 * For a checkpointed counter always reset back to 0. This
1493 * avoids a situation where the counter overflows, aborts the
1494 * transaction and is then set back to shortly before the
1495 * overflow, and overflows and aborts again.
1497 if (unlikely(event_is_checkpointed(event))) {
1498 /* No race with NMIs because the counter should not be armed */
1499 wrmsrl(event->hw.event_base, 0);
1500 local64_set(&event->hw.prev_count, 0);
1502 return x86_perf_event_set_period(event);
1505 static void intel_pmu_reset(void)
1507 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
1508 unsigned long flags;
1511 if (!x86_pmu.num_counters)
1514 local_irq_save(flags);
1516 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
1518 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1519 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
1520 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
1522 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
1523 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1526 ds->bts_index = ds->bts_buffer_base;
1528 local_irq_restore(flags);
1532 * This handler is triggered by the local APIC, so the APIC IRQ handling rules apply:
1535 static int intel_pmu_handle_irq(struct pt_regs *regs)
1537 struct perf_sample_data data;
1538 struct cpu_hw_events *cpuc;
1543 cpuc = this_cpu_ptr(&cpu_hw_events);
1546 * There is no known reason not to always do a late ACK,
1547 * but just in case, make it opt-in.
1549 if (!x86_pmu.late_ack)
1550 apic_write(APIC_LVTPC, APIC_DM_NMI);
1551 intel_pmu_disable_all();
1552 handled = intel_pmu_drain_bts_buffer();
1553 status = intel_pmu_get_status();
1559 intel_pmu_ack_status(status);
1560 if (++loops > 100) {
1561 static bool warned = false;
1563 WARN(1, "perfevents: irq loop stuck!\n");
1564 perf_event_print_debug();
1571 inc_irq_stat(apic_perf_irqs);
1573 intel_pmu_lbr_read();
1576 * CondChgd (bit 63) does not indicate an overflow status. Ignore
1577 * and clear the bit.
1579 if (__test_and_clear_bit(63, (unsigned long *)&status)) {
1585 * PEBS overflow sets bit 62 in the global status register
1587 if (__test_and_clear_bit(62, (unsigned long *)&status)) {
1589 x86_pmu.drain_pebs(regs);
1593 * Checkpointed counters can lead to 'spurious' PMIs because the
1594 * rollback caused by the PMI will have cleared the overflow status
1595 * bit. Therefore always force probe these counters.
1597 status |= cpuc->intel_cp_status;
1599 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
1600 struct perf_event *event = cpuc->events[bit];
1604 if (!test_bit(bit, cpuc->active_mask))
1607 if (!intel_pmu_save_and_restart(event))
1610 perf_sample_data_init(&data, 0, event->hw.last_period);
1612 if (has_branch_stack(event))
1613 data.br_stack = &cpuc->lbr_stack;
1615 if (perf_event_overflow(event, &data, regs))
1616 x86_pmu_stop(event, 0);
1620 * Repeat if there is more work to be done:
1622 status = intel_pmu_get_status();
1627 intel_pmu_enable_all(0);
1629 * Only unmask the NMI after the overflow counters
1630 * have been reset. This avoids spurious NMIs on Haswell CPUs.
1633 if (x86_pmu.late_ack)
1634 apic_write(APIC_LVTPC, APIC_DM_NMI);
1638 static struct event_constraint *
1639 intel_bts_constraints(struct perf_event *event)
1641 struct hw_perf_event *hwc = &event->hw;
1642 unsigned int hw_event, bts_event;
1644 if (event->attr.freq)
1647 hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
1648 bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
1650 if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
1651 return &bts_constraint;
1656 static int intel_alt_er(int idx)
1658 if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
1661 if (idx == EXTRA_REG_RSP_0)
1662 return EXTRA_REG_RSP_1;
1664 if (idx == EXTRA_REG_RSP_1)
1665 return EXTRA_REG_RSP_0;
1670 static void intel_fixup_er(struct perf_event *event, int idx)
1672 event->hw.extra_reg.idx = idx;
1674 if (idx == EXTRA_REG_RSP_0) {
1675 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
1676 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
1677 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
1678 } else if (idx == EXTRA_REG_RSP_1) {
1679 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
1680 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
1681 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
1686 * manage allocation of shared extra msr for certain events
1689 * per-cpu: to be shared between the various events on a single PMU
1690 * per-core: per-cpu + shared by HT threads
1692 static struct event_constraint *
1693 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
1694 struct perf_event *event,
1695 struct hw_perf_event_extra *reg)
1697 struct event_constraint *c = &emptyconstraint;
1698 struct er_account *era;
1699 unsigned long flags;
1703 * reg->alloc can be set due to existing state, so for fake cpuc we
1704 * need to ignore this, otherwise we might fail to allocate proper fake
1705 * state for this extra reg constraint. Also see the comment below.
1707 if (reg->alloc && !cpuc->is_fake)
1708 return NULL; /* call x86_get_event_constraint() */
1711 era = &cpuc->shared_regs->regs[idx];
1713 * we use raw_spin_lock_irqsave() to avoid lockdep issues when
1714 * passing a fake cpuc
1716 raw_spin_lock_irqsave(&era->lock, flags);
1718 if (!atomic_read(&era->ref) || era->config == reg->config) {
1721 * If it's a fake cpuc -- as per validate_{group,event}() we
1722 * shouldn't touch event state and we can avoid doing so
1723 * since both will only call get_event_constraints() once
1724 * on each event, this avoids the need for reg->alloc.
1726 * Not doing the ER fixup will only result in era->reg being
1727 * wrong, but since we won't actually try to program hardware
1728 * this isn't a problem either.
1730 if (!cpuc->is_fake) {
1731 if (idx != reg->idx)
1732 intel_fixup_er(event, idx);
1735 * x86_schedule_events() can call get_event_constraints()
1736 * multiple times on events in the case of incremental
1737 * scheduling. reg->alloc ensures we only do the ER allocation once.
1743 /* lock in msr value */
1744 era->config = reg->config;
1745 era->reg = reg->reg;
1748 atomic_inc(&era->ref);
1751 * need to call x86_get_event_constraint()
1752 * to check if associated event has constraints
1756 idx = intel_alt_er(idx);
1757 if (idx != reg->idx) {
1758 raw_spin_unlock_irqrestore(&era->lock, flags);
1762 raw_spin_unlock_irqrestore(&era->lock, flags);
1768 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
1769 struct hw_perf_event_extra *reg)
1771 struct er_account *era;
1774 * Only put the constraint if the extra reg was actually allocated. Also
1775 * takes care of events which do not use an extra shared reg.
1777 * Also, if this is a fake cpuc we shouldn't touch any event state
1778 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
1779 * either since it'll be thrown out.
1781 if (!reg->alloc || cpuc->is_fake)
1784 era = &cpuc->shared_regs->regs[reg->idx];
1786 /* one fewer user */
1787 atomic_dec(&era->ref);
1789 /* allocate again next time */
1793 static struct event_constraint *
1794 intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
1795 struct perf_event *event)
1797 struct event_constraint *c = NULL, *d;
1798 struct hw_perf_event_extra *xreg, *breg;
1800 xreg = &event->hw.extra_reg;
1801 if (xreg->idx != EXTRA_REG_NONE) {
1802 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
1803 if (c == &emptyconstraint)
1806 breg = &event->hw.branch_reg;
1807 if (breg->idx != EXTRA_REG_NONE) {
1808 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
1809 if (d == &emptyconstraint) {
1810 __intel_shared_reg_put_constraints(cpuc, xreg);
1817 struct event_constraint *
1818 x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1820 struct event_constraint *c;
1822 if (x86_pmu.event_constraints) {
1823 for_each_event_constraint(c, x86_pmu.event_constraints) {
1824 if ((event->hw.config & c->cmask) == c->code) {
1825 event->hw.flags |= c->flags;
1831 return &unconstrained;
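/*
 * Example (illustrative): INTEL_EVENT_CONSTRAINT(0x48, 0x4) uses a cmask
 * covering only the event-select field, so it matches event 0x48 with
 * any umask and constrains it to counter 2 (counter bitmask 0x4).
 */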
1834 static struct event_constraint *
1835 intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1837 struct event_constraint *c;
1839 c = intel_bts_constraints(event);
1843 c = intel_pebs_constraints(event);
1847 c = intel_shared_regs_constraints(cpuc, event);
1851 return x86_get_event_constraints(cpuc, event);
1855 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
1856 struct perf_event *event)
1858 struct hw_perf_event_extra *reg;
1860 reg = &event->hw.extra_reg;
1861 if (reg->idx != EXTRA_REG_NONE)
1862 __intel_shared_reg_put_constraints(cpuc, reg);
1864 reg = &event->hw.branch_reg;
1865 if (reg->idx != EXTRA_REG_NONE)
1866 __intel_shared_reg_put_constraints(cpuc, reg);
1869 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
1870 struct perf_event *event)
1872 intel_put_shared_regs_event_constraints(cpuc, event);
1875 static void intel_pebs_aliases_core2(struct perf_event *event)
1877 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
1879 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
1880 * (0x003c) so that we can use it with PEBS.
1882 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
1883 * PEBS capable. However we can use INST_RETIRED.ANY_P
1884 * (0x00c0), which is a PEBS capable event, to get the same
1887 * INST_RETIRED.ANY_P counts the number of cycles that retire at least
1888 * CNTMASK instructions. By setting CNTMASK to a value (16)
1889 * larger than the maximum number of instructions that can be
1890 * retired per cycle (4) and then inverting the condition, we
1891 * count all cycles that retire fewer than 16 instructions, which is every cycle.
1894 * Thereby we gain a PEBS capable cycle counter.
1896 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
1898 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
1899 event->hw.config = alt_config;
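/*
 * For reference (illustrative): given the perfevtsel layout (event in
 * config bits 0-7, inv in bit 23, cmask in bits 24-31), the alias
 * X86_CONFIG(.event=0xc0, .inv=1, .cmask=16) above evaluates to
 * 0x108000c0.
 */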
1903 static void intel_pebs_aliases_snb(struct perf_event *event)
1905 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
1907 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
1908 * (0x003c) so that we can use it with PEBS.
1910 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
1911 * PEBS capable. However we can use UOPS_RETIRED.ALL
1912 * (0x01c2), which is a PEBS capable event, to get the same
1915 * UOPS_RETIRED.ALL counts the number of cycles that retire at least
1916 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
1917 * larger than the maximum number of micro-ops that can be
1918 * retired per cycle (4) and then inverting the condition, we
1919 * count all cycles that retire fewer than 16 micro-ops, which is every cycle.
1922 * Thereby we gain a PEBS capable cycle counter.
1924 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
1926 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
1927 event->hw.config = alt_config;
1931 static int intel_pmu_hw_config(struct perf_event *event)
1933 int ret = x86_pmu_hw_config(event);
1938 if (event->attr.precise_ip && x86_pmu.pebs_aliases)
1939 x86_pmu.pebs_aliases(event);
1941 if (needs_branch_stack(event)) {
1942 ret = intel_pmu_setup_lbr_filter(event);
1947 if (event->attr.type != PERF_TYPE_RAW)
1950 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
1953 if (x86_pmu.version < 3)
1956 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1959 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
1964 struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
1966 if (x86_pmu.guest_get_msrs)
1967 return x86_pmu.guest_get_msrs(nr);
1971 EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
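/*
 * Illustrative use from a hypervisor (a sketch of what KVM does around
 * VM-entry; add_atomic_switch_msr() is the KVM-side helper and appears
 * here only as an example consumer):
 *
 *	int i, nr;
 *	struct perf_guest_switch_msr *msrs = perf_guest_get_msrs(&nr);
 *
 *	for (i = 0; i < nr; i++)
 *		add_atomic_switch_msr(vmx, msrs[i].msr,
 *				      msrs[i].guest, msrs[i].host);
 */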
1973 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
1975 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1976 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
1978 arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
1979 arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
1980 arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
1982 * If a PMU counter has PEBS enabled, it is not enough to disable the
1983 * counter on guest entry, since a PEBS memory write can overshoot the
1984 * guest entry and corrupt guest memory. Disabling PEBS solves the problem.
1986 arr[1].msr = MSR_IA32_PEBS_ENABLE;
1987 arr[1].host = cpuc->pebs_enabled;
1994 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
1996 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1997 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
2000 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
2001 struct perf_event *event = cpuc->events[idx];
2003 arr[idx].msr = x86_pmu_config_addr(idx);
2004 arr[idx].host = arr[idx].guest = 0;
2006 if (!test_bit(idx, cpuc->active_mask))
2009 arr[idx].host = arr[idx].guest =
2010 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
2012 if (event->attr.exclude_host)
2013 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
2014 else if (event->attr.exclude_guest)
2015 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
2018 *nr = x86_pmu.num_counters;
2022 static void core_pmu_enable_event(struct perf_event *event)
2024 if (!event->attr.exclude_host)
2025 x86_pmu_enable_event(event);
2028 static void core_pmu_enable_all(int added)
2030 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2033 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
2034 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
2036 if (!test_bit(idx, cpuc->active_mask) ||
2037 cpuc->events[idx]->attr.exclude_host)
2040 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
2044 static int hsw_hw_config(struct perf_event *event)
2046 int ret = intel_pmu_hw_config(event);
2050 if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
2052 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
2055 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
2056 * PEBS or in ANY thread mode. Since the results are nonsensical, forbid this combination.
2059 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
2060 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
2061 event->attr.precise_ip > 0))
2064 if (event_is_checkpointed(event)) {
2066 * Sampling of checkpointed events can cause situations where
2067 * the CPU constantly aborts because of an overflow, which is
2068 * then checkpointed back and ignored. Forbid checkpointing for sampling.
2071 * But still allow a long sampling period, so that perf stat still works.
2074 if (event->attr.sample_period > 0 &&
2075 event->attr.sample_period < 0x7fffffff)
2081 static struct event_constraint counter2_constraint =
2082 EVENT_CONSTRAINT(0, 0x4, 0);
2084 static struct event_constraint *
2085 hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
2087 struct event_constraint *c = intel_get_event_constraints(cpuc, event);
2089 /* Handle special quirk on in_tx_checkpointed only in counter 2 */
2090 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
2091 if (c->idxmsk64 & (1U << 2))
2092 return &counter2_constraint;
2093 return &emptyconstraint;
2102 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
2103 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
2104 * the two to enforce a minimum period of 128 (the smallest value that has bits
2105 * 0-5 cleared and >= 100).
2107 * Because of how the code in x86_perf_event_set_period() works, the truncation
2108 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
2109 * to make up for the 'lost' events due to carrying the 'error' in period_left.
2111 * Therefore the effective (average) period matches the requested period,
2112 * despite coarser hardware granularity.
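 *
 * Worked example (illustrative): a requested period of 200 is truncated
 * to 200 & ~0x3f = 192; a requested period of 100 is first raised to the
 * minimum of 128, which already has bits 0-5 clear.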
2114 static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
2116 if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
2117 X86_CONFIG(.event=0xc0, .umask=0x01)) {
2125 PMU_FORMAT_ATTR(event, "config:0-7" );
2126 PMU_FORMAT_ATTR(umask, "config:8-15" );
2127 PMU_FORMAT_ATTR(edge, "config:18" );
2128 PMU_FORMAT_ATTR(pc, "config:19" );
2129 PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
2130 PMU_FORMAT_ATTR(inv, "config:23" );
2131 PMU_FORMAT_ATTR(cmask, "config:24-31" );
2132 PMU_FORMAT_ATTR(in_tx, "config:32");
2133 PMU_FORMAT_ATTR(in_tx_cp, "config:33");
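/*
 * Decode example (illustrative): with the bit ranges above, the raw
 * event cpu/event=0xd0,umask=0x81/ is config = 0x81d0, i.e.
 * MEM_UOPS_RETIRED.ALL_LOADS as used in the cache event tables.
 */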
2135 static struct attribute *intel_arch_formats_attr[] = {
2136 &format_attr_event.attr,
2137 &format_attr_umask.attr,
2138 &format_attr_edge.attr,
2139 &format_attr_pc.attr,
2140 &format_attr_inv.attr,
2141 &format_attr_cmask.attr,
2145 ssize_t intel_event_sysfs_show(char *page, u64 config)
2147 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
2149 return x86_event_sysfs_show(page, config, event);
2152 static __initconst const struct x86_pmu core_pmu = {
2154 .handle_irq = x86_pmu_handle_irq,
2155 .disable_all = x86_pmu_disable_all,
2156 .enable_all = core_pmu_enable_all,
2157 .enable = core_pmu_enable_event,
2158 .disable = x86_pmu_disable_event,
2159 .hw_config = x86_pmu_hw_config,
2160 .schedule_events = x86_schedule_events,
2161 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
2162 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
2163 .event_map = intel_pmu_event_map,
2164 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
2167 * Intel PMCs cannot be accessed sanely above 32-bit width,
2168 * so we install an artificial 1<<31 period regardless of
2169 * the generic event period:
2171 .max_period = (1ULL << 31) - 1,
2172 .get_event_constraints = intel_get_event_constraints,
2173 .put_event_constraints = intel_put_event_constraints,
2174 .event_constraints = intel_core_event_constraints,
2175 .guest_get_msrs = core_guest_get_msrs,
2176 .format_attrs = intel_arch_formats_attr,
2177 .events_sysfs_show = intel_event_sysfs_show,
2180 struct intel_shared_regs *allocate_shared_regs(int cpu)
2182 struct intel_shared_regs *regs;
2185 regs = kzalloc_node(sizeof(struct intel_shared_regs),
2186 GFP_KERNEL, cpu_to_node(cpu));
2189 * initialize the locks to keep lockdep happy
2191 for (i = 0; i < EXTRA_REG_MAX; i++)
		raw_spin_lock_init(&regs->regs[i].lock);
2199 static int intel_pmu_cpu_prepare(int cpu)
2201 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))
		return NOTIFY_OK;

	cpuc->shared_regs = allocate_shared_regs(cpu);
	if (!cpuc->shared_regs)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}
2213 static void intel_pmu_cpu_starting(int cpu)
2215 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int core_id = topology_core_id(cpu);
	int i;
2219 init_debug_store_on_cpu(cpu);
2221 * Deal with CPUs that don't clear their LBRs on power-up.
2223 intel_pmu_lbr_reset();
2225 cpuc->lbr_sel = NULL;
	if (!cpuc->shared_regs)
		return;
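	/*
	 * When HT is on, the sibling threads of a core share a single
	 * intel_shared_regs instance: a later-booting sibling that finds
	 * an existing allocation with its core_id adopts it, and queues
	 * its own, now-redundant, allocation on cpuc->kfree_on_online to
	 * be freed once the CPU is fully online.
	 */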
2230 if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) {
2231 for_each_cpu(i, topology_thread_cpumask(cpu)) {
2232 struct intel_shared_regs *pc;
2234 pc = per_cpu(cpu_hw_events, i).shared_regs;
2235 if (pc && pc->core_id == core_id) {
2236 cpuc->kfree_on_online = cpuc->shared_regs;
				cpuc->shared_regs = pc;
				break;
			}
		}
	}
2241 cpuc->shared_regs->core_id = core_id;
2242 cpuc->shared_regs->refcnt++;
2245 if (x86_pmu.lbr_sel_map)
2246 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
2249 static void intel_pmu_cpu_dying(int cpu)
2251 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2252 struct intel_shared_regs *pc;
	pc = cpuc->shared_regs;
	if (pc) {
		if (pc->core_id == -1 || --pc->refcnt == 0)
			kfree(pc);
		cpuc->shared_regs = NULL;
	}
2261 fini_debug_store_on_cpu(cpu);
2264 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
2266 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
2268 static struct attribute *intel_arch3_formats_attr[] = {
2269 &format_attr_event.attr,
2270 &format_attr_umask.attr,
2271 &format_attr_edge.attr,
2272 &format_attr_pc.attr,
2273 &format_attr_any.attr,
2274 &format_attr_inv.attr,
2275 &format_attr_cmask.attr,
2276 &format_attr_in_tx.attr,
2277 &format_attr_in_tx_cp.attr,
2279 &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
	&format_attr_ldlat.attr, /* PEBS load latency */
	NULL,
};
2284 static __initconst const struct x86_pmu intel_pmu = {
2286 .handle_irq = intel_pmu_handle_irq,
2287 .disable_all = intel_pmu_disable_all,
2288 .enable_all = intel_pmu_enable_all,
2289 .enable = intel_pmu_enable_event,
2290 .disable = intel_pmu_disable_event,
2291 .hw_config = intel_pmu_hw_config,
2292 .schedule_events = x86_schedule_events,
2293 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
2294 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
2295 .event_map = intel_pmu_event_map,
2296 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
2299 * Intel PMCs cannot be accessed sanely above 32 bit width,
2300 * so we install an artificial 1<<31 period regardless of
2301 * the generic event period:
2303 .max_period = (1ULL << 31) - 1,
2304 .get_event_constraints = intel_get_event_constraints,
2305 .put_event_constraints = intel_put_event_constraints,
2306 .pebs_aliases = intel_pebs_aliases_core2,
2308 .format_attrs = intel_arch3_formats_attr,
2309 .events_sysfs_show = intel_event_sysfs_show,
2311 .cpu_prepare = intel_pmu_cpu_prepare,
2312 .cpu_starting = intel_pmu_cpu_starting,
2313 .cpu_dying = intel_pmu_cpu_dying,
2314 .guest_get_msrs = intel_guest_get_msrs,
2315 .sched_task = intel_pmu_lbr_sched_task,
2318 static __init void intel_clovertown_quirk(void)
2321 * PEBS is unreliable due to:
2323 * AJ67 - PEBS may experience CPL leaks
2324 * AJ68 - PEBS PMI may be delayed by one event
2325 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
2326 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
2328 * AJ67 could be worked around by restricting the OS/USR flags.
2329 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
2331 * AJ106 could possibly be worked around by not allowing LBR
2332 * usage from PEBS, including the fixup.
2333 * AJ68 could possibly be worked around by always programming
2334 * a pebs_event_reset[0] value and coping with the lost events.
 * But taken together it might just make sense to not enable PEBS on
 * these chips.
 */
	pr_warn("PEBS disabled due to CPU errata\n");
	x86_pmu.pebs = 0;
	x86_pmu.pebs_constraints = NULL;
}
2344 static int intel_snb_pebs_broken(int cpu)
2346 u32 rev = UINT_MAX; /* default to broken for unknown models */
2348 switch (cpu_data(cpu).x86_model) {
2353 case 45: /* SNB-EP */
2354 switch (cpu_data(cpu).x86_mask) {
2355 case 6: rev = 0x618; break;
		case 7: rev = 0x70c; break;
		}
	}

	return (cpu_data(cpu).microcode < rev);
}
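/*
 * Reading the table above: e.g. an SNB-EP (model 45) part at stepping 7
 * is considered broken until its microcode revision reaches 0x70c;
 * model/stepping combinations not listed keep rev = UINT_MAX and are
 * treated as broken unconditionally.
 */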
2363 static void intel_snb_check_microcode(void)
2365 int pebs_broken = 0;
	for_each_online_cpu(cpu) {
		if ((pebs_broken = intel_snb_pebs_broken(cpu)))
			break;
	}

	if (pebs_broken == x86_pmu.pebs_broken)
		return;
 * Serialized by the microcode lock.
2381 if (x86_pmu.pebs_broken) {
2382 pr_info("PEBS enabled due to microcode update\n");
		x86_pmu.pebs_broken = 0;
	} else {
		pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
		x86_pmu.pebs_broken = 1;
	}
}
/*
 * Under certain circumstances, accessing certain MSRs may cause a #GP
 * fault. This function tests whether the input MSR can be safely accessed.
 */
2394 static bool check_msr(unsigned long msr, u64 mask)
2396 u64 val_old, val_new, val_tmp;
2399 * Read the current value, change it and read it back to see if it
2400 * matches, this is needed to detect certain hardware emulators
2401 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
	if (rdmsrl_safe(msr, &val_old))
		return false;
2407 * Only change the bits which can be updated by wrmsrl.
2409 val_tmp = val_old ^ mask;
	if (wrmsrl_safe(msr, val_tmp) ||
	    rdmsrl_safe(msr, &val_new))
		return false;

	if (val_new != val_tmp)
		return false;
	/*
	 * At this point the MSR is known to be safely accessible.
	 * Restore the old value and return.
	 */
	wrmsrl(msr, val_old);

	return true;
}
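/*
 * Usage sketch, mirroring the probing done in intel_pmu_init() below:
 * a feature is disabled when its MSR faults or fails the read-back
 * test, e.g. under an emulator:
 *
 *	if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
 *		x86_pmu.lbr_nr = 0;
 */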
2425 static __init void intel_sandybridge_quirk(void)
2427 x86_pmu.check_microcode = intel_snb_check_microcode;
2428 intel_snb_check_microcode();
2431 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
2432 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
2433 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
2434 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
2435 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
2436 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
2437 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
2438 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
static __init void intel_arch_events_quirk(void)
{
	int bit;

	/* disable events that CPUID reports as not present */
2446 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
2447 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
2448 pr_warn("CPUID marked event: \'%s\' unavailable\n",
2449 intel_arch_events_map[bit].name);
2453 static __init void intel_nehalem_quirk(void)
2455 union cpuid10_ebx ebx;
2457 ebx.full = x86_pmu.events_maskl;
2458 if (ebx.split.no_branch_misses_retired) {
2460 * Erratum AAJ80 detected, we work it around by using
2461 * the BR_MISP_EXEC.ANY event. This will over-count
2462 * branch-misses, but it's still much better than the
2463 * architectural event which is often completely bogus:
2465 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
2466 ebx.split.no_branch_misses_retired = 0;
2467 x86_pmu.events_maskl = ebx.full;
2468 pr_info("CPU erratum AAJ80 worked around\n");
2472 EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82");
2475 /* Haswell special events */
2476 EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
2477 EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
2478 EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
2479 EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
2480 EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
2481 EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
2482 EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
2483 EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
2484 EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
2485 EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
2486 EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
2487 EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
2489 static struct attribute *hsw_events_attrs[] = {
2490 EVENT_PTR(tx_start),
2491 EVENT_PTR(tx_commit),
2492 EVENT_PTR(tx_abort),
2493 EVENT_PTR(tx_capacity),
2494 EVENT_PTR(tx_conflict),
2495 EVENT_PTR(el_start),
2496 EVENT_PTR(el_commit),
2497 EVENT_PTR(el_abort),
2498 EVENT_PTR(el_capacity),
2499 EVENT_PTR(el_conflict),
2500 EVENT_PTR(cycles_t),
2501 EVENT_PTR(cycles_ct),
2502 EVENT_PTR(mem_ld_hsw),
	EVENT_PTR(mem_st_hsw),
	NULL,
};
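/*
 * Usage sketch (assumed typical invocation): the named attributes above
 * let users count transactional execution without knowing the raw
 * encodings, e.g.:
 *
 *	perf stat -e cpu/tx-start/,cpu/tx-abort/,cpu/cycles-ct/ -- ./workload
 *
 * cycles-ct sets both in_tx and in_tx_cp, so cycles spent in aborted
 * transactions are checkpointed away rather than counted.
 */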
2507 __init int intel_pmu_init(void)
2509 union cpuid10_edx edx;
2510 union cpuid10_eax eax;
2511 union cpuid10_ebx ebx;
2512 struct event_constraint *c;
2513 unsigned int unused;
	struct extra_reg *er;
	int version, i;
2517 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xb:
			return knc_pmu_init();
		case 0xf:
			return p4_pmu_init();
		}
		return -ENODEV;
	}
2530 * Check whether the Architectural PerfMon supports
2531 * Branch Misses Retired hw_event or not.
2533 cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
		return -ENODEV;
	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;
2543 x86_pmu.version = version;
2544 x86_pmu.num_counters = eax.split.num_counters;
2545 x86_pmu.cntval_bits = eax.split.bit_width;
2546 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
2548 x86_pmu.events_maskl = ebx.full;
2549 x86_pmu.events_mask_len = eax.split.mask_length;
2551 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
2554 * Quirk: v2 perfmon does not report fixed-purpose events, so
2555 * assume at least 3 events:
	if (version > 1)
		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
	if (boot_cpu_has(X86_FEATURE_PDCM)) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
2564 x86_pmu.intel_cap.capabilities = capabilities;
2569 x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
2572 * Install the hw-cache-events table:
2574 switch (boot_cpu_data.x86_model) {
2575 case 14: /* 65nm Core "Yonah" */
2576 pr_cont("Core events, ");
2579 case 15: /* 65nm Core2 "Merom" */
2580 x86_add_quirk(intel_clovertown_quirk);
2581 case 22: /* 65nm Core2 "Merom-L" */
2582 case 23: /* 45nm Core2 "Penryn" */
	case 29: /* 45nm Core2 "Dunnington" (MP) */
2584 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
2585 sizeof(hw_cache_event_ids));
2587 intel_pmu_lbr_init_core();
2589 x86_pmu.event_constraints = intel_core2_event_constraints;
2590 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
2591 pr_cont("Core2 events, ");
2594 case 30: /* 45nm Nehalem */
2595 case 26: /* 45nm Nehalem-EP */
2596 case 46: /* 45nm Nehalem-EX */
2597 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
2598 sizeof(hw_cache_event_ids));
2599 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
2600 sizeof(hw_cache_extra_regs));
2602 intel_pmu_lbr_init_nhm();
2604 x86_pmu.event_constraints = intel_nehalem_event_constraints;
2605 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
2606 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
2607 x86_pmu.extra_regs = intel_nehalem_extra_regs;
2609 x86_pmu.cpu_events = nhm_events_attrs;
2611 /* UOPS_ISSUED.STALLED_CYCLES */
2612 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2613 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
2614 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
2615 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
2616 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
2618 x86_add_quirk(intel_nehalem_quirk);
2620 pr_cont("Nehalem events, ");
2623 case 28: /* 45nm Atom "Pineview" */
2624 case 38: /* 45nm Atom "Lincroft" */
2625 case 39: /* 32nm Atom "Penwell" */
2626 case 53: /* 32nm Atom "Cloverview" */
2627 case 54: /* 32nm Atom "Cedarview" */
2628 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
2629 sizeof(hw_cache_event_ids));
2631 intel_pmu_lbr_init_atom();
2633 x86_pmu.event_constraints = intel_gen_event_constraints;
2634 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
2635 pr_cont("Atom events, ");
2638 case 55: /* 22nm Atom "Silvermont" */
2639 case 76: /* 14nm Atom "Airmont" */
2640 case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
2641 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
2642 sizeof(hw_cache_event_ids));
2643 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
2644 sizeof(hw_cache_extra_regs));
2646 intel_pmu_lbr_init_atom();
2648 x86_pmu.event_constraints = intel_slm_event_constraints;
2649 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
2650 x86_pmu.extra_regs = intel_slm_extra_regs;
2651 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2652 pr_cont("Silvermont events, ");
2655 case 37: /* 32nm Westmere */
2656 case 44: /* 32nm Westmere-EP */
2657 case 47: /* 32nm Westmere-EX */
2658 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
2659 sizeof(hw_cache_event_ids));
2660 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
2661 sizeof(hw_cache_extra_regs));
2663 intel_pmu_lbr_init_nhm();
2665 x86_pmu.event_constraints = intel_westmere_event_constraints;
2666 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
2667 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
2668 x86_pmu.extra_regs = intel_westmere_extra_regs;
2669 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2671 x86_pmu.cpu_events = nhm_events_attrs;
2673 /* UOPS_ISSUED.STALLED_CYCLES */
2674 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2675 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
2676 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
2677 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
2678 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
2680 pr_cont("Westmere events, ");
2683 case 42: /* 32nm SandyBridge */
2684 case 45: /* 32nm SandyBridge-E/EN/EP */
2685 x86_add_quirk(intel_sandybridge_quirk);
2686 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
2687 sizeof(hw_cache_event_ids));
2688 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
2689 sizeof(hw_cache_extra_regs));
2691 intel_pmu_lbr_init_snb();
2693 x86_pmu.event_constraints = intel_snb_event_constraints;
2694 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
2695 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
2696 if (boot_cpu_data.x86_model == 45)
2697 x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;
2700 /* all extra regs are per-cpu when HT is on */
2701 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2702 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
2704 x86_pmu.cpu_events = snb_events_attrs;
2706 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
2707 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2708 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
2710 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
2711 X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
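		/*
		 * Decoding the stall encodings above: .inv=1 together with
		 * .cmask=1 makes the counter tick on every cycle in which
		 * *fewer than one* uop issues (or dispatches), which is
		 * exactly a stall cycle.
		 */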
2713 pr_cont("SandyBridge events, ");
2716 case 58: /* 22nm IvyBridge */
2717 case 62: /* 22nm IvyBridge-EP/EX */
2718 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
2719 sizeof(hw_cache_event_ids));
		/* dTLB-load-misses on IVB differs from SNB */
2721 hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
2723 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
2724 sizeof(hw_cache_extra_regs));
2726 intel_pmu_lbr_init_snb();
2728 x86_pmu.event_constraints = intel_ivb_event_constraints;
2729 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
2730 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
2731 if (boot_cpu_data.x86_model == 62)
2732 x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;
2735 /* all extra regs are per-cpu when HT is on */
2736 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2737 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
2739 x86_pmu.cpu_events = snb_events_attrs;
2741 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
2742 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2743 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
2745 pr_cont("IvyBridge events, ");
2749 case 60: /* 22nm Haswell Core */
2750 case 63: /* 22nm Haswell Server */
2751 case 69: /* 22nm Haswell ULT */
2752 case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
2753 x86_pmu.late_ack = true;
2754 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
2755 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
2757 intel_pmu_lbr_init_hsw();
2759 x86_pmu.event_constraints = intel_hsw_event_constraints;
2760 x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
2761 x86_pmu.extra_regs = intel_snbep_extra_regs;
2762 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
2763 /* all extra regs are per-cpu when HT is on */
2764 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2765 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
2767 x86_pmu.hw_config = hsw_hw_config;
2768 x86_pmu.get_event_constraints = hsw_get_event_constraints;
2769 x86_pmu.cpu_events = hsw_events_attrs;
2770 x86_pmu.lbr_double_abort = true;
2771 pr_cont("Haswell events, ");
2774 case 61: /* 14nm Broadwell Core-M */
2775 case 86: /* 14nm Broadwell Xeon D */
2776 x86_pmu.late_ack = true;
2777 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
2778 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
2780 /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
2781 hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
2782 BDW_L3_MISS|HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|
									  BDW_L3_MISS|HSW_SNOOP_DRAM;
2785 hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
2786 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
2787 hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
2788 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
2790 intel_pmu_lbr_init_snb();
2792 x86_pmu.event_constraints = intel_bdw_event_constraints;
2793 x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
2794 x86_pmu.extra_regs = intel_snbep_extra_regs;
2795 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
2796 /* all extra regs are per-cpu when HT is on */
2797 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2798 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
2800 x86_pmu.hw_config = hsw_hw_config;
2801 x86_pmu.get_event_constraints = hsw_get_event_constraints;
2802 x86_pmu.cpu_events = hsw_events_attrs;
2803 x86_pmu.limit_period = bdw_limit_period;
2804 pr_cont("Broadwell events, ");
2808 switch (x86_pmu.version) {
2810 x86_pmu.event_constraints = intel_v1_event_constraints;
2811 pr_cont("generic architected perfmon v1, ");
2815 * default constraints for v2 and up
2817 x86_pmu.event_constraints = intel_gen_event_constraints;
2818 pr_cont("generic architected perfmon, ");
2823 if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
2824 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2825 x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
2826 x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
2828 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
2830 if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
2831 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2832 x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
2833 x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
2836 x86_pmu.intel_ctrl |=
2837 ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
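	/*
	 * Example layout (assuming 4 generic and 3 fixed counters, as on
	 * many of the models above): intel_ctrl = 0xf | (0x7ull << 32) =
	 * 0x70000000f, i.e. generic enable bits in the low word and fixed
	 * counter enable bits starting at INTEL_PMC_IDX_FIXED (32).
	 */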
2839 if (x86_pmu.event_constraints) {
2841 * event on fixed counter2 (REF_CYCLES) only works on this
2842 * counter, so do not extend mask to generic counters
2844 for_each_event_constraint(c, x86_pmu.event_constraints) {
			if (c->cmask != FIXED_EVENT_FLAGS
			    || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
				continue;
			}

			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
			c->weight += x86_pmu.num_counters;
		}
	}
	/*
	 * Accessing the LBR MSRs may cause a #GP under certain
	 * circumstances; KVM, for example, does not implement them.
	 * Check all LBR MSRs here and disable LBR use entirely if any
	 * of them cannot be accessed.
	 */
	if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
		x86_pmu.lbr_nr = 0;
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
		      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
			x86_pmu.lbr_nr = 0;
	}
	/*
	 * Accessing an extra-reg MSR may cause a #GP under certain
	 * circumstances; KVM, for example, does not support the offcore
	 * MSRs. Check all extra_regs here.
	 */
2874 if (x86_pmu.extra_regs) {
2875 for (er = x86_pmu.extra_regs; er->msr; er++) {
2876 er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
2877 /* Disable LBR select mapping */
2878 if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
2879 x86_pmu.lbr_sel_map = NULL;
2883 /* Support full width counters using alternative MSR range */
2884 if (x86_pmu.intel_cap.full_width_write) {
2885 x86_pmu.max_period = x86_pmu.cntval_mask;
2886 x86_pmu.perfctr = MSR_IA32_PMC0;
2887 pr_cont("full-width counters, ");