perf/x86/intel/bts: Add BTS PMU driver
arch/x86/kernel/cpu/perf_event_intel.c
/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <asm/cpufeature.h>
#include <asm/hardirq.h>
#include <asm/apic.h>

#include "perf_event.h"

/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
        [PERF_COUNT_HW_CPU_CYCLES]              = 0x003c,
        [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x4f2e,
        [PERF_COUNT_HW_CACHE_MISSES]            = 0x412e,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c4,
        [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c5,
        [PERF_COUNT_HW_BUS_CYCLES]              = 0x013c,
        [PERF_COUNT_HW_REF_CPU_CYCLES]          = 0x0300, /* pseudo-encoding */
};

static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
        INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
        INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
        INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
        INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
        INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
        INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
        EVENT_CONSTRAINT_END
};

static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
        INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
        INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
        INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
        INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
        INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
        INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
        INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
        INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
        INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
        INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
        EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
        INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
        INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
        INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
        INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
        INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
        INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
        INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
        INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
        EVENT_CONSTRAINT_END
};

static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
        EVENT_EXTRA_END
};

static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
        INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
        INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
        INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
        INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
        EVENT_CONSTRAINT_END
};

static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
        INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
        INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
        INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
        INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
        INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
        EVENT_CONSTRAINT_END
};

static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
{
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
        INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
        INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
        INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
        INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
        INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
        INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
        INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
        INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
        /*
         * Errata BV98 -- MEM_*_RETIRED events can leak between counters of SMT
         * siblings; disable these events because they can corrupt unrelated
         * counters.
         */
        INTEL_EVENT_CONSTRAINT(0xd0, 0x0), /* MEM_UOPS_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xd1, 0x0), /* MEM_LOAD_UOPS_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xd2, 0x0), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xd3, 0x0), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
        EVENT_CONSTRAINT_END
};

static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
        INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
        EVENT_EXTRA_END
};

static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
        EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
        EVENT_CONSTRAINT_END
};

static struct event_constraint intel_slm_event_constraints[] __read_mostly =
{
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
        EVENT_CONSTRAINT_END
};

static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
        INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
        EVENT_EXTRA_END
};

static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
        INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
        EVENT_EXTRA_END
};

EVENT_ATTR_STR(mem-loads,       mem_ld_nhm,     "event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads,       mem_ld_snb,     "event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores,      mem_st_snb,     "event=0xcd,umask=0x2");

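/*
 * Attribute arrays exporting the mem-loads/mem-stores aliases defined
 * above through sysfs.
 */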
struct attribute *nhm_events_attrs[] = {
        EVENT_PTR(mem_ld_nhm),
        NULL,
};

struct attribute *snb_events_attrs[] = {
        EVENT_PTR(mem_ld_snb),
        EVENT_PTR(mem_st_snb),
        NULL,
};

static struct event_constraint intel_hsw_event_constraints[] = {
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
        INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
        INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
        /*
         * These are umask-qualified codes; the event field alone is only
         * 8 bits wide, so they must use INTEL_UEVENT_CONSTRAINT:
         */
        /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
        INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
        /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
        INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
        /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
        EVENT_CONSTRAINT_END
};

struct event_constraint intel_bdw_event_constraints[] = {
        FIXED_EVENT_CONSTRAINT(0x00c0, 0),      /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1),      /* CPU_CLK_UNHALTED.CORE */
        FIXED_EVENT_CONSTRAINT(0x0300, 2),      /* CPU_CLK_UNHALTED.REF */
        INTEL_UEVENT_CONSTRAINT(0x148, 0x4),    /* L1D_PEND_MISS.PENDING */
        INTEL_EVENT_CONSTRAINT(0xa3, 0x4),      /* CYCLE_ACTIVITY.* */
        EVENT_CONSTRAINT_END
};

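/*
 * Map a generic PERF_COUNT_HW_* id to the raw Intel event-select/umask
 * encoding from the table above.
 */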
static u64 intel_pmu_event_map(int hw_event)
{
        return intel_perfmon_event_map[hw_event];
}

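/*
 * Sandy Bridge MSR_OFFCORE_RESPONSE bits: request type in the low bits,
 * supplier/snoop response info in the high bits (same scheme as the
 * Nehalem/Westmere bits documented further down).
 */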
#define SNB_DMND_DATA_RD        (1ULL << 0)
#define SNB_DMND_RFO            (1ULL << 1)
#define SNB_DMND_IFETCH         (1ULL << 2)
#define SNB_DMND_WB             (1ULL << 3)
#define SNB_PF_DATA_RD          (1ULL << 4)
#define SNB_PF_RFO              (1ULL << 5)
#define SNB_PF_IFETCH           (1ULL << 6)
#define SNB_LLC_DATA_RD         (1ULL << 7)
#define SNB_LLC_RFO             (1ULL << 8)
#define SNB_LLC_IFETCH          (1ULL << 9)
#define SNB_BUS_LOCKS           (1ULL << 10)
#define SNB_STRM_ST             (1ULL << 11)
#define SNB_OTHER               (1ULL << 15)
#define SNB_RESP_ANY            (1ULL << 16)
#define SNB_NO_SUPP             (1ULL << 17)
#define SNB_LLC_HITM            (1ULL << 18)
#define SNB_LLC_HITE            (1ULL << 19)
#define SNB_LLC_HITS            (1ULL << 20)
#define SNB_LLC_HITF            (1ULL << 21)
#define SNB_LOCAL               (1ULL << 22)
#define SNB_REMOTE              (0xffULL << 23)
#define SNB_SNP_NONE            (1ULL << 31)
#define SNB_SNP_NOT_NEEDED      (1ULL << 32)
#define SNB_SNP_MISS            (1ULL << 33)
#define SNB_NO_FWD              (1ULL << 34)
#define SNB_SNP_FWD             (1ULL << 35)
#define SNB_HITM                (1ULL << 36)
#define SNB_NON_DRAM            (1ULL << 37)

#define SNB_DMND_READ           (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE          (SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH       (SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY             (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
                                 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
                                 SNB_HITM)

#define SNB_DRAM_ANY            (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE         (SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS           SNB_RESP_ANY
#define SNB_L3_MISS             (SNB_DRAM_ANY|SNB_NON_DRAM)

static __initconst const u64 snb_hw_cache_extra_regs
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
                [ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS,
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
                [ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
                [ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
        },
 },
 [ C(NODE) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
                [ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
                [ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
                [ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
        },
 },
};

static __initconst const u64 snb_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS        */
                [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT              */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES       */
                [ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT        */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS          */
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
                /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
                /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
        [ C(OP_WRITE) ] = {
                /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
                /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
        [ C(OP_PREFETCH) ] = {
                /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
                /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
                [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
                [ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT         */
                [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK    */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
                [ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(NODE) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x01b7,
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x01b7,
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x01b7,
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
 },

};

/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts because they are not
 *   reliably counted.
 */

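/*
 * Haswell MSR_OFFCORE_RESPONSE bits; like the SNB_* bits above, but with
 * per-hop L3-miss/remote-node detail.
 */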
#define HSW_DEMAND_DATA_RD              BIT_ULL(0)
#define HSW_DEMAND_RFO                  BIT_ULL(1)
#define HSW_ANY_RESPONSE                BIT_ULL(16)
#define HSW_SUPPLIER_NONE               BIT_ULL(17)
#define HSW_L3_MISS_LOCAL_DRAM          BIT_ULL(22)
#define HSW_L3_MISS_REMOTE_HOP0         BIT_ULL(27)
#define HSW_L3_MISS_REMOTE_HOP1         BIT_ULL(28)
#define HSW_L3_MISS_REMOTE_HOP2P        BIT_ULL(29)
#define HSW_L3_MISS                     (HSW_L3_MISS_LOCAL_DRAM| \
                                         HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
                                         HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_SNOOP_NONE                  BIT_ULL(31)
#define HSW_SNOOP_NOT_NEEDED            BIT_ULL(32)
#define HSW_SNOOP_MISS                  BIT_ULL(33)
#define HSW_SNOOP_HIT_NO_FWD            BIT_ULL(34)
#define HSW_SNOOP_HIT_WITH_FWD          BIT_ULL(35)
#define HSW_SNOOP_HITM                  BIT_ULL(36)
#define HSW_SNOOP_NON_DRAM              BIT_ULL(37)
#define HSW_ANY_SNOOP                   (HSW_SNOOP_NONE| \
                                         HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
                                         HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
                                         HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
#define HSW_SNOOP_DRAM                  (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
#define HSW_DEMAND_READ                 HSW_DEMAND_DATA_RD
#define HSW_DEMAND_WRITE                HSW_DEMAND_RFO
#define HSW_L3_MISS_REMOTE              (HSW_L3_MISS_REMOTE_HOP0|\
                                         HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_LLC_ACCESS                  HSW_ANY_RESPONSE

#define BDW_L3_MISS_LOCAL               BIT(26)
#define BDW_L3_MISS                     (BDW_L3_MISS_LOCAL| \
                                         HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
                                         HSW_L3_MISS_REMOTE_HOP2P)


static __initconst const u64 hsw_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x81d0,  /* MEM_UOPS_RETIRED.ALL_LOADS */
                [ C(RESULT_MISS)   ] = 0x151,   /* L1D.REPLACEMENT */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x82d0,  /* MEM_UOPS_RETIRED.ALL_STORES */
                [ C(RESULT_MISS)   ] = 0x0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x280,   /* ICACHE.MISSES */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x81d0,  /* MEM_UOPS_RETIRED.ALL_LOADS */
                [ C(RESULT_MISS)   ] = 0x108,   /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x82d0,  /* MEM_UOPS_RETIRED.ALL_STORES */
                [ C(RESULT_MISS)   ] = 0x149,   /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x6085,  /* ITLB_MISSES.STLB_HIT */
                [ C(RESULT_MISS)   ] = 0x185,   /* ITLB_MISSES.MISS_CAUSES_A_WALK */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0xc4,    /* BR_INST_RETIRED.ALL_BRANCHES */
                [ C(RESULT_MISS)   ] = 0xc5,    /* BR_MISP_RETIRED.ALL_BRANCHES */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(NODE) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
};

static __initconst const u64 hsw_hw_cache_extra_regs
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
                                       HSW_LLC_ACCESS,
                [ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
                                       HSW_L3_MISS|HSW_ANY_SNOOP,
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
                                       HSW_LLC_ACCESS,
                [ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
                                       HSW_L3_MISS|HSW_ANY_SNOOP,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(NODE) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
                                       HSW_L3_MISS_LOCAL_DRAM|
                                       HSW_SNOOP_DRAM,
                [ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
                                       HSW_L3_MISS_REMOTE|
                                       HSW_SNOOP_DRAM,
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
                                       HSW_L3_MISS_LOCAL_DRAM|
                                       HSW_SNOOP_DRAM,
                [ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
                                       HSW_L3_MISS_REMOTE|
                                       HSW_SNOOP_DRAM,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
};

static __initconst const u64 westmere_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
                [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
                [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
                [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
                /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
                /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
        /*
         * Use RFO, not WRITEBACK, because a write miss would typically occur
         * on RFO.
         */
        [ C(OP_WRITE) ] = {
                /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
                /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
        [ C(OP_PREFETCH) ] = {
                /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
                /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
                [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
                [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
                [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY              */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
                [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(NODE) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x01b7,
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x01b7,
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x01b7,
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
 },
};

/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */

#define NHM_DMND_DATA_RD        (1 << 0)
#define NHM_DMND_RFO            (1 << 1)
#define NHM_DMND_IFETCH         (1 << 2)
#define NHM_DMND_WB             (1 << 3)
#define NHM_PF_DATA_RD          (1 << 4)
#define NHM_PF_DATA_RFO         (1 << 5)
#define NHM_PF_IFETCH           (1 << 6)
#define NHM_OFFCORE_OTHER       (1 << 7)
#define NHM_UNCORE_HIT          (1 << 8)
#define NHM_OTHER_CORE_HIT_SNP  (1 << 9)
#define NHM_OTHER_CORE_HITM     (1 << 10)
                                /* reserved */
#define NHM_REMOTE_CACHE_FWD    (1 << 12)
#define NHM_REMOTE_DRAM         (1 << 13)
#define NHM_LOCAL_DRAM          (1 << 14)
#define NHM_NON_DRAM            (1 << 15)

#define NHM_LOCAL               (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE              (NHM_REMOTE_DRAM)

#define NHM_DMND_READ           (NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE          (NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH       (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT      (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS     (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS   (NHM_L3_HIT|NHM_L3_MISS)

static __initconst const u64 nehalem_hw_cache_extra_regs
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
                [ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
                [ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
                [ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
        },
 },
 [ C(NODE) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
                [ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
                [ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
                [ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
        },
 },
};

static __initconst const u64 nehalem_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
                [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
                [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
                [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
                /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
                /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
        /*
         * Use RFO, not WRITEBACK, because a write miss would typically occur
         * on RFO.
         */
        [ C(OP_WRITE) ] = {
                /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
                /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
        [ C(OP_PREFETCH) ] = {
                /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
                /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
                [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
                [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
                [ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
                [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(NODE) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x01b7,
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x01b7,
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x01b7,
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
 },
};

static __initconst const u64 core2_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
                [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
                [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
                [ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
                [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
                [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
                [ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
                [ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
                [ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
                [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
};

static __initconst const u64 atom_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
                [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
                [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
                [ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
                [ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
                [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
                [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
};

static struct extra_reg intel_slm_extra_regs[] __read_mostly =
{
        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
        INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x768005ffffull, RSP_1),
        EVENT_EXTRA_END
};

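/*
 * Silvermont uses the Sandy Bridge OFFCORE_RESPONSE bit layout for the
 * fields below, hence the SNB_* names.
 */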
#define SLM_DMND_READ           SNB_DMND_DATA_RD
#define SLM_DMND_WRITE          SNB_DMND_RFO
#define SLM_DMND_PREFETCH       (SNB_PF_DATA_RD|SNB_PF_RFO)

#define SLM_SNP_ANY             (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
#define SLM_LLC_ACCESS          SNB_RESP_ANY
#define SLM_LLC_MISS            (SLM_SNP_ANY|SNB_NON_DRAM)

static __initconst const u64 slm_hw_cache_extra_regs
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
                [ C(RESULT_MISS)   ] = SLM_DMND_READ|SLM_LLC_MISS,
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
                [ C(RESULT_MISS)   ] = SLM_DMND_WRITE|SLM_LLC_MISS,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
                [ C(RESULT_MISS)   ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
        },
 },
};

static __initconst const u64 slm_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0x0104, /* LD_DCU_MISS */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
                [ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
                /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
                /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
        [ C(OP_WRITE) ] = {
                /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
                /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
        [ C(OP_PREFETCH) ] = {
                /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
                /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0x0804, /* LD_DTLB_MISS */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
                [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
                [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
};

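/*
 * Disable everything: clear GLOBAL_CTRL first so no counter is running,
 * then quiesce BTS, PEBS and LBR.
 */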
static void intel_pmu_disable_all(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

        if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
                intel_pmu_disable_bts();
        else
                intel_bts_disable_local();

        intel_pmu_pebs_disable_all();
        intel_pmu_lbr_disable_all();
}

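/*
 * Re-enable in the opposite order; the GLOBAL_CTRL write enables only the
 * counters not currently claimed by a guest (intel_ctrl_guest_mask).
 */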
static void intel_pmu_enable_all(int added)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        intel_pmu_pebs_enable_all();
        intel_pmu_lbr_enable_all();
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
                        x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);

        if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
                struct perf_event *event =
                        cpuc->events[INTEL_PMC_IDX_FIXED_BTS];

                if (WARN_ON_ONCE(!event))
                        return;

                intel_pmu_enable_bts(event->hw.config);
        } else
                intel_bts_enable_local();
}

/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        static const unsigned long nhm_magic[4] = {
                0x4300B5,
                0x4300D2,
                0x4300B1,
                0x4300B1
        };
        struct perf_event *event;
        int i;

        /*
         * The errata require the following steps:
         * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
         * 2) Configure 4 PERFEVTSELx with the magic events and clear
         *    the corresponding PMCx;
         * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
         * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
         * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
         */

        /*
         * The steps we actually take differ slightly from the above:
         * A) To reduce MSR operations, we skip step 1); those MSRs are
         *    already cleared before this function is called;
         * B) Call x86_perf_event_update to save PMCx before configuring
         *    PERFEVTSELx with the magic numbers;
         * C) For step 5), we clear a PERFEVTSELx only when it is not
         *    currently in use;
         * D) Call x86_perf_event_set_period to restore PMCx;
         */

        /* We always operate on 4 pairs of event-select and counter MSRs */
        for (i = 0; i < 4; i++) {
                event = cpuc->events[i];
                if (event)
                        x86_perf_event_update(event);
        }

        for (i = 0; i < 4; i++) {
                wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
                wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
        }

        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

        for (i = 0; i < 4; i++) {
                event = cpuc->events[i];

                if (event) {
                        x86_perf_event_set_period(event);
                        __x86_pmu_enable_event(&event->hw,
                                        ARCH_PERFMON_EVENTSEL_ENABLE);
                } else
                        wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
        }
}

static void intel_pmu_nhm_enable_all(int added)
{
        if (added)
                intel_pmu_nhm_workaround();
        intel_pmu_enable_all(added);
}

1354 static inline u64 intel_pmu_get_status(void)
1355 {
1356         u64 status;
1357
1358         rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1359
1360         return status;
1361 }
1362
1363 static inline void intel_pmu_ack_status(u64 ack)
1364 {
1365         wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1366 }
1367
1368 static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
1369 {
1370         int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
1371         u64 ctrl_val, mask;
1372
1373         mask = 0xfULL << (idx * 4);
1374
1375         rdmsrl(hwc->config_base, ctrl_val);
1376         ctrl_val &= ~mask;
1377         wrmsrl(hwc->config_base, ctrl_val);
1378 }
1379
1380 static inline bool event_is_checkpointed(struct perf_event *event)
1381 {
1382         return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
1383 }
1384
1385 static void intel_pmu_disable_event(struct perf_event *event)
1386 {
1387         struct hw_perf_event *hwc = &event->hw;
1388         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1389
1390         if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
1391                 intel_pmu_disable_bts();
1392                 intel_pmu_drain_bts_buffer();
1393                 return;
1394         }
1395
1396         cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
1397         cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
1398         cpuc->intel_cp_status &= ~(1ull << hwc->idx);
1399
1400         /*
1401          * Must be disabled before any actual event,
1402          * because any event may be combined with LBR.
1403          */
1404         if (needs_branch_stack(event))
1405                 intel_pmu_lbr_disable(event);
1406
1407         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1408                 intel_pmu_disable_fixed(hwc);
1409                 return;
1410         }
1411
1412         x86_pmu_disable_event(event);
1413
1414         if (unlikely(event->attr.precise_ip))
1415                 intel_pmu_pebs_disable(event);
1416 }
1417
1418 static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
1419 {
1420         int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
1421         u64 ctrl_val, bits, mask;
1422
1423         /*
1424          * Enable IRQ generation (0x8),
1425          * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1426          * if requested:
1427          */
1428         bits = 0x8ULL;
1429         if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1430                 bits |= 0x2;
1431         if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1432                 bits |= 0x1;
1433
1434         /*
1435          * ANY bit is supported in v3 and up
1436          */
1437         if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
1438                 bits |= 0x4;
1439
1440         bits <<= (idx * 4);
1441         mask = 0xfULL << (idx * 4);
1442
1443         rdmsrl(hwc->config_base, ctrl_val);
1444         ctrl_val &= ~mask;
1445         ctrl_val |= bits;
1446         wrmsrl(hwc->config_base, ctrl_val);
1447 }
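/*
 * Worked example (illustrative only, not code used by the driver): enabling
 * fixed counter 1 (idx = 1) with both USR and OS counting requested gives
 *
 *   bits = 0x8 | 0x2 | 0x1 = 0xb
 *   bits <<= (1 * 4)          -> 0xb0
 *   mask  = 0xfULL << (1 * 4) -> 0xf0
 *
 * so only the control nibble belonging to fixed counter 1 in
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL is rewritten; the nibbles of the other
 * fixed counters are preserved by the read-modify-write above.
 */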
1448
1449 static void intel_pmu_enable_event(struct perf_event *event)
1450 {
1451         struct hw_perf_event *hwc = &event->hw;
1452         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1453
1454         if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
1455                 if (!__this_cpu_read(cpu_hw_events.enabled))
1456                         return;
1457
1458                 intel_pmu_enable_bts(hwc->config);
1459                 return;
1460         }
1461         /*
1462          * Must be enabled before any actual event,
1463          * because any event may be combined with LBR.
1464          */
1465         if (needs_branch_stack(event))
1466                 intel_pmu_lbr_enable(event);
1467
1468         if (event->attr.exclude_host)
1469                 cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
1470         if (event->attr.exclude_guest)
1471                 cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
1472
1473         if (unlikely(event_is_checkpointed(event)))
1474                 cpuc->intel_cp_status |= (1ull << hwc->idx);
1475
1476         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1477                 intel_pmu_enable_fixed(hwc);
1478                 return;
1479         }
1480
1481         if (unlikely(event->attr.precise_ip))
1482                 intel_pmu_pebs_enable(event);
1483
1484         __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
1485 }
1486
1487 /*
1488  * Save and restart an expired event. Called by NMI contexts,
1489  * so it has to be careful about preempting normal event ops:
1490  */
1491 int intel_pmu_save_and_restart(struct perf_event *event)
1492 {
1493         x86_perf_event_update(event);
1494         /*
1495          * For a checkpointed counter always reset back to 0.  This
1496          * avoids a situation where the counter overflows, aborts the
1497          * transaction and is then set back to shortly before the
1498          * overflow, and overflows and aborts again.
1499          */
1500         if (unlikely(event_is_checkpointed(event))) {
1501                 /* No race with NMIs because the counter should not be armed */
1502                 wrmsrl(event->hw.event_base, 0);
1503                 local64_set(&event->hw.prev_count, 0);
1504         }
1505         return x86_perf_event_set_period(event);
1506 }
1507
1508 static void intel_pmu_reset(void)
1509 {
1510         struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
1511         unsigned long flags;
1512         int idx;
1513
1514         if (!x86_pmu.num_counters)
1515                 return;
1516
1517         local_irq_save(flags);
1518
1519         pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
1520
1521         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1522                 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
1523                 wrmsrl_safe(x86_pmu_event_addr(idx),  0ull);
1524         }
1525         for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
1526                 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1527
1528         if (ds)
1529                 ds->bts_index = ds->bts_buffer_base;
1530
1531         local_irq_restore(flags);
1532 }
1533
1534 /*
1535  * This handler is triggered by the local APIC, so the APIC IRQ handling
1536  * rules apply:
1537  */
1538 static int intel_pmu_handle_irq(struct pt_regs *regs)
1539 {
1540         struct perf_sample_data data;
1541         struct cpu_hw_events *cpuc;
1542         int bit, loops;
1543         u64 status;
1544         int handled;
1545
1546         cpuc = this_cpu_ptr(&cpu_hw_events);
1547
1548         /*
1549          * There is no known reason not to always do a late ACK,
1550          * but just in case we make it opt-in.
1551          */
1552         if (!x86_pmu.late_ack)
1553                 apic_write(APIC_LVTPC, APIC_DM_NMI);
1554         intel_pmu_disable_all();
1555         handled = intel_pmu_drain_bts_buffer();
1556         handled += intel_bts_interrupt();
1557         status = intel_pmu_get_status();
1558         if (!status)
1559                 goto done;
1560
1561         loops = 0;
1562 again:
1563         intel_pmu_ack_status(status);
1564         if (++loops > 100) {
1565                 static bool warned = false;
1566                 if (!warned) {
1567                         WARN(1, "perfevents: irq loop stuck!\n");
1568                         perf_event_print_debug();
1569                         warned = true;
1570                 }
1571                 intel_pmu_reset();
1572                 goto done;
1573         }
1574
1575         inc_irq_stat(apic_perf_irqs);
1576
1577         intel_pmu_lbr_read();
1578
1579         /*
1580          * CondChgd bit 63 doesn't mean any overflow status. Ignore
1581          * and clear the bit.
1582          */
1583         if (__test_and_clear_bit(63, (unsigned long *)&status)) {
1584                 if (!status)
1585                         goto done;
1586         }
1587
1588         /*
1589          * PEBS overflow sets bit 62 in the global status register
1590          */
1591         if (__test_and_clear_bit(62, (unsigned long *)&status)) {
1592                 handled++;
1593                 x86_pmu.drain_pebs(regs);
1594         }
1595
1596         /*
1597          * Intel PT
1598          */
1599         if (__test_and_clear_bit(55, (unsigned long *)&status)) {
1600                 handled++;
1601                 intel_pt_interrupt();
1602         }
1603
1604         /*
1605          * Checkpointed counters can lead to 'spurious' PMIs because the
1606          * rollback caused by the PMI will have cleared the overflow status
1607          * bit. Therefore always force probe these counters.
1608          */
1609         status |= cpuc->intel_cp_status;
1610
1611         for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
1612                 struct perf_event *event = cpuc->events[bit];
1613
1614                 handled++;
1615
1616                 if (!test_bit(bit, cpuc->active_mask))
1617                         continue;
1618
1619                 if (!intel_pmu_save_and_restart(event))
1620                         continue;
1621
1622                 perf_sample_data_init(&data, 0, event->hw.last_period);
1623
1624                 if (has_branch_stack(event))
1625                         data.br_stack = &cpuc->lbr_stack;
1626
1627                 if (perf_event_overflow(event, &data, regs))
1628                         x86_pmu_stop(event, 0);
1629         }
1630
1631         /*
1632          * Repeat if there is more work to be done:
1633          */
1634         status = intel_pmu_get_status();
1635         if (status)
1636                 goto again;
1637
1638 done:
1639         intel_pmu_enable_all(0);
1640         /*
1641          * Only unmask the NMI after the overflow counters
1642          * have been reset. This avoids spurious NMIs on
1643          * Haswell CPUs.
1644          */
1645         if (x86_pmu.late_ack)
1646                 apic_write(APIC_LVTPC, APIC_DM_NMI);
1647         return handled;
1648 }
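/*
 * Summary of the GLOBAL_STATUS bits consumed by the handler above (as used
 * by this driver; see the SDM for the authoritative layout):
 *
 *   bit 63    CondChgd   - carries no overflow information, cleared and ignored
 *   bit 62    OvfBuffer  - PEBS/DS buffer overflow, handled via drain_pebs()
 *   bit 55    Trace ToPA - Intel PT buffer PMI, handled by intel_pt_interrupt()
 *   bits 32+  fixed-purpose counter overflow
 *   bits 0-n  general-purpose counter overflow
 */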
1649
1650 static struct event_constraint *
1651 intel_bts_constraints(struct perf_event *event)
1652 {
1653         struct hw_perf_event *hwc = &event->hw;
1654         unsigned int hw_event, bts_event;
1655
1656         if (event->attr.freq)
1657                 return NULL;
1658
1659         hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
1660         bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
1661
1662         if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
1663                 return &bts_constraint;
1664
1665         return NULL;
1666 }
1667
1668 static int intel_alt_er(int idx)
1669 {
1670         if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
1671                 return idx;
1672
1673         if (idx == EXTRA_REG_RSP_0)
1674                 return EXTRA_REG_RSP_1;
1675
1676         if (idx == EXTRA_REG_RSP_1)
1677                 return EXTRA_REG_RSP_0;
1678
1679         return idx;
1680 }
1681
1682 static void intel_fixup_er(struct perf_event *event, int idx)
1683 {
1684         event->hw.extra_reg.idx = idx;
1685
1686         if (idx == EXTRA_REG_RSP_0) {
1687                 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
1688                 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
1689                 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
1690         } else if (idx == EXTRA_REG_RSP_1) {
1691                 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
1692                 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
1693                 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
1694         }
1695 }
1696
1697 /*
1698  * manage allocation of shared extra msr for certain events
1699  *
1700  * sharing can be:
1701  * per-cpu: to be shared between the various events on a single PMU
1702  * per-core: per-cpu + shared by HT threads
1703  */
1704 static struct event_constraint *
1705 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
1706                                    struct perf_event *event,
1707                                    struct hw_perf_event_extra *reg)
1708 {
1709         struct event_constraint *c = &emptyconstraint;
1710         struct er_account *era;
1711         unsigned long flags;
1712         int idx = reg->idx;
1713
1714         /*
1715          * reg->alloc can be set due to existing state, so for fake cpuc we
1716          * need to ignore this, otherwise we might fail to allocate proper fake
1717          * state for this extra reg constraint. Also see the comment below.
1718          */
1719         if (reg->alloc && !cpuc->is_fake)
1720                 return NULL; /* call x86_get_event_constraint() */
1721
1722 again:
1723         era = &cpuc->shared_regs->regs[idx];
1724         /*
1725          * we use raw_spin_lock_irqsave() to avoid lockdep issues when
1726          * passing a fake cpuc
1727          */
1728         raw_spin_lock_irqsave(&era->lock, flags);
1729
1730         if (!atomic_read(&era->ref) || era->config == reg->config) {
1731
1732                 /*
1733                  * If it's a fake cpuc -- as per validate_{group,event}() we
1734                  * shouldn't touch event state and we can avoid doing so
1735                  * since both will only call get_event_constraints() once
1736                  * on each event, this avoids the need for reg->alloc.
1737                  *
1738                  * Not doing the ER fixup will only result in era->reg being
1739                  * wrong, but since we won't actually try and program hardware
1740                  * this isn't a problem either.
1741                  */
1742                 if (!cpuc->is_fake) {
1743                         if (idx != reg->idx)
1744                                 intel_fixup_er(event, idx);
1745
1746                         /*
1747                          * x86_schedule_events() can call get_event_constraints()
1748                          * multiple times on events in the case of incremental
1749                          * scheduling. reg->alloc ensures we only do the ER
1750                          * allocation once.
1751                          */
1752                         reg->alloc = 1;
1753                 }
1754
1755                 /* lock in msr value */
1756                 era->config = reg->config;
1757                 era->reg = reg->reg;
1758
1759                 /* one more user */
1760                 atomic_inc(&era->ref);
1761
1762                 /*
1763                  * need to call x86_get_event_constraint()
1764                  * to check if associated event has constraints
1765                  */
1766                 c = NULL;
1767         } else {
1768                 idx = intel_alt_er(idx);
1769                 if (idx != reg->idx) {
1770                         raw_spin_unlock_irqrestore(&era->lock, flags);
1771                         goto again;
1772                 }
1773         }
1774         raw_spin_unlock_irqrestore(&era->lock, flags);
1775
1776         return c;
1777 }
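/*
 * Illustrative walk-through (assuming an OFFCORE_RSP capable PMU with
 * ERF_HAS_RSP_1 set): event A claims EXTRA_REG_RSP_0 with config X; event B
 * then asks for RSP_0 with a different config Y.  The era for RSP_0 is busy,
 * so intel_alt_er() redirects B to EXTRA_REG_RSP_1 and we retry.  If RSP_1
 * is free (or already holds config Y), intel_fixup_er() rewrites B's event
 * code and extra_reg.reg to MSR_OFFCORE_RSP_1 and both events can be
 * scheduled; if RSP_1 is busy with yet another config, B is handed
 * &emptyconstraint and cannot be scheduled at this point.
 */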
1778
1779 static void
1780 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
1781                                    struct hw_perf_event_extra *reg)
1782 {
1783         struct er_account *era;
1784
1785         /*
1786          * Only put the constraint if the extra reg was actually allocated. Also
1787          * takes care of events which do not use an extra shared reg.
1788          *
1789          * Also, if this is a fake cpuc we shouldn't touch any event state
1790          * (reg->alloc) and we don't care about leaving inconsistent cpuc state
1791          * either since it'll be thrown out.
1792          */
1793         if (!reg->alloc || cpuc->is_fake)
1794                 return;
1795
1796         era = &cpuc->shared_regs->regs[reg->idx];
1797
1798         /* one fewer user */
1799         atomic_dec(&era->ref);
1800
1801         /* allocate again next time */
1802         reg->alloc = 0;
1803 }
1804
1805 static struct event_constraint *
1806 intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
1807                               struct perf_event *event)
1808 {
1809         struct event_constraint *c = NULL, *d;
1810         struct hw_perf_event_extra *xreg, *breg;
1811
1812         xreg = &event->hw.extra_reg;
1813         if (xreg->idx != EXTRA_REG_NONE) {
1814                 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
1815                 if (c == &emptyconstraint)
1816                         return c;
1817         }
1818         breg = &event->hw.branch_reg;
1819         if (breg->idx != EXTRA_REG_NONE) {
1820                 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
1821                 if (d == &emptyconstraint) {
1822                         __intel_shared_reg_put_constraints(cpuc, xreg);
1823                         c = d;
1824                 }
1825         }
1826         return c;
1827 }
1828
1829 struct event_constraint *
1830 x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1831 {
1832         struct event_constraint *c;
1833
1834         if (x86_pmu.event_constraints) {
1835                 for_each_event_constraint(c, x86_pmu.event_constraints) {
1836                         if ((event->hw.config & c->cmask) == c->code) {
1837                                 event->hw.flags |= c->flags;
1838                                 return c;
1839                         }
1840                 }
1841         }
1842
1843         return &unconstrained;
1844 }
1845
1846 static struct event_constraint *
1847 intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1848 {
1849         struct event_constraint *c;
1850
1851         c = intel_bts_constraints(event);
1852         if (c)
1853                 return c;
1854
1855         c = intel_pebs_constraints(event);
1856         if (c)
1857                 return c;
1858
1859         c = intel_shared_regs_constraints(cpuc, event);
1860         if (c)
1861                 return c;
1862
1863         return x86_get_event_constraints(cpuc, event);
1864 }
1865
1866 static void
1867 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
1868                                         struct perf_event *event)
1869 {
1870         struct hw_perf_event_extra *reg;
1871
1872         reg = &event->hw.extra_reg;
1873         if (reg->idx != EXTRA_REG_NONE)
1874                 __intel_shared_reg_put_constraints(cpuc, reg);
1875
1876         reg = &event->hw.branch_reg;
1877         if (reg->idx != EXTRA_REG_NONE)
1878                 __intel_shared_reg_put_constraints(cpuc, reg);
1879 }
1880
1881 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
1882                                         struct perf_event *event)
1883 {
1884         intel_put_shared_regs_event_constraints(cpuc, event);
1885 }
1886
1887 static void intel_pebs_aliases_core2(struct perf_event *event)
1888 {
1889         if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
1890                 /*
1891                  * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
1892                  * (0x003c) so that we can use it with PEBS.
1893                  *
1894                  * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
1895                  * PEBS capable. However we can use INST_RETIRED.ANY_P
1896                  * (0x00c0), which is a PEBS capable event, to get the same
1897                  * count.
1898                  *
1899                  * INST_RETIRED.ANY_P counts the number of cycles that retire
1900                  * CNTMASK instructions. By setting CNTMASK to a value (16)
1901                  * larger than the maximum number of instructions that can be
1902                  * retired per cycle (4) and then inverting the condition, we
1903                  * count all cycles that retire 16 or fewer instructions, which
1904                  * is every cycle.
1905                  *
1906                  * Thereby we gain a PEBS capable cycle counter.
1907                  */
1908                 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
1909
1910                 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
1911                 event->hw.config = alt_config;
1912         }
1913 }
1914
1915 static void intel_pebs_aliases_snb(struct perf_event *event)
1916 {
1917         if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
1918                 /*
1919                  * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
1920                  * (0x003c) so that we can use it with PEBS.
1921                  *
1922                  * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
1923                  * PEBS capable. However we can use UOPS_RETIRED.ALL
1924                  * (0x01c2), which is a PEBS capable event, to get the same
1925                  * count.
1926                  *
1927                  * UOPS_RETIRED.ALL counts the number of cycles that retire
1928                  * CNTMASK micro-ops. By setting CNTMASK to a value (16)
1929                  * larger than the maximum number of micro-ops that can be
1930                  * retired per cycle (4) and then inverting the condition, we
1931                  * count all cycles that retire 16 or fewer micro-ops, which
1932                  * is every cycle.
1933                  *
1934                  * Thereby we gain a PEBS capable cycle counter.
1935                  */
1936                 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
1937
1938                 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
1939                 event->hw.config = alt_config;
1940         }
1941 }
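/*
 * Worked encodings for the two aliases above (illustrative), using the
 * config bit layout exposed by the format attributes further down
 * (event: 0-7, umask: 8-15, inv: 23, cmask: 24-31):
 *
 *   core2: X86_CONFIG(.event=0xc0, .inv=1, .cmask=16)
 *            = 0xc0 | (1 << 23) | (16 << 24)                = 0x108000c0
 *   snb:   X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16)
 *            = 0xc2 | (0x01 << 8) | (1 << 23) | (16 << 24)  = 0x108001c2
 *
 * The low event/umask bits of the user's original config are replaced while
 * the remaining (non X86_RAW_EVENT_MASK) bits are carried over unchanged.
 */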
1942
1943 static int intel_pmu_hw_config(struct perf_event *event)
1944 {
1945         int ret = x86_pmu_hw_config(event);
1946
1947         if (ret)
1948                 return ret;
1949
1950         if (event->attr.precise_ip && x86_pmu.pebs_aliases)
1951                 x86_pmu.pebs_aliases(event);
1952
1953         if (needs_branch_stack(event)) {
1954                 ret = intel_pmu_setup_lbr_filter(event);
1955                 if (ret)
1956                         return ret;
1957
1958                 /*
1959                  * BTS is set up earlier in this path, so don't account twice
1960                  */
1961                 if (!intel_pmu_has_bts(event)) {
1962                         /* disallow lbr if conflicting events are present */
1963                         if (x86_add_exclusive(x86_lbr_exclusive_lbr))
1964                                 return -EBUSY;
1965
1966                         event->destroy = hw_perf_lbr_event_destroy;
1967                 }
1968         }
1969
1970         if (event->attr.type != PERF_TYPE_RAW)
1971                 return 0;
1972
1973         if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
1974                 return 0;
1975
1976         if (x86_pmu.version < 3)
1977                 return -EINVAL;
1978
1979         if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1980                 return -EACCES;
1981
1982         event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
1983
1984         return 0;
1985 }
1986
1987 struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
1988 {
1989         if (x86_pmu.guest_get_msrs)
1990                 return x86_pmu.guest_get_msrs(nr);
1991         *nr = 0;
1992         return NULL;
1993 }
1994 EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
1995
1996 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
1997 {
1998         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1999         struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
2000
2001         arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
2002         arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
2003         arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
2004         /*
2005          * If PMU counter has PEBS enabled it is not enough to disable counter
2006          * on a guest entry since PEBS memory write can overshoot guest entry
2007          * and corrupt guest memory. Disabling PEBS solves the problem.
2008          */
2009         arr[1].msr = MSR_IA32_PEBS_ENABLE;
2010         arr[1].host = cpuc->pebs_enabled;
2011         arr[1].guest = 0;
2012
2013         *nr = 2;
2014         return arr;
2015 }
2016
2017 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
2018 {
2019         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2020         struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
2021         int idx;
2022
2023         for (idx = 0; idx < x86_pmu.num_counters; idx++)  {
2024                 struct perf_event *event = cpuc->events[idx];
2025
2026                 arr[idx].msr = x86_pmu_config_addr(idx);
2027                 arr[idx].host = arr[idx].guest = 0;
2028
2029                 if (!test_bit(idx, cpuc->active_mask))
2030                         continue;
2031
2032                 arr[idx].host = arr[idx].guest =
2033                         event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
2034
2035                 if (event->attr.exclude_host)
2036                         arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
2037                 else if (event->attr.exclude_guest)
2038                         arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
2039         }
2040
2041         *nr = x86_pmu.num_counters;
2042         return arr;
2043 }
2044
2045 static void core_pmu_enable_event(struct perf_event *event)
2046 {
2047         if (!event->attr.exclude_host)
2048                 x86_pmu_enable_event(event);
2049 }
2050
2051 static void core_pmu_enable_all(int added)
2052 {
2053         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2054         int idx;
2055
2056         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
2057                 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
2058
2059                 if (!test_bit(idx, cpuc->active_mask) ||
2060                                 cpuc->events[idx]->attr.exclude_host)
2061                         continue;
2062
2063                 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
2064         }
2065 }
2066
2067 static int hsw_hw_config(struct perf_event *event)
2068 {
2069         int ret = intel_pmu_hw_config(event);
2070
2071         if (ret)
2072                 return ret;
2073         if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
2074                 return 0;
2075         event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
2076
2077         /*
2078          * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
2079          * PEBS or in ANY thread mode. Since the results are nonsensical, forbid
2080          * this combination.
2081          */
2082         if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
2083              ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
2084               event->attr.precise_ip > 0))
2085                 return -EOPNOTSUPP;
2086
2087         if (event_is_checkpointed(event)) {
2088                 /*
2089                  * Sampling of checkpointed events can cause situations where
2090                  * the CPU constantly aborts because of an overflow, which is
2091                  * then checkpointed back and ignored. Forbid checkpointing
2092                  * for sampling.
2093                  *
2094                  * But still allow a long sampling period, so that perf stat
2095                  * from KVM works.
2096                  */
2097                 if (event->attr.sample_period > 0 &&
2098                     event->attr.sample_period < 0x7fffffff)
2099                         return -EOPNOTSUPP;
2100         }
2101         return 0;
2102 }
2103
2104 static struct event_constraint counter2_constraint =
2105                         EVENT_CONSTRAINT(0, 0x4, 0);
2106
2107 static struct event_constraint *
2108 hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
2109 {
2110         struct event_constraint *c = intel_get_event_constraints(cpuc, event);
2111
2112         /* Handle the special quirk that in_tx_checkpointed works only on counter 2 */
2113         if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
2114                 if (c->idxmsk64 & (1U << 2))
2115                         return &counter2_constraint;
2116                 return &emptyconstraint;
2117         }
2118
2119         return c;
2120 }
2121
2122 /*
2123  * Broadwell:
2124  *
2125  * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
2126  * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
2127  * the two to enforce a minimum period of 128 (the smallest value that has bits
2128  * 0-5 cleared and >= 100).
2129  *
2130  * Because of how the code in x86_perf_event_set_period() works, the truncation
2131  * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
2132  * to make up for the 'lost' events due to carrying the 'error' in period_left.
2133  *
2134  * Therefore the effective (average) period matches the requested period,
2135  * despite coarser hardware granularity.
2136  */
2137 static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
2138 {
2139         if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
2140                         X86_CONFIG(.event=0xc0, .umask=0x01)) {
2141                 if (left < 128)
2142                         left = 128;
2143                 left &= ~0x3fu;
2144         }
2145         return left;
2146 }
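/*
 * Worked example (illustrative): a requested period of 211 for
 * INST_RETIRED.ALL is first clamped to >= 128 (a no-op here) and then
 * rounded down to 211 & ~0x3f = 192.  The 19 "lost" events are carried in
 * period_left by x86_perf_event_set_period(), so subsequent periods get
 * stretched and the average period still converges on the requested 211.
 */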
2147
2148 PMU_FORMAT_ATTR(event,  "config:0-7"    );
2149 PMU_FORMAT_ATTR(umask,  "config:8-15"   );
2150 PMU_FORMAT_ATTR(edge,   "config:18"     );
2151 PMU_FORMAT_ATTR(pc,     "config:19"     );
2152 PMU_FORMAT_ATTR(any,    "config:21"     ); /* v3 + */
2153 PMU_FORMAT_ATTR(inv,    "config:23"     );
2154 PMU_FORMAT_ATTR(cmask,  "config:24-31"  );
2155 PMU_FORMAT_ATTR(in_tx,  "config:32");
2156 PMU_FORMAT_ATTR(in_tx_cp, "config:33");
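/*
 * These format strings are exported via sysfs and consumed by the perf
 * tool's raw-event syntax.  Usage sketch (user-space, not kernel code):
 *
 *   perf stat -e cpu/event=0x3c,in_tx=1/ ...
 *
 * which assembles attr.config = 0x3c | (1ULL << 32), i.e. the cycles-t
 * encoding defined for Haswell further down in this file.
 */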
2157
2158 static struct attribute *intel_arch_formats_attr[] = {
2159         &format_attr_event.attr,
2160         &format_attr_umask.attr,
2161         &format_attr_edge.attr,
2162         &format_attr_pc.attr,
2163         &format_attr_inv.attr,
2164         &format_attr_cmask.attr,
2165         NULL,
2166 };
2167
2168 ssize_t intel_event_sysfs_show(char *page, u64 config)
2169 {
2170         u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
2171
2172         return x86_event_sysfs_show(page, config, event);
2173 }
2174
2175 static __initconst const struct x86_pmu core_pmu = {
2176         .name                   = "core",
2177         .handle_irq             = x86_pmu_handle_irq,
2178         .disable_all            = x86_pmu_disable_all,
2179         .enable_all             = core_pmu_enable_all,
2180         .enable                 = core_pmu_enable_event,
2181         .disable                = x86_pmu_disable_event,
2182         .hw_config              = x86_pmu_hw_config,
2183         .schedule_events        = x86_schedule_events,
2184         .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
2185         .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
2186         .event_map              = intel_pmu_event_map,
2187         .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
2188         .apic                   = 1,
2189         /*
2190          * Intel PMCs cannot be accessed sanely above 32 bit width,
2191          * so we install an artificial 1<<31 period regardless of
2192          * the generic event period:
2193          */
2194         .max_period             = (1ULL << 31) - 1,
2195         .get_event_constraints  = intel_get_event_constraints,
2196         .put_event_constraints  = intel_put_event_constraints,
2197         .event_constraints      = intel_core_event_constraints,
2198         .guest_get_msrs         = core_guest_get_msrs,
2199         .format_attrs           = intel_arch_formats_attr,
2200         .events_sysfs_show      = intel_event_sysfs_show,
2201 };
2202
2203 struct intel_shared_regs *allocate_shared_regs(int cpu)
2204 {
2205         struct intel_shared_regs *regs;
2206         int i;
2207
2208         regs = kzalloc_node(sizeof(struct intel_shared_regs),
2209                             GFP_KERNEL, cpu_to_node(cpu));
2210         if (regs) {
2211                 /*
2212                  * initialize the locks to keep lockdep happy
2213                  */
2214                 for (i = 0; i < EXTRA_REG_MAX; i++)
2215                         raw_spin_lock_init(&regs->regs[i].lock);
2216
2217                 regs->core_id = -1;
2218         }
2219         return regs;
2220 }
2221
2222 static int intel_pmu_cpu_prepare(int cpu)
2223 {
2224         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2225
2226         if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))
2227                 return NOTIFY_OK;
2228
2229         cpuc->shared_regs = allocate_shared_regs(cpu);
2230         if (!cpuc->shared_regs)
2231                 return NOTIFY_BAD;
2232
2233         return NOTIFY_OK;
2234 }
2235
2236 static void intel_pmu_cpu_starting(int cpu)
2237 {
2238         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2239         int core_id = topology_core_id(cpu);
2240         int i;
2241
2242         init_debug_store_on_cpu(cpu);
2243         /*
2244          * Deal with CPUs that don't clear their LBRs on power-up.
2245          */
2246         intel_pmu_lbr_reset();
2247
2248         cpuc->lbr_sel = NULL;
2249
2250         if (!cpuc->shared_regs)
2251                 return;
2252
2253         if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) {
2254                 for_each_cpu(i, topology_thread_cpumask(cpu)) {
2255                         struct intel_shared_regs *pc;
2256
2257                         pc = per_cpu(cpu_hw_events, i).shared_regs;
2258                         if (pc && pc->core_id == core_id) {
2259                                 cpuc->kfree_on_online = cpuc->shared_regs;
2260                                 cpuc->shared_regs = pc;
2261                                 break;
2262                         }
2263                 }
2264                 cpuc->shared_regs->core_id = core_id;
2265                 cpuc->shared_regs->refcnt++;
2266         }
2267
2268         if (x86_pmu.lbr_sel_map)
2269                 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
2270 }
2271
2272 static void intel_pmu_cpu_dying(int cpu)
2273 {
2274         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2275         struct intel_shared_regs *pc;
2276
2277         pc = cpuc->shared_regs;
2278         if (pc) {
2279                 if (pc->core_id == -1 || --pc->refcnt == 0)
2280                         kfree(pc);
2281                 cpuc->shared_regs = NULL;
2282         }
2283
2284         fini_debug_store_on_cpu(cpu);
2285 }
2286
2287 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
2288
2289 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
2290
2291 static struct attribute *intel_arch3_formats_attr[] = {
2292         &format_attr_event.attr,
2293         &format_attr_umask.attr,
2294         &format_attr_edge.attr,
2295         &format_attr_pc.attr,
2296         &format_attr_any.attr,
2297         &format_attr_inv.attr,
2298         &format_attr_cmask.attr,
2299         &format_attr_in_tx.attr,
2300         &format_attr_in_tx_cp.attr,
2301
2302         &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
2303         &format_attr_ldlat.attr, /* PEBS load latency */
2304         NULL,
2305 };
2306
2307 static __initconst const struct x86_pmu intel_pmu = {
2308         .name                   = "Intel",
2309         .handle_irq             = intel_pmu_handle_irq,
2310         .disable_all            = intel_pmu_disable_all,
2311         .enable_all             = intel_pmu_enable_all,
2312         .enable                 = intel_pmu_enable_event,
2313         .disable                = intel_pmu_disable_event,
2314         .hw_config              = intel_pmu_hw_config,
2315         .schedule_events        = x86_schedule_events,
2316         .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
2317         .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
2318         .event_map              = intel_pmu_event_map,
2319         .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
2320         .apic                   = 1,
2321         /*
2322          * Intel PMCs cannot be accessed sanely above 32 bit width,
2323          * so we install an artificial 1<<31 period regardless of
2324          * the generic event period:
2325          */
2326         .max_period             = (1ULL << 31) - 1,
2327         .get_event_constraints  = intel_get_event_constraints,
2328         .put_event_constraints  = intel_put_event_constraints,
2329         .pebs_aliases           = intel_pebs_aliases_core2,
2330
2331         .format_attrs           = intel_arch3_formats_attr,
2332         .events_sysfs_show      = intel_event_sysfs_show,
2333
2334         .cpu_prepare            = intel_pmu_cpu_prepare,
2335         .cpu_starting           = intel_pmu_cpu_starting,
2336         .cpu_dying              = intel_pmu_cpu_dying,
2337         .guest_get_msrs         = intel_guest_get_msrs,
2338         .sched_task             = intel_pmu_lbr_sched_task,
2339 };
2340
2341 static __init void intel_clovertown_quirk(void)
2342 {
2343         /*
2344          * PEBS is unreliable due to:
2345          *
2346          *   AJ67  - PEBS may experience CPL leaks
2347          *   AJ68  - PEBS PMI may be delayed by one event
2348          *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
2349          *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
2350          *
2351          * AJ67 could be worked around by restricting the OS/USR flags.
2352          * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
2353          *
2354          * AJ106 could possibly be worked around by not allowing LBR
2355          *       usage from PEBS, including the fixup.
2356          * AJ68  could possibly be worked around by always programming
2357          *       a pebs_event_reset[0] value and coping with the lost events.
2358          *
2359          * But taken together it might just make sense to not enable PEBS on
2360          * these chips.
2361          */
2362         pr_warn("PEBS disabled due to CPU errata\n");
2363         x86_pmu.pebs = 0;
2364         x86_pmu.pebs_constraints = NULL;
2365 }
2366
2367 static int intel_snb_pebs_broken(int cpu)
2368 {
2369         u32 rev = UINT_MAX; /* default to broken for unknown models */
2370
2371         switch (cpu_data(cpu).x86_model) {
2372         case 42: /* SNB */
2373                 rev = 0x28;
2374                 break;
2375
2376         case 45: /* SNB-EP */
2377                 switch (cpu_data(cpu).x86_mask) {
2378                 case 6: rev = 0x618; break;
2379                 case 7: rev = 0x70c; break;
2380                 }
2381         }
2382
2383         return (cpu_data(cpu).microcode < rev);
2384 }
2385
2386 static void intel_snb_check_microcode(void)
2387 {
2388         int pebs_broken = 0;
2389         int cpu;
2390
2391         get_online_cpus();
2392         for_each_online_cpu(cpu) {
2393                 if ((pebs_broken = intel_snb_pebs_broken(cpu)))
2394                         break;
2395         }
2396         put_online_cpus();
2397
2398         if (pebs_broken == x86_pmu.pebs_broken)
2399                 return;
2400
2401         /*
2402          * Serialized by the microcode lock.
2403          */
2404         if (x86_pmu.pebs_broken) {
2405                 pr_info("PEBS enabled due to microcode update\n");
2406                 x86_pmu.pebs_broken = 0;
2407         } else {
2408                 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
2409                 x86_pmu.pebs_broken = 1;
2410         }
2411 }
2412
2413 /*
2414  * Under certain circumstances, accessing certain MSRs may cause a #GP fault.
2415  * This function tests whether the given MSR can be safely accessed.
2416  */
2417 static bool check_msr(unsigned long msr, u64 mask)
2418 {
2419         u64 val_old, val_new, val_tmp;
2420
2421         /*
2422          * Read the current value, change it and read it back to see if it
2423          * matches; this is needed to detect certain hardware emulators
2424          * (qemu/kvm) that don't trap on the MSR access and always return 0s.
2425          */
2426         if (rdmsrl_safe(msr, &val_old))
2427                 return false;
2428
2429         /*
2430          * Only change the bits which can be updated by wrmsrl.
2431          */
2432         val_tmp = val_old ^ mask;
2433         if (wrmsrl_safe(msr, val_tmp) ||
2434             rdmsrl_safe(msr, &val_new))
2435                 return false;
2436
2437         if (val_new != val_tmp)
2438                 return false;
2439
2440         /* At this point it is certain that the MSR can be safely accessed.
2441          * Restore the old value and return.
2442          */
2443         wrmsrl(msr, val_old);
2444
2445         return true;
2446 }
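/*
 * Usage sketch (illustrative, assuming an LBR capable PMU): a caller can
 * probe an MSR before relying on the feature behind it, e.g.
 *
 *   if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
 *           x86_pmu.lbr_nr = 0;
 *
 * which turns off LBR support when the TOS MSR cannot be written and read
 * back (for instance under an emulator that silently drops the write).
 */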
2447
2448 static __init void intel_sandybridge_quirk(void)
2449 {
2450         x86_pmu.check_microcode = intel_snb_check_microcode;
2451         intel_snb_check_microcode();
2452 }
2453
2454 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
2455         { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
2456         { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
2457         { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
2458         { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
2459         { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
2460         { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
2461         { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
2462 };
2463
2464 static __init void intel_arch_events_quirk(void)
2465 {
2466         int bit;
2467
2468         /* disable events that CPUID reports as not present */
2469         for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
2470                 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
2471                 pr_warn("CPUID marked event: \'%s\' unavailable\n",
2472                         intel_arch_events_map[bit].name);
2473         }
2474 }
2475
2476 static __init void intel_nehalem_quirk(void)
2477 {
2478         union cpuid10_ebx ebx;
2479
2480         ebx.full = x86_pmu.events_maskl;
2481         if (ebx.split.no_branch_misses_retired) {
2482                 /*
2483                  * Erratum AAJ80 detected, we work it around by using
2484                  * the BR_MISP_EXEC.ANY event. This will over-count
2485                  * branch-misses, but it's still much better than the
2486                  * architectural event which is often completely bogus:
2487                  */
2488                 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
2489                 ebx.split.no_branch_misses_retired = 0;
2490                 x86_pmu.events_maskl = ebx.full;
2491                 pr_info("CPU erratum AAJ80 worked around\n");
2492         }
2493 }
2494
2495 EVENT_ATTR_STR(mem-loads,       mem_ld_hsw,     "event=0xcd,umask=0x1,ldlat=3");
2496 EVENT_ATTR_STR(mem-stores,      mem_st_hsw,     "event=0xd0,umask=0x82")
2497
2498 /* Haswell special events */
2499 EVENT_ATTR_STR(tx-start,        tx_start,       "event=0xc9,umask=0x1");
2500 EVENT_ATTR_STR(tx-commit,       tx_commit,      "event=0xc9,umask=0x2");
2501 EVENT_ATTR_STR(tx-abort,        tx_abort,       "event=0xc9,umask=0x4");
2502 EVENT_ATTR_STR(tx-capacity,     tx_capacity,    "event=0x54,umask=0x2");
2503 EVENT_ATTR_STR(tx-conflict,     tx_conflict,    "event=0x54,umask=0x1");
2504 EVENT_ATTR_STR(el-start,        el_start,       "event=0xc8,umask=0x1");
2505 EVENT_ATTR_STR(el-commit,       el_commit,      "event=0xc8,umask=0x2");
2506 EVENT_ATTR_STR(el-abort,        el_abort,       "event=0xc8,umask=0x4");
2507 EVENT_ATTR_STR(el-capacity,     el_capacity,    "event=0x54,umask=0x2");
2508 EVENT_ATTR_STR(el-conflict,     el_conflict,    "event=0x54,umask=0x1");
2509 EVENT_ATTR_STR(cycles-t,        cycles_t,       "event=0x3c,in_tx=1");
2510 EVENT_ATTR_STR(cycles-ct,       cycles_ct,      "event=0x3c,in_tx=1,in_tx_cp=1");
2511
2512 static struct attribute *hsw_events_attrs[] = {
2513         EVENT_PTR(tx_start),
2514         EVENT_PTR(tx_commit),
2515         EVENT_PTR(tx_abort),
2516         EVENT_PTR(tx_capacity),
2517         EVENT_PTR(tx_conflict),
2518         EVENT_PTR(el_start),
2519         EVENT_PTR(el_commit),
2520         EVENT_PTR(el_abort),
2521         EVENT_PTR(el_capacity),
2522         EVENT_PTR(el_conflict),
2523         EVENT_PTR(cycles_t),
2524         EVENT_PTR(cycles_ct),
2525         EVENT_PTR(mem_ld_hsw),
2526         EVENT_PTR(mem_st_hsw),
2527         NULL
2528 };
2529
2530 __init int intel_pmu_init(void)
2531 {
2532         union cpuid10_edx edx;
2533         union cpuid10_eax eax;
2534         union cpuid10_ebx ebx;
2535         struct event_constraint *c;
2536         unsigned int unused;
2537         struct extra_reg *er;
2538         int version, i;
2539
2540         if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2541                 switch (boot_cpu_data.x86) {
2542                 case 0x6:
2543                         return p6_pmu_init();
2544                 case 0xb:
2545                         return knc_pmu_init();
2546                 case 0xf:
2547                         return p4_pmu_init();
2548                 }
2549                 return -ENODEV;
2550         }
2551
2552         /*
2553          * Check whether the Architectural PerfMon supports
2554          * Branch Misses Retired hw_event or not.
2555          */
2556         cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
2557         if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
2558                 return -ENODEV;
2559
2560         version = eax.split.version_id;
2561         if (version < 2)
2562                 x86_pmu = core_pmu;
2563         else
2564                 x86_pmu = intel_pmu;
2565
2566         x86_pmu.version                 = version;
2567         x86_pmu.num_counters            = eax.split.num_counters;
2568         x86_pmu.cntval_bits             = eax.split.bit_width;
2569         x86_pmu.cntval_mask             = (1ULL << eax.split.bit_width) - 1;
2570
2571         x86_pmu.events_maskl            = ebx.full;
2572         x86_pmu.events_mask_len         = eax.split.mask_length;
2573
2574         x86_pmu.max_pebs_events         = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
2575
2576         /*
2577          * Quirk: v2 perfmon does not report fixed-purpose events, so
2578          * assume at least 3 events:
2579          */
2580         if (version > 1)
2581                 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
2582
2583         if (boot_cpu_has(X86_FEATURE_PDCM)) {
2584                 u64 capabilities;
2585
2586                 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
2587                 x86_pmu.intel_cap.capabilities = capabilities;
2588         }
2589
2590         intel_ds_init();
2591
2592         x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
2593
2594         /*
2595          * Install the hw-cache-events table:
2596          */
2597         switch (boot_cpu_data.x86_model) {
2598         case 14: /* 65nm Core "Yonah" */
2599                 pr_cont("Core events, ");
2600                 break;
2601
2602         case 15: /* 65nm Core2 "Merom"          */
2603                 x86_add_quirk(intel_clovertown_quirk);
2604         case 22: /* 65nm Core2 "Merom-L"        */
2605         case 23: /* 45nm Core2 "Penryn"         */
2606         case 29: /* 45nm Core2 "Dunnington" (MP) */
2607                 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
2608                        sizeof(hw_cache_event_ids));
2609
2610                 intel_pmu_lbr_init_core();
2611
2612                 x86_pmu.event_constraints = intel_core2_event_constraints;
2613                 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
2614                 pr_cont("Core2 events, ");
2615                 break;
2616
2617         case 30: /* 45nm Nehalem    */
2618         case 26: /* 45nm Nehalem-EP */
2619         case 46: /* 45nm Nehalem-EX */
2620                 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
2621                        sizeof(hw_cache_event_ids));
2622                 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
2623                        sizeof(hw_cache_extra_regs));
2624
2625                 intel_pmu_lbr_init_nhm();
2626
2627                 x86_pmu.event_constraints = intel_nehalem_event_constraints;
2628                 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
2629                 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
2630                 x86_pmu.extra_regs = intel_nehalem_extra_regs;
2631
2632                 x86_pmu.cpu_events = nhm_events_attrs;
2633
2634                 /* UOPS_ISSUED.STALLED_CYCLES */
2635                 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2636                         X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
2637                 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
2638                 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
2639                         X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
2640
2641                 x86_add_quirk(intel_nehalem_quirk);
2642
2643                 pr_cont("Nehalem events, ");
2644                 break;
2645
2646         case 28: /* 45nm Atom "Pineview"   */
2647         case 38: /* 45nm Atom "Lincroft"   */
2648         case 39: /* 32nm Atom "Penwell"    */
2649         case 53: /* 32nm Atom "Cloverview" */
2650         case 54: /* 32nm Atom "Cedarview"  */
2651                 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
2652                        sizeof(hw_cache_event_ids));
2653
2654                 intel_pmu_lbr_init_atom();
2655
2656                 x86_pmu.event_constraints = intel_gen_event_constraints;
2657                 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
2658                 pr_cont("Atom events, ");
2659                 break;
2660
2661         case 55: /* 22nm Atom "Silvermont"                */
2662         case 76: /* 14nm Atom "Airmont"                   */
2663         case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
2664                 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
2665                         sizeof(hw_cache_event_ids));
2666                 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
2667                        sizeof(hw_cache_extra_regs));
2668
2669                 intel_pmu_lbr_init_atom();
2670
2671                 x86_pmu.event_constraints = intel_slm_event_constraints;
2672                 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
2673                 x86_pmu.extra_regs = intel_slm_extra_regs;
2674                 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2675                 pr_cont("Silvermont events, ");
2676                 break;
2677
2678         case 37: /* 32nm Westmere    */
2679         case 44: /* 32nm Westmere-EP */
2680         case 47: /* 32nm Westmere-EX */
2681                 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
2682                        sizeof(hw_cache_event_ids));
2683                 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
2684                        sizeof(hw_cache_extra_regs));
2685
2686                 intel_pmu_lbr_init_nhm();
2687
2688                 x86_pmu.event_constraints = intel_westmere_event_constraints;
2689                 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
2690                 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
2691                 x86_pmu.extra_regs = intel_westmere_extra_regs;
2692                 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2693
2694                 x86_pmu.cpu_events = nhm_events_attrs;
2695
2696                 /* UOPS_ISSUED.STALLED_CYCLES */
2697                 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2698                         X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
2699                 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
2700                 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
2701                         X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
2702
2703                 pr_cont("Westmere events, ");
2704                 break;
2705
2706         case 42: /* 32nm SandyBridge         */
2707         case 45: /* 32nm SandyBridge-E/EN/EP */
2708                 x86_add_quirk(intel_sandybridge_quirk);
2709                 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
2710                        sizeof(hw_cache_event_ids));
2711                 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
2712                        sizeof(hw_cache_extra_regs));
2713
2714                 intel_pmu_lbr_init_snb();
2715
2716                 x86_pmu.event_constraints = intel_snb_event_constraints;
2717                 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
2718                 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
2719                 if (boot_cpu_data.x86_model == 45)
2720                         x86_pmu.extra_regs = intel_snbep_extra_regs;
2721                 else
2722                         x86_pmu.extra_regs = intel_snb_extra_regs;
2723                 /* all extra regs are per-cpu when HT is on */
2724                 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2725                 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
2726
2727                 x86_pmu.cpu_events = snb_events_attrs;
2728
2729                 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
2730                 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2731                         X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
2732                 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
2733                 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
2734                         X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
2735
2736                 pr_cont("SandyBridge events, ");
2737                 break;
2738
2739         case 58: /* 22nm IvyBridge       */
2740         case 62: /* 22nm IvyBridge-EP/EX */
2741                 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
2742                        sizeof(hw_cache_event_ids));
2743                 /* dTLB-load-misses on IVB is different than SNB */
2744                 hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
2745
2746                 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
2747                        sizeof(hw_cache_extra_regs));
2748
2749                 intel_pmu_lbr_init_snb();
2750
2751                 x86_pmu.event_constraints = intel_ivb_event_constraints;
2752                 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
2753                 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
2754                 if (boot_cpu_data.x86_model == 62)
2755                         x86_pmu.extra_regs = intel_snbep_extra_regs;
2756                 else
2757                         x86_pmu.extra_regs = intel_snb_extra_regs;
2758                 /* all extra regs are per-cpu when HT is on */
2759                 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2760                 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
2761
2762                 x86_pmu.cpu_events = snb_events_attrs;
2763
2764                 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
2765                 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2766                         X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
2767
2768                 pr_cont("IvyBridge events, ");
2769                 break;
2770
2772         case 60: /* 22nm Haswell Core */
2773         case 63: /* 22nm Haswell Server */
2774         case 69: /* 22nm Haswell ULT */
2775         case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
2776                 x86_pmu.late_ack = true;
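                     /*
                      * With late_ack set, the PMI handler acks the local APIC
                      * (LVTPC) at the end of the handler instead of at the
                      * beginning; Haswell (and Broadwell below) needs this
                      * ordering to avoid spurious PMIs.
                      */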
2777                 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
2778                 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
2779
2780                 intel_pmu_lbr_init_hsw();
2781
2782                 x86_pmu.event_constraints = intel_hsw_event_constraints;
2783                 x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
2784                 x86_pmu.extra_regs = intel_snbep_extra_regs;
2785                 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
2786                 /* all extra regs are per-cpu when HT is on */
2787                 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2788                 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
2789
2790                 x86_pmu.hw_config = hsw_hw_config;
2791                 x86_pmu.get_event_constraints = hsw_get_event_constraints;
2792                 x86_pmu.cpu_events = hsw_events_attrs;
2793                 x86_pmu.lbr_double_abort = true;
2794                 pr_cont("Haswell events, ");
2795                 break;
2796
2797         case 61: /* 14nm Broadwell Core-M */
2798         case 86: /* 14nm Broadwell Xeon D */
2799                 x86_pmu.late_ack = true;
2800                 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
2801                 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
2802
2803                 /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
2804                 hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] =
2805                         HSW_DEMAND_READ | BDW_L3_MISS | HSW_SNOOP_DRAM;
2806                 hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] =
2807                         HSW_DEMAND_WRITE | BDW_L3_MISS | HSW_SNOOP_DRAM;
2808                 hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] =
2809                         HSW_DEMAND_READ | BDW_L3_MISS_LOCAL | HSW_SNOOP_DRAM;
2810                 hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] =
2811                         HSW_DEMAND_WRITE | BDW_L3_MISS_LOCAL | HSW_SNOOP_DRAM;
2812
2813                 intel_pmu_lbr_init_snb();
2814
2815                 x86_pmu.event_constraints = intel_bdw_event_constraints;
2816                 x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
2817                 x86_pmu.extra_regs = intel_snbep_extra_regs;
2818                 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
2819                 /* all extra regs are per-cpu when HT is on */
2820                 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2821                 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
2822
2823                 x86_pmu.hw_config = hsw_hw_config;
2824                 x86_pmu.get_event_constraints = hsw_get_event_constraints;
2825                 x86_pmu.cpu_events = hsw_events_attrs;
2826                 x86_pmu.limit_period = bdw_limit_period;
2827                 pr_cont("Broadwell events, ");
2828                 break;
2829
2830         default:
2831                 switch (x86_pmu.version) {
2832                 case 1:
2833                         x86_pmu.event_constraints = intel_v1_event_constraints;
2834                         pr_cont("generic architected perfmon v1, ");
2835                         break;
2836                 default:
2837                         /*
2838                          * default constraints for v2 and up
2839                          */
2840                         x86_pmu.event_constraints = intel_gen_event_constraints;
2841                         pr_cont("generic architected perfmon, ");
2842                         break;
2843                 }
2844         }
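             /*
              * Models not handled in the switch above fall back to the
              * architected perfmon events enumerated through CPUID leaf 0xa.
              * Version 1 of the architecture has no fixed counters, hence
              * the separate v1 constraint table.
              */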
2845
2846         if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
2847                 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2848                      x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
2849                 x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
2850         }
2851         x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
2852
2853         if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
2854                 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2855                      x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
2856                 x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
2857         }
2858
2859         x86_pmu.intel_ctrl |=
2860                 ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
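             /*
              * intel_ctrl now has the same layout as MSR_CORE_PERF_GLOBAL_CTRL:
              * one enable bit per generic counter in the low bits and one per
              * fixed counter starting at bit 32 (INTEL_PMC_IDX_FIXED).
              */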
2861
2862         if (x86_pmu.event_constraints) {
2863                 /*
2864                  * The event on fixed counter 2 (REF_CYCLES) only works on that
2865                  * counter, so do not extend its mask to the generic counters.
2866                  */
2867                 for_each_event_constraint(c, x86_pmu.event_constraints) {
2868                         if (c->cmask != FIXED_EVENT_FLAGS
2869                             || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
2870                                 continue;
2871                         }
2872
2873                         c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
2874                         c->weight += x86_pmu.num_counters;
2875                 }
2876         }
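             /*
              * The loop above extends each fixed-counter constraint (cmask ==
              * FIXED_EVENT_FLAGS) so that the event may also be scheduled on
              * any generic counter, with REF_CYCLES as the sole exception.
              */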
2877
2878         /*
2879          * Accessing LBR MSRs may cause a #GP under certain circumstances,
2880          * e.g. KVM doesn't support the LBR MSRs.
2881          * Check all LBR MSRs here and disable LBR access if any of them
2882          * cannot be accessed.
2883          */
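             /*
              * check_msr() probes the MSR with a harmless read/write cycle;
              * a faulting access, or a readback that does not match what was
              * written, marks the MSR as inaccessible (e.g. a hypervisor
              * faking the MSR).
              */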
2884         if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
2885                 x86_pmu.lbr_nr = 0;
2886         for (i = 0; i < x86_pmu.lbr_nr; i++) {
2887                 if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
2888                       check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
2889                         x86_pmu.lbr_nr = 0;
2890         }
2891
2892         /*
2893          * Accessing extra MSRs may cause a #GP under certain circumstances,
2894          * e.g. KVM doesn't support the offcore response events.
2895          * Check all extra_regs here.
2896          */
2897         if (x86_pmu.extra_regs) {
2898                 for (er = x86_pmu.extra_regs; er->msr; er++) {
2899                         er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
2900                         /* Disable LBR select mapping */
2901                         if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
2902                                 x86_pmu.lbr_sel_map = NULL;
2903                 }
2904         }
2905
2906         /* Support full width counters using alternative MSR range */
2907         if (x86_pmu.intel_cap.full_width_write) {
2908                 x86_pmu.max_period = x86_pmu.cntval_mask;
2909                 x86_pmu.perfctr = MSR_IA32_PMC0;
2910                 pr_cont("full-width counters, ");
2911         }
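             /*
              * Writes through the legacy perfctr MSRs are sign-extended from
              * bit 31, which is why max_period is normally capped at 31 bits;
              * the MSR_IA32_PMC0 alias range accepts full-width writes, so the
              * full counter width can be used as the sampling period.
              */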
2912
2913         return 0;
2914 }