#ifdef CONFIG_CPU_SUP_INTEL

/*
 * Intel PerfMon, used on Core and later.
 */
static const u64 intel_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
  [PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
  [PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
};
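
/*
 * Each entry above is a raw architectural event encoding: the low byte
 * is the event select, the next byte the unit mask.  E.g. 0x412e is
 * event 0x2E with umask 0x41 (LLC misses), and 0x4f2e is the same
 * event with umask 0x4F (LLC references).
 */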

static struct event_constraint intel_core_event_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};
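
/*
 * Note: the second argument of INTEL_EVENT_CONSTRAINT() is a bitmask
 * of the general-purpose counters the event may run on: 0x1 means
 * PMC0 only, 0x2 means PMC1 only, 0x3 means either PMC0 or PMC1.
 */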

static struct event_constraint intel_core2_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/*
	 * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
	 * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
	 * ratio between these counters.
	 */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};
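
/*
 * FIXED_EVENT_CONSTRAINT(event, n) steers a matching event onto
 * fixed-purpose counter <n>, i.e. counter index X86_PMC_IDX_FIXED + n,
 * so e.g. INST_RETIRED.ANY (0x00c0) lands on fixed counter 0.
 */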

static struct event_constraint intel_nehalem_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_westmere_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
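
/*
 * Generalized hw-cache event tables follow; C(x) is shorthand for
 * PERF_COUNT_HW_CACHE_##x, provided by the core x86 perf code.  By
 * convention, -1 marks an (op, result) combination the hardware cannot
 * count at all, while 0 marks one that is valid but has no event.
 */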

static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
		[ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
		[ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};
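
/*
 * MSR_CORE_PERF_GLOBAL_CTRL gates all counters at once: clearing it
 * stops every PMC without touching the individual PERFEVTSELx
 * registers, which keeps the disable/enable-all paths below cheap.
 */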
static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
}

static void intel_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all();
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[X86_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}

/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The errata requires the following steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) Set bits 0-3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear the 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The steps we actually take differ slightly from the above:
	 * A) To reduce MSR operations, we don't run step 1) as those
	 *    registers are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with the magic numbers;
	 * C) For step 5), we only clear a PERFEVTSELx when it is not
	 *    currently in use;
	 * D) Call x86_perf_event_set_period to restore PMCx;
	 */

	/* We always operate on 4 pairs of PERF counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}

static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}

static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
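
/*
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL packs one 4-bit control field per
 * fixed counter (enable-OS, enable-USR, ANY, PMI), hence the
 * 0xf << (idx * 4) masking in the two fixed-counter helpers below.
 */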
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
}

static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
		if (!__get_cpu_var(cpu_hw_events).enabled)
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc);
		return;
	}

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
static int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	return x86_perf_event_set_period(event);
}

static void intel_pmu_reset(void)
{
	struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	printk("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
		checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 status;
	int handled;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_disable_all();
	handled = intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
	if (!status) {
		intel_pmu_enable_all(0);
		return handled;
	}

	loops = 0;
again:
	intel_pmu_ack_status(status);
	if (++loops > 100) {
		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
		perf_event_print_debug();
		intel_pmu_reset();
		goto done;
	}

	inc_irq_stat(apic_perf_irqs);

	intel_pmu_lbr_read();

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		handled++;
		x86_pmu.drain_pebs(regs);
	}

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		data.period = event->hw.last_period;

		if (perf_event_overflow(event, 1, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	intel_pmu_enable_all(0);
	return handled;
}
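
/*
 * Counting BR_INST_RETIRED.ALL_BRANCHES with a sample period of 1 is
 * steered onto the BTS fake counter below, letting Branch Trace Store
 * record every branch instead of taking a PMI per event.
 */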
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
		return &bts_constraint;

	return NULL;
}

static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_bts_constraints(event);
	if (c)
		return c;

	c = intel_pebs_constraints(event);
	if (c)
		return c;

	return x86_get_event_constraints(cpuc, event);
}
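
/*
 * ARCH_PERFMON_EVENTSEL_ANY (the AnyThread bit, perfmon v3+) makes a
 * counter count events of both hyper-threads on a core, so raw events
 * requesting it are gated behind CAP_SYS_ADMIN when perf_paranoid_cpu()
 * says so.
 */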
static int intel_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
		return 0;

	if (x86_pmu.version < 3)
		return -EINVAL;

	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

	return 0;
}

static __initconst const struct x86_pmu core_pmu = {
	.name			= "core",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.event_constraints	= intel_core_event_constraints,
};

static void intel_pmu_cpu_starting(int cpu)
{
	init_debug_store_on_cpu(cpu);
	/*
	 * Deal with CPUs that don't clear their LBRs on power-up.
	 */
	intel_pmu_lbr_reset();
}

static void intel_pmu_cpu_dying(int cpu)
{
	fini_debug_store_on_cpu(cpu);
}

static __initconst const struct x86_pmu intel_pmu = {
	.name			= "Intel",
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.hw_config		= intel_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,

	.cpu_starting		= intel_pmu_cpu_starting,
	.cpu_dying		= intel_pmu_cpu_dying,
};

static void intel_clovertown_quirks(void)
{
	/*
	 * PEBS is unreliable due to:
	 *
	 *   AJ67  - PEBS may experience CPL leaks
	 *   AJ68  - PEBS PMI may be delayed by one event
	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
	 *
	 * AJ67 could be worked around by restricting the OS/USR flags.
	 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
	 *
	 * AJ106 could possibly be worked around by not allowing LBR
	 *       usage from PEBS, including the fixup.
	 * AJ68  could possibly be worked around by always programming
	 *       a pebs_event_reset[0] value and coping with the lost events.
	 *
	 * But taken together it might just make sense to not enable PEBS on
	 * these chips.
	 */
	printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
	x86_pmu.pebs = 0;
	x86_pmu.pebs_constraints = NULL;
}

static __init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xf:
			return p4_pmu_init();
		}
		return -ENODEV;
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * the Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return -ENODEV;

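	/*
	 * Perfmon v1 lacks the global control/status MSRs and fixed
	 * counters, hence the fallback to the simpler core PMU below.
	 */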
	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version = version;
	x86_pmu.num_counters = eax.split.num_counters;
	x86_pmu.cntval_bits = eax.split.bit_width;
	x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	/*
	 * v2 and above have a perf capabilities MSR
	 */
	if (version > 1) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}

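	/*
	 * intel_ds_init() probes the Debug Store area and sets up the
	 * BTS/PEBS capabilities consumed by the model-specific setup
	 * below.
	 */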
	intel_ds_init();

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65 nm core solo/duo, "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
		x86_pmu.quirks = intel_clovertown_quirks;
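		/* fall through */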
	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 29: /* six-core 45 nm xeon "Dunnington" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		pr_cont("Core2 events, ");
		break;

	case 26: /* 45 nm nehalem, "Bloomfield" */
	case 30: /* 45 nm nehalem, "Lynnfield" */
	case 46: /* 45 nm nehalem-ex, "Beckton" */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		pr_cont("Nehalem events, ");
		break;

	case 28: /* Atom */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		pr_cont("Atom events, ");
		break;

	case 37: /* 32 nm nehalem, "Clarkdale" */
	case 44: /* 32 nm nehalem, "Gulftown" */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		pr_cont("Westmere events, ");
		break;

	default:
		/*
		 * default constraints for v2 and up
		 */
		x86_pmu.event_constraints = intel_gen_event_constraints;
		pr_cont("generic architected perfmon, ");
	}
	return 0;
}

#else /* CONFIG_CPU_SUP_INTEL */

static int intel_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_INTEL */