#ifdef CONFIG_CPU_SUP_INTEL

/*
 * Intel PerfMon, used on Core and later.
 */
static const u64 intel_perfmon_event_map[] =
{
        [PERF_COUNT_HW_CPU_CYCLES]              = 0x003c,
        [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x4f2e,
        [PERF_COUNT_HW_CACHE_MISSES]            = 0x412e,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c4,
        [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c5,
        [PERF_COUNT_HW_BUS_CYCLES]              = 0x013c,
};
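
/*
 * The entries above use the architectural event-select encoding: event
 * code in bits 7:0, unit mask in bits 15:8. 0x412e, for instance, is
 * umask 0x41 (misses) on event 0x2e (longest-latency cache reference).
 */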

static struct event_constraint intel_core_event_constraints[] =
{
        INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
        INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
        INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
        INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
        INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
        INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
        EVENT_CONSTRAINT_END
};
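
/*
 * The second argument of INTEL_EVENT_CONSTRAINT() is a bitmask of the
 * general-purpose counters the event may be scheduled on: 0x1 means
 * counter 0 only, 0x2 counter 1 only, 0x3 either of the first two.
 */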

static struct event_constraint intel_core2_event_constraints[] =
{
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        /*
         * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
         * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
         * ratio between these counters.
         */
        /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
        INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
        INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
        INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
        INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
        INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
        INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
        INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
        INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
        INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
        INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
        EVENT_CONSTRAINT_END
};
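
/*
 * FIXED_EVENT_CONSTRAINT(event, n) pins an architectural event to
 * fixed-purpose counter n (bit X86_PMC_IDX_FIXED + n in the scheduler's
 * counter mask) so it does not occupy a general-purpose counter.
 */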

static struct event_constraint intel_nehalem_event_constraints[] =
{
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
        INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
        INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
        INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
        INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
        INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
        INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
        INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
        INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
        EVENT_CONSTRAINT_END
};

static struct event_constraint intel_westmere_event_constraints[] =
{
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
        INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
        INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
        INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
        INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
        EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] =
{
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
        EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
        return intel_perfmon_event_map[hw_event];
}

static __initconst const u64 westmere_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
                [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
                [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
                [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
                [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS) ] = 0x0,
        },
 },
 [ C(LL ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
                [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
                [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
                [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
                [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
                [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS) ] = 0x0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
                [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
                [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
 },
};
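
/*
 * Note the two "unsupported" encodings in these tables: 0 means the
 * CPU has no event for the combination (the generic x86 code rejects
 * it with -ENOENT), while -1 marks combinations that are meaningless
 * by construction, e.g. writes to the instruction cache (-EINVAL).
 */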

static __initconst const u64 nehalem_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
                [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
                [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
                [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
                [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS) ] = 0x0,
        },
 },
 [ C(LL ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
                [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
                [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
                [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
                [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
                [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS) ] = 0x0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
                [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
                [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
 },
};

static __initconst const u64 core2_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
                [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
                [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
                [ C(RESULT_MISS) ] = 0,
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
                [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS) ] = 0,
        },
 },
 [ C(LL ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
                [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
                [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS) ] = 0,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
                [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
                [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS) ] = 0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
                [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
                [ C(RESULT_MISS) ] = 0x00c5, /* BR_INST_RETIRED.MISPRED */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
 },
};

static __initconst const u64 atom_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
                [ C(RESULT_MISS) ] = 0,
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
                [ C(RESULT_MISS) ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS) ] = 0,
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
                [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS) ] = 0,
        },
 },
 [ C(LL ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
                [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
                [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS) ] = 0,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
                [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
                [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS) ] = 0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
                [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
                [ C(RESULT_MISS) ] = 0x00c5, /* BR_INST_RETIRED.MISPRED */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ] = -1,
        },
 },
};

static void intel_pmu_disable_all(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

        if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
                intel_pmu_disable_bts();

        intel_pmu_pebs_disable_all();
        intel_pmu_lbr_disable_all();
}

static void intel_pmu_enable_all(int added)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        intel_pmu_pebs_enable_all();
        intel_pmu_lbr_enable_all();
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

        if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
                struct perf_event *event =
                        cpuc->events[X86_PMC_IDX_FIXED_BTS];

                if (WARN_ON_ONCE(!event))
                        return;

                intel_pmu_enable_bts(event->hw.config);
        }
}

/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * These chips need to be 'reset' when adding counters by programming
 * the magic three (non counting) events 0x4300D2, 0x4300B1 and 0x4300B5
 * either in sequence on the same PMC or on different PMCs.
 */
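/*
 * Each magic value is a complete event-select word: the 0x43 in bits
 * 23:16 sets ENABLE, OS and USR, wrapped around real but non-counting
 * event codes.
 */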
static void intel_pmu_nhm_enable_all(int added)
{
        if (added) {
                struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
                int i;

                wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 0, 0x4300D2);
                wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x4300B1);
                wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x4300B5);

                wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x3);
                wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

                for (i = 0; i < 3; i++) {
                        struct perf_event *event = cpuc->events[i];

                        if (!event)
                                continue;

                        __x86_pmu_enable_event(&event->hw,
                                               ARCH_PERFMON_EVENTSEL_ENABLE);
                }
        }
        intel_pmu_enable_all(added);
}

static inline u64 intel_pmu_get_status(void)
{
        u64 status;

        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

        return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
        wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
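
/*
 * MSR_CORE_PERF_GLOBAL_STATUS latches the per-counter overflow bits;
 * writing the same mask back through MSR_CORE_PERF_GLOBAL_OVF_CTRL
 * clears them, which is why the IRQ handler below snapshots the status
 * into 'ack' before servicing the counters.
 */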

static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
        int idx = hwc->idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, mask;

        mask = 0xfULL << (idx * 4);

        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_disable_event(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
                intel_pmu_disable_bts();
                intel_pmu_drain_bts_buffer();
                return;
        }

        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
                intel_pmu_disable_fixed(hwc);
                return;
        }

        x86_pmu_disable_event(event);

        if (unlikely(event->attr.precise_ip))
                intel_pmu_pebs_disable(event);
}

static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
        int idx = hwc->idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, bits, mask;

        /*
         * Enable IRQ generation (0x8),
         * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
         * if requested:
         */
        bits = 0x8ULL;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
                bits |= 0x2;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
                bits |= 0x1;

        /*
         * ANY bit is supported in v3 and up
         */
        if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
                bits |= 0x4;

        bits <<= (idx * 4);
        mask = 0xfULL << (idx * 4);

        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        ctrl_val |= bits;
        wrmsrl(hwc->config_base, ctrl_val);
}
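
/*
 * Worked example (illustrative): fixed counter 1 counting user and
 * kernel with PMI enabled needs bits 0x8|0x2|0x1 = 0xb in nibble 1 of
 * the control MSR, i.e. ctrl_val |= 0xbULL << 4.
 */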

static void intel_pmu_enable_event(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
                if (!__get_cpu_var(cpu_hw_events).enabled)
                        return;

                intel_pmu_enable_bts(hwc->config);
                return;
        }

        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
                intel_pmu_enable_fixed(hwc);
                return;
        }

        if (unlikely(event->attr.precise_ip))
                intel_pmu_pebs_enable(event);

        __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
static int intel_pmu_save_and_restart(struct perf_event *event)
{
        x86_perf_event_update(event);
        return x86_perf_event_set_period(event);
}

static void intel_pmu_reset(void)
{
        struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
        unsigned long flags;
        int idx;

        if (!x86_pmu.num_counters)
                return;

        local_irq_save(flags);

        printk("clearing PMU state on CPU#%d\n", smp_processor_id());

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
                checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
        }
        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
                checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

        if (ds)
                ds->bts_index = ds->bts_buffer_base;

        local_irq_restore(flags);
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        int bit, loops;
        u64 ack, status;

        perf_sample_data_init(&data, 0);

        cpuc = &__get_cpu_var(cpu_hw_events);

        intel_pmu_disable_all();
        intel_pmu_drain_bts_buffer();
        status = intel_pmu_get_status();
        if (!status) {
                intel_pmu_enable_all(0);
                return 0;
        }

        loops = 0;
again:
        if (++loops > 100) {
                WARN_ONCE(1, "perfevents: irq loop stuck!\n");
                perf_event_print_debug();
                intel_pmu_reset();
                goto done;
        }

        inc_irq_stat(apic_perf_irqs);
        ack = status;

        intel_pmu_lbr_read();

        /*
         * PEBS overflow sets bit 62 in the global status register
         */
        if (__test_and_clear_bit(62, (unsigned long *)&status))
                x86_pmu.drain_pebs(regs);

        for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_event *event = cpuc->events[bit];

                if (!test_bit(bit, cpuc->active_mask))
                        continue;

                if (!intel_pmu_save_and_restart(event))
                        continue;

                data.period = event->hw.last_period;

                if (perf_event_overflow(event, 1, &data, regs))
                        x86_pmu_stop(event);
        }

        intel_pmu_ack_status(ack);

        /*
         * Repeat if there is more work to be done:
         */
        status = intel_pmu_get_status();
        if (status)
                goto again;

done:
        intel_pmu_enable_all(0);
        return 1;
}
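
/*
 * A branch-retired event with a sample period of 1 wants a record on
 * every branch, which is exactly what the BTS hardware provides; such
 * events are steered onto the BTS pseudo-counter instead of a PMC.
 */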
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        unsigned int hw_event, bts_event;

        hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
        bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

        if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
                return &bts_constraint;

        return NULL;
}

static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
        struct event_constraint *c;

        c = intel_bts_constraints(event);
        if (c)
                return c;

        c = intel_pebs_constraints(event);
        if (c)
                return c;

        return x86_get_event_constraints(cpuc, event);
}
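
/*
 * The ANY (AnyThread) bit makes a counter aggregate both hyperthreads
 * of a core. It only exists from architectural perfmon v3 on, and
 * since it exposes the sibling thread's activity it is gated behind
 * CAP_SYS_ADMIN below.
 */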
static int intel_pmu_hw_config(struct perf_event *event)
{
        int ret = x86_pmu_hw_config(event);

        if (ret)
                return ret;

        if (event->attr.type != PERF_TYPE_RAW)
                return 0;

        if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
                return 0;

        if (x86_pmu.version < 3)
                return -EINVAL;

        if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
                return -EACCES;

        event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

        return 0;
}
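
/*
 * Pre-v2 "core" PMU: there are no global control/status MSRs, so the
 * generic x86_pmu_{disable,enable}_all paths toggle each event-select
 * MSR individually.
 */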
static __initconst const struct x86_pmu core_pmu = {
        .name                   = "core",
        .handle_irq             = x86_pmu_handle_irq,
        .disable_all            = x86_pmu_disable_all,
        .enable_all             = x86_pmu_enable_all,
        .enable                 = x86_pmu_enable_event,
        .disable                = x86_pmu_disable_event,
        .hw_config              = x86_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
        .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
        .event_map              = intel_pmu_event_map,
        .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
        .apic                   = 1,
        /*
         * Intel PMCs cannot be accessed sanely above 32 bit width,
         * so we install an artificial 1<<31 period regardless of
         * the generic event period:
         */
        .max_period             = (1ULL << 31) - 1,
        .get_event_constraints  = intel_get_event_constraints,
        .event_constraints      = intel_core_event_constraints,
};

static void intel_pmu_cpu_starting(int cpu)
{
        init_debug_store_on_cpu(cpu);
        /*
         * Deal with CPUs that don't clear their LBRs on power-up.
         */
        intel_pmu_lbr_reset();
}

static void intel_pmu_cpu_dying(int cpu)
{
        fini_debug_store_on_cpu(cpu);
}

static __initconst const struct x86_pmu intel_pmu = {
        .name                   = "Intel",
        .handle_irq             = intel_pmu_handle_irq,
        .disable_all            = intel_pmu_disable_all,
        .enable_all             = intel_pmu_enable_all,
        .enable                 = intel_pmu_enable_event,
        .disable                = intel_pmu_disable_event,
        .hw_config              = intel_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
        .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
        .event_map              = intel_pmu_event_map,
        .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
        .apic                   = 1,
        /*
         * Intel PMCs cannot be accessed sanely above 32 bit width,
         * so we install an artificial 1<<31 period regardless of
         * the generic event period:
         */
        .max_period             = (1ULL << 31) - 1,
        .get_event_constraints  = intel_get_event_constraints,

        .cpu_starting           = intel_pmu_cpu_starting,
        .cpu_dying              = intel_pmu_cpu_dying,
};

static void intel_clovertown_quirks(void)
{
        /*
         * PEBS is unreliable due to:
         *
         *   AJ67  - PEBS may experience CPL leaks
         *   AJ68  - PEBS PMI may be delayed by one event
         *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
         *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
         *
         * AJ67 could be worked around by restricting the OS/USR flags.
         * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
         *
         * AJ106 could possibly be worked around by not allowing LBR
         * usage from PEBS, including the fixup.
         * AJ68 could possibly be worked around by always programming
         * a pebs_event_reset[0] value and coping with the lost events.
         *
         * But taken together it might just make sense to not enable PEBS on
         * these chips.
         */
        printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
        x86_pmu.pebs = 0;
        x86_pmu.pebs_constraints = NULL;
}

static __init int intel_pmu_init(void)
{
        union cpuid10_edx edx;
        union cpuid10_eax eax;
        unsigned int unused;
        unsigned int ebx;
        int version;

        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
                switch (boot_cpu_data.x86) {
                case 0x6:
                        return p6_pmu_init();
                case 0xf:
                        return p4_pmu_init();
                }
                return -ENODEV;
        }

        /*
         * Check whether the Architectural PerfMon supports
         * Branch Misses Retired hw_event or not.
         */
        cpuid(10, &eax.full, &ebx, &unused, &edx.full);
        if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
                return -ENODEV;
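
        /*
         * CPUID leaf 0xa: eax packs the version, the number and width
         * of the general-purpose counters and the event-mask length;
         * edx describes the fixed-purpose counters.
         */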
        version = eax.split.version_id;
        if (version < 2)
                x86_pmu = core_pmu;
        else
                x86_pmu = intel_pmu;

        x86_pmu.version = version;
        x86_pmu.num_counters = eax.split.num_counters;
        x86_pmu.cntval_bits = eax.split.bit_width;
        x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;

        /*
         * Quirk: v2 perfmon does not report fixed-purpose events, so
         * assume at least 3 events:
         */
        if (version > 1)
                x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

        /*
         * v2 and above have a perf capabilities MSR
         */
        if (version > 1) {
                u64 capabilities;

                rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
                x86_pmu.intel_cap.capabilities = capabilities;
        }

        intel_ds_init();

        /*
         * Install the hw-cache-events table:
         */
        switch (boot_cpu_data.x86_model) {
        case 14: /* 65 nm core solo/duo, "Yonah" */
                pr_cont("Core events, ");
                break;

        case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
                x86_pmu.quirks = intel_clovertown_quirks;
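                /* fall through: model 15 shares the Core2 setup below */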
        case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
        case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
        case 29: /* six-core 45 nm xeon "Dunnington" */
                memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));

                intel_pmu_lbr_init_core();

                x86_pmu.event_constraints = intel_core2_event_constraints;
                pr_cont("Core2 events, ");
                break;

        case 26: /* 45 nm nehalem, "Bloomfield" */
        case 30: /* 45 nm nehalem, "Lynnfield" */
        case 46: /* 45 nm nehalem-ex, "Beckton" */
                memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));

                intel_pmu_lbr_init_nhm();

                x86_pmu.event_constraints = intel_nehalem_event_constraints;
                x86_pmu.enable_all = intel_pmu_nhm_enable_all;
                pr_cont("Nehalem events, ");
                break;

        case 28: /* Atom */
                memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));

                intel_pmu_lbr_init_atom();

                x86_pmu.event_constraints = intel_gen_event_constraints;
                pr_cont("Atom events, ");
                break;

        case 37: /* 32 nm nehalem, "Clarkdale" */
        case 44: /* 32 nm nehalem, "Gulftown" */
                memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));

                intel_pmu_lbr_init_nhm();

                x86_pmu.event_constraints = intel_westmere_event_constraints;
                x86_pmu.enable_all = intel_pmu_nhm_enable_all;
                pr_cont("Westmere events, ");
                break;

        default:
                /*
                 * default constraints for v2 and up
                 */
                x86_pmu.event_constraints = intel_gen_event_constraints;
                pr_cont("generic architected perfmon, ");
        }
        return 0;
}

#else /* CONFIG_CPU_SUP_INTEL */

static int intel_pmu_init(void)
{
        return 0;
}

#endif /* CONFIG_CPU_SUP_INTEL */