Commit | Line | Data |
---|---|---|
b2441318 | 1 | // SPDX-License-Identifier: GPL-2.0 |
46866b59 | 2 | /* Nehalem/SandBridge/Haswell/Broadwell/Skylake uncore support */ |
92553e40 | 3 | #include "uncore.h" |
92807ffd | 4 | |
/* Uncore IMC PCI IDs */
/* SNB/IVB/HSW/BDW client IMC devices */
#define PCI_DEVICE_ID_INTEL_SNB_IMC		0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC		0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC		0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC		0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC		0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC		0x1604
/* Skylake client IMC SKUs (U/Y/H/S) */
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC		0x1904
#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC		0x190c
#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC		0x1900
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC		0x1910
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC		0x190f
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC		0x191f
#define PCI_DEVICE_ID_INTEL_SKL_E3_IMC		0x1918
/* Kaby Lake client IMC SKUs */
#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC		0x590c
#define PCI_DEVICE_ID_INTEL_KBL_U_IMC		0x5904
#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC		0x5914
#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC		0x590f
#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC		0x591f
#define PCI_DEVICE_ID_INTEL_KBL_HQ_IMC		0x5910
#define PCI_DEVICE_ID_INTEL_KBL_WQ_IMC		0x5918
/* Coffee Lake client IMC SKUs */
#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC		0x3ecc
#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC		0x3ed0
#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC		0x3e10
#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC		0x3ec4
#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC	0x3e0f
#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC	0x3e1f
#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC	0x3ec2
#define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC	0x3e30
#define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC	0x3e18
#define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC	0x3ec6
#define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC	0x3e31
#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC	0x3e33
#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC	0x3eca
#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC	0x3e32
/* Amber Lake / Whiskey Lake client IMC SKUs */
#define PCI_DEVICE_ID_INTEL_AML_YD_IMC		0x590c
#define PCI_DEVICE_ID_INTEL_AML_YQ_IMC		0x590d
#define PCI_DEVICE_ID_INTEL_WHL_UQ_IMC		0x3ed0
#define PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC	0x3e34
#define PCI_DEVICE_ID_INTEL_WHL_UD_IMC		0x3e35
/* Comet Lake client IMC SKUs */
#define PCI_DEVICE_ID_INTEL_CML_H1_IMC		0x9b44
#define PCI_DEVICE_ID_INTEL_CML_H2_IMC		0x9b54
#define PCI_DEVICE_ID_INTEL_CML_H3_IMC		0x9b64
#define PCI_DEVICE_ID_INTEL_CML_U1_IMC		0x9b51
#define PCI_DEVICE_ID_INTEL_CML_U2_IMC		0x9b61
#define PCI_DEVICE_ID_INTEL_CML_U3_IMC		0x9b71
#define PCI_DEVICE_ID_INTEL_CML_S1_IMC		0x9b33
#define PCI_DEVICE_ID_INTEL_CML_S2_IMC		0x9b43
#define PCI_DEVICE_ID_INTEL_CML_S3_IMC		0x9b53
#define PCI_DEVICE_ID_INTEL_CML_S4_IMC		0x9b63
#define PCI_DEVICE_ID_INTEL_CML_S5_IMC		0x9b73
/* Ice Lake client IMC SKUs */
#define PCI_DEVICE_ID_INTEL_ICL_U_IMC		0x8a02
#define PCI_DEVICE_ID_INTEL_ICL_U2_IMC		0x8a12
/* Tiger Lake client IMC SKUs */
#define PCI_DEVICE_ID_INTEL_TGL_U1_IMC		0x9a02
#define PCI_DEVICE_ID_INTEL_TGL_U2_IMC		0x9a04
#define PCI_DEVICE_ID_INTEL_TGL_U3_IMC		0x9a12
#define PCI_DEVICE_ID_INTEL_TGL_U4_IMC		0x9a14
#define PCI_DEVICE_ID_INTEL_TGL_H_IMC		0x9a36

92807ffd YZ |
65 | /* SNB event control */ |
66 | #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff | |
67 | #define SNB_UNC_CTL_UMASK_MASK 0x0000ff00 | |
68 | #define SNB_UNC_CTL_EDGE_DET (1 << 18) | |
69 | #define SNB_UNC_CTL_EN (1 << 22) | |
70 | #define SNB_UNC_CTL_INVERT (1 << 23) | |
71 | #define SNB_UNC_CTL_CMASK_MASK 0x1f000000 | |
72 | #define NHM_UNC_CTL_CMASK_MASK 0xff000000 | |
73 | #define NHM_UNC_FIXED_CTR_CTL_EN (1 << 0) | |
74 | ||
75 | #define SNB_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \ | |
76 | SNB_UNC_CTL_UMASK_MASK | \ | |
77 | SNB_UNC_CTL_EDGE_DET | \ | |
78 | SNB_UNC_CTL_INVERT | \ | |
79 | SNB_UNC_CTL_CMASK_MASK) | |
80 | ||
81 | #define NHM_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \ | |
82 | SNB_UNC_CTL_UMASK_MASK | \ | |
83 | SNB_UNC_CTL_EDGE_DET | \ | |
84 | SNB_UNC_CTL_INVERT | \ | |
85 | NHM_UNC_CTL_CMASK_MASK) | |
86 | ||
87 | /* SNB global control register */ | |
88 | #define SNB_UNC_PERF_GLOBAL_CTL 0x391 | |
89 | #define SNB_UNC_FIXED_CTR_CTRL 0x394 | |
90 | #define SNB_UNC_FIXED_CTR 0x395 | |
91 | ||
92 | /* SNB uncore global control */ | |
93 | #define SNB_UNC_GLOBAL_CTL_CORE_ALL ((1 << 4) - 1) | |
94 | #define SNB_UNC_GLOBAL_CTL_EN (1 << 29) | |
95 | ||
96 | /* SNB Cbo register */ | |
97 | #define SNB_UNC_CBO_0_PERFEVTSEL0 0x700 | |
98 | #define SNB_UNC_CBO_0_PER_CTR0 0x706 | |
99 | #define SNB_UNC_CBO_MSR_OFFSET 0x10 | |
100 | ||
e3a13192 AK |
101 | /* SNB ARB register */ |
102 | #define SNB_UNC_ARB_PER_CTR0 0x3b0 | |
103 | #define SNB_UNC_ARB_PERFEVTSEL0 0x3b2 | |
104 | #define SNB_UNC_ARB_MSR_OFFSET 0x10 | |
105 | ||
92807ffd YZ |
106 | /* NHM global control register */ |
107 | #define NHM_UNC_PERF_GLOBAL_CTL 0x391 | |
108 | #define NHM_UNC_FIXED_CTR 0x394 | |
109 | #define NHM_UNC_FIXED_CTR_CTRL 0x395 | |
110 | ||
111 | /* NHM uncore global control */ | |
112 | #define NHM_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 8) - 1) | |
113 | #define NHM_UNC_GLOBAL_CTL_EN_FC (1ULL << 32) | |
114 | ||
115 | /* NHM uncore register */ | |
116 | #define NHM_UNC_PERFEVTSEL0 0x3c0 | |
117 | #define NHM_UNC_UNCORE_PMC0 0x3b0 | |
118 | ||
46866b59 KL |
119 | /* SKL uncore global control */ |
120 | #define SKL_UNC_PERF_GLOBAL_CTL 0xe01 | |
121 | #define SKL_UNC_GLOBAL_CTL_CORE_ALL ((1 << 5) - 1) | |
122 | ||
6e394376 KL |
123 | /* ICL Cbo register */ |
124 | #define ICL_UNC_CBO_CONFIG 0x396 | |
125 | #define ICL_UNC_NUM_CBO_MASK 0xf | |
126 | #define ICL_UNC_CBO_0_PER_CTR0 0x702 | |
127 | #define ICL_UNC_CBO_MSR_OFFSET 0x8 | |
128 | ||
92807ffd YZ |
129 | DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); |
130 | DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); | |
131 | DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); | |
132 | DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23"); | |
133 | DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28"); | |
134 | DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31"); | |
135 | ||
136 | /* Sandy Bridge uncore support */ | |
137 | static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | |
138 | { | |
139 | struct hw_perf_event *hwc = &event->hw; | |
140 | ||
141 | if (hwc->idx < UNCORE_PMC_IDX_FIXED) | |
142 | wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN); | |
143 | else | |
144 | wrmsrl(hwc->config_base, SNB_UNC_CTL_EN); | |
145 | } | |
146 | ||
/* Stop a counter by clearing its event-select MSR (drops SNB_UNC_CTL_EN). */
static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}
151 | ||
/*
 * Enable the uncore globally.  Only the first PMU instance (pmu_idx 0)
 * touches the shared global-control MSR so it is written exactly once.
 */
static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}
159 | ||
/* Re-enable the global uncore control (same value as init_box writes). */
static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
}
165 | ||
/* Disable the uncore globally on teardown; mirrors init_box's idx==0 guard. */
static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}
171 | ||
/* Named events exposed in sysfs for the SNB C-Box (fixed clockticks counter). */
static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};
176 | ||
/* Raw-config format attributes for SNB-style MSR uncore PMUs (5-bit cmask). */
static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};
185 | ||
/* sysfs "format" directory for the SNB MSR uncore PMUs. */
static const struct attribute_group snb_uncore_format_group = {
	.name = "format",
	.attrs = snb_uncore_formats_attr,
};
190 | ||
/* MSR-based box/event operations shared by the SNB cbox and arb PMUs. */
static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box	= snb_uncore_msr_init_box,
	.enable_box	= snb_uncore_msr_enable_box,
	.exit_box	= snb_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};
199 | ||
/* ARB events 0x80 and 0x83 are restricted to counter 0 (mask 0x1). */
static struct event_constraint snb_uncore_arb_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};
205 | ||
/*
 * SNB C-Box: 4 boxes, each with 2 general-purpose 44-bit counters plus
 * one shared 48-bit fixed (clockticks) counter.
 */
static struct intel_uncore_type snb_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 2,
	.num_boxes	= 4,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};
223 | ||
/* SNB ARB (ring arbiter): one box, 2 x 44-bit counters, no fixed counter. */
static struct intel_uncore_type snb_uncore_arb = {
	.name		= "arb",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= SNB_UNC_ARB_PER_CTR0,
	.event_ctl	= SNB_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};
237 | ||
/* NULL-terminated list of SNB MSR-based uncore PMU types. */
static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};
243 | ||
/*
 * Register the SNB MSR uncores; clamp the number of C-Boxes to the
 * actual core count (one C-Box per core).
 */
void snb_uncore_cpu_init(void)
{
	uncore_msr_uncores = snb_msr_uncores;
	if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}
250 | ||
/*
 * SKL variant of the global enable.  Additionally flags the 8th C-Box
 * (pmu_idx 7), which lives in a different MSR range on CFL parts.
 */
static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
	}

	/* The 8th CBOX has different MSR space */
	if (box->pmu->pmu_idx == 7)
		__set_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags);
}
262 | ||
/* Re-enable the SKL global uncore control (same value as init_box writes). */
static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}
268 | ||
46866b59 KL |
269 | static void skl_uncore_msr_exit_box(struct intel_uncore_box *box) |
270 | { | |
271 | if (box->pmu->pmu_idx == 0) | |
272 | wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0); | |
273 | } | |
274 | ||
/* SKL box ops: SKL global control, SNB per-event programming. */
static struct intel_uncore_ops skl_uncore_msr_ops = {
	.init_box	= skl_uncore_msr_init_box,
	.enable_box	= skl_uncore_msr_enable_box,
	.exit_box	= skl_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};
283 | ||
/* SKL C-Box: up to 8 boxes with 4 counters each; otherwise SNB layout. */
static struct intel_uncore_type skl_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &skl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};
301 | ||
/* SKL MSR uncores: SKL cbox plus the SNB arb box (re-used with SKL ops). */
static struct intel_uncore_type *skl_msr_uncores[] = {
	&skl_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};
307 | ||
/*
 * Register the SKL MSR uncores, clamp the C-Box count to the core
 * count, and switch the shared arb type over to the SKL ops.
 */
void skl_uncore_cpu_init(void)
{
	uncore_msr_uncores = skl_msr_uncores;
	if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
}
315 | ||
/*
 * ICL C-Box: new counter MSR base and 0x8 stride; num_boxes is filled
 * in at runtime from the CBO_CONFIG MSR (see icl_uncore_cpu_init()).
 */
static struct intel_uncore_type icl_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 4,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ICL_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= ICL_UNC_CBO_MSR_OFFSET,
	.ops		= &skl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};
327 | ||
/* Named events for the ICL clock box (fixed clockticks counter only). */
static struct uncore_event_desc icl_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
	{ /* end: all zeroes */ },
};
332 | ||
/* The clock box only understands the event field; no umask/edge/inv/cmask. */
static struct attribute *icl_uncore_clock_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};
337 | ||
338 | static struct attribute_group icl_uncore_clock_format_group = { | |
339 | .name = "format", | |
340 | .attrs = icl_uncore_clock_formats_attr, | |
341 | }; | |
342 | ||
/* ICL clock box: a single 48-bit fixed counter exposing clockticks. */
static struct intel_uncore_type icl_uncore_clockbox = {
	.name		= "clock",
	.num_counters	= 1,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
	.format_group	= &icl_uncore_clock_format_group,
	.ops		= &skl_uncore_msr_ops,
	.event_descs	= icl_uncore_events,
};
356 | ||
/* ICL MSR uncores: cbox, shared arb, and the new clock box. */
static struct intel_uncore_type *icl_msr_uncores[] = {
	&icl_uncore_cbox,
	&snb_uncore_arb,
	&icl_uncore_clockbox,
	NULL,
};
363 | ||
364 | static int icl_get_cbox_num(void) | |
365 | { | |
366 | u64 num_boxes; | |
367 | ||
368 | rdmsrl(ICL_UNC_CBO_CONFIG, num_boxes); | |
369 | ||
370 | return num_boxes & ICL_UNC_NUM_CBO_MASK; | |
371 | } | |
372 | ||
/*
 * Register the ICL MSR uncores; the C-Box count is discovered at
 * runtime from the CBO_CONFIG MSR rather than clamped to core count.
 */
void icl_uncore_cpu_init(void)
{
	uncore_msr_uncores = icl_msr_uncores;
	icl_uncore_cbox.num_boxes = icl_get_cbox_num();
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
}
379 | ||
/* Index into snb_pci_uncores[] / UNCORE_PCI_DEV_DATA for the client IMC. */
enum {
	SNB_PCI_UNCORE_IMC,
};
383 | ||
/*
 * IMC events with scale/unit metadata.  The 6.103515625e-5 scale
 * converts 64-byte cacheline counts to MiB (64 / 2^20).
 */
static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(gt_requests, "event=0x03"),
	INTEL_UNCORE_EVENT_DESC(gt_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(gt_requests.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(ia_requests, "event=0x04"),
	INTEL_UNCORE_EVENT_DESC(ia_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ia_requests.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(io_requests, "event=0x05"),
	INTEL_UNCORE_EVENT_DESC(io_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(io_requests.unit, "MiB"),

	{ /* end: all zeroes */ },
};
407 | ||
/* Event-code mask and BAR config-space offset for the client IMC. */
#define SNB_UNCORE_PCI_IMC_EVENT_MASK		0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET		0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE		0x6000

/* Event codes and MMIO offsets of the free-running counters */
#define SNB_UNCORE_PCI_IMC_DATA_READS		0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE	0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES		0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE	0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE		SNB_UNCORE_PCI_IMC_DATA_READS_BASE

/* BW break down- legacy counters */
#define SNB_UNCORE_PCI_IMC_GT_REQUESTS		0x3
#define SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE	0x5040
#define SNB_UNCORE_PCI_IMC_IA_REQUESTS		0x4
#define SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE	0x5044
#define SNB_UNCORE_PCI_IMC_IO_REQUESTS		0x5
#define SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE	0x5048

427 | ||
/* Indices of the SNB IMC free-running counters (also array indices below). */
enum perf_snb_uncore_imc_freerunning_types {
	SNB_PCI_UNCORE_IMC_DATA_READS		= 0,
	SNB_PCI_UNCORE_IMC_DATA_WRITES,
	SNB_PCI_UNCORE_IMC_GT_REQUESTS,
	SNB_PCI_UNCORE_IMC_IA_REQUESTS,
	SNB_PCI_UNCORE_IMC_IO_REQUESTS,

	SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
};
437 | ||
438 | static struct freerunning_counters snb_uncore_imc_freerunning[] = { | |
24633d90 VS |
439 | [SNB_PCI_UNCORE_IMC_DATA_READS] = { SNB_UNCORE_PCI_IMC_DATA_READS_BASE, |
440 | 0x0, 0x0, 1, 32 }, | |
441 | [SNB_PCI_UNCORE_IMC_DATA_READS] = { SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE, | |
442 | 0x0, 0x0, 1, 32 }, | |
443 | [SNB_PCI_UNCORE_IMC_GT_REQUESTS] = { SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE, | |
444 | 0x0, 0x0, 1, 32 }, | |
445 | [SNB_PCI_UNCORE_IMC_IA_REQUESTS] = { SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE, | |
446 | 0x0, 0x0, 1, 32 }, | |
447 | [SNB_PCI_UNCORE_IMC_IO_REQUESTS] = { SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE, | |
448 | 0x0, 0x0, 1, 32 }, | |
9aae1780 KL |
449 | }; |
450 | ||
92807ffd YZ |
451 | static struct attribute *snb_uncore_imc_formats_attr[] = { |
452 | &format_attr_event.attr, | |
453 | NULL, | |
454 | }; | |
455 | ||
/* sysfs "format" directory for the IMC PMU. */
static const struct attribute_group snb_uncore_imc_format_group = {
	.name = "format",
	.attrs = snb_uncore_imc_formats_attr,
};
460 | ||
/*
 * Map the IMC counter registers: read the BAR from PCI config space
 * (upper dword only when physical addresses are 64-bit), page-align
 * the address and ioremap() mmio_map_size bytes.  On ioremap failure
 * only a warning is printed and box->io_addr stays NULL.
 */
static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct pci_dev *pdev = box->pci_dev;
	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
	resource_size_t addr;
	u32 pci_dword;

	pci_read_config_dword(pdev, where, &pci_dword);
	addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, where + 4, &pci_dword);
	addr |= ((resource_size_t)pci_dword << 32);
#endif

	/* BAR low bits carry flags, not address bits — strip to page boundary */
	addr &= ~(PAGE_SIZE - 1);

	box->io_addr = ioremap(addr, type->mmio_map_size);
	if (!box->io_addr)
		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);

	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}
485 | ||
/*
 * The IMC counters are free running, so there is nothing to program:
 * these callbacks are empty stubs that only satisfy the uncore ops
 * interface.
 */
static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}
497 | ||
92807ffd | 498 | /* |
9aae1780 KL |
499 | * Keep the custom event_init() function compatible with old event |
500 | * encoding for free running counters. | |
92807ffd YZ |
501 | */ |
502 | static int snb_uncore_imc_event_init(struct perf_event *event) | |
503 | { | |
504 | struct intel_uncore_pmu *pmu; | |
505 | struct intel_uncore_box *box; | |
506 | struct hw_perf_event *hwc = &event->hw; | |
507 | u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK; | |
508 | int idx, base; | |
509 | ||
510 | if (event->attr.type != event->pmu->type) | |
511 | return -ENOENT; | |
512 | ||
513 | pmu = uncore_event_to_pmu(event); | |
514 | /* no device found for this pmu */ | |
515 | if (pmu->func_id < 0) | |
516 | return -ENOENT; | |
517 | ||
518 | /* Sampling not supported yet */ | |
519 | if (hwc->sample_period) | |
520 | return -EINVAL; | |
521 | ||
522 | /* unsupported modes and filters */ | |
2ff40250 | 523 | if (event->attr.sample_period) /* no sampling */ |
92807ffd YZ |
524 | return -EINVAL; |
525 | ||
526 | /* | |
527 | * Place all uncore events for a particular physical package | |
528 | * onto a single cpu | |
529 | */ | |
530 | if (event->cpu < 0) | |
531 | return -EINVAL; | |
532 | ||
533 | /* check only supported bits are set */ | |
534 | if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK) | |
535 | return -EINVAL; | |
536 | ||
537 | box = uncore_pmu_to_box(pmu, event->cpu); | |
538 | if (!box || box->cpu < 0) | |
539 | return -EINVAL; | |
540 | ||
541 | event->cpu = box->cpu; | |
1f2569fa | 542 | event->pmu_private = box; |
92807ffd | 543 | |
e64cd6f7 DCC |
544 | event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG; |
545 | ||
92807ffd YZ |
546 | event->hw.idx = -1; |
547 | event->hw.last_tag = ~0ULL; | |
548 | event->hw.extra_reg.idx = EXTRA_REG_NONE; | |
549 | event->hw.branch_reg.idx = EXTRA_REG_NONE; | |
550 | /* | |
551 | * check event is known (whitelist, determines counter) | |
552 | */ | |
553 | switch (cfg) { | |
554 | case SNB_UNCORE_PCI_IMC_DATA_READS: | |
555 | base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE; | |
9aae1780 | 556 | idx = UNCORE_PMC_IDX_FREERUNNING; |
92807ffd YZ |
557 | break; |
558 | case SNB_UNCORE_PCI_IMC_DATA_WRITES: | |
559 | base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE; | |
9aae1780 | 560 | idx = UNCORE_PMC_IDX_FREERUNNING; |
92807ffd | 561 | break; |
24633d90 VS |
562 | case SNB_UNCORE_PCI_IMC_GT_REQUESTS: |
563 | base = SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE; | |
564 | idx = UNCORE_PMC_IDX_FREERUNNING; | |
565 | break; | |
566 | case SNB_UNCORE_PCI_IMC_IA_REQUESTS: | |
567 | base = SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE; | |
568 | idx = UNCORE_PMC_IDX_FREERUNNING; | |
569 | break; | |
570 | case SNB_UNCORE_PCI_IMC_IO_REQUESTS: | |
571 | base = SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE; | |
572 | idx = UNCORE_PMC_IDX_FREERUNNING; | |
573 | break; | |
92807ffd YZ |
574 | default: |
575 | return -EINVAL; | |
576 | } | |
577 | ||
578 | /* must be done before validate_group */ | |
579 | event->hw.event_base = base; | |
92807ffd YZ |
580 | event->hw.idx = idx; |
581 | ||
8041ffd3 KL |
582 | /* Convert to standard encoding format for freerunning counters */ |
583 | event->hw.config = ((cfg - 1) << 8) | 0x10ff; | |
584 | ||
92807ffd YZ |
585 | /* no group validation needed, we have free running counters */ |
586 | ||
587 | return 0; | |
588 | } | |
589 | ||
/* Nothing to configure for free-running counters; always succeeds. */
static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}
594 | ||
/*
 * Record the client IMC PCI device in the bus -> physical-package map.
 * Client parts have a single package, so the bus always maps to
 * package 0.  Returns -ENOTTY when the device is absent, -ENOMEM when
 * a map for the PCI segment cannot be obtained.
 */
int snb_pci2phy_map_init(int devid)
{
	struct pci_dev *dev = NULL;
	struct pci2phy_map *map;
	int bus, segment;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
	if (!dev)
		return -ENOTTY;

	bus = dev->bus->number;
	segment = pci_domain_nr(dev->bus);

	raw_spin_lock(&pci2phy_map_lock);
	map = __find_pci2phy_map(segment);
	if (!map) {
		raw_spin_unlock(&pci2phy_map_lock);
		pci_dev_put(dev);
		return -ENOMEM;
	}
	map->pbus_to_physid[bus] = 0;
	raw_spin_unlock(&pci2phy_map_lock);

	pci_dev_put(dev);

	return 0;
}
622 | ||
/* Custom pmu for the IMC: keeps the legacy event encoding via event_init. */
static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= snb_uncore_imc_event_init,
	.add		= uncore_pmu_event_add,
	.del		= uncore_pmu_event_del,
	.start		= uncore_pmu_event_start,
	.stop		= uncore_pmu_event_stop,
	.read		= uncore_pmu_event_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};
633 | ||
/* MMIO-based ops for the IMC box; enable/disable paths are no-op stubs. */
static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box	= snb_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.enable_box	= snb_uncore_imc_enable_box,
	.disable_box	= snb_uncore_imc_disable_box,
	.disable_event	= snb_uncore_imc_disable_event,
	.enable_event	= snb_uncore_imc_enable_event,
	.hw_config	= snb_uncore_imc_hw_config,
	.read_counter	= uncore_mmio_read_counter,
};
644 | ||
/* Client IMC uncore type: one box, five free-running counters. */
static struct intel_uncore_type snb_uncore_imc = {
	.name		= "imc",
	.num_counters   = 5,
	.num_boxes	= 1,
	.num_freerunning_types	= SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size	= SNB_UNCORE_PCI_IMC_MAP_SIZE,
	.freerunning	= snb_uncore_imc_freerunning,
	.event_descs	= snb_uncore_imc_events,
	.format_group	= &snb_uncore_imc_format_group,
	.ops		= &snb_uncore_imc_ops,
	.pmu		= &snb_uncore_imc_pmu,
};
657 | ||
/* NULL-terminated list of PCI-based uncore types, indexed by the enum above. */
static struct intel_uncore_type *snb_pci_uncores[] = {
	[SNB_PCI_UNCORE_IMC]	= &snb_uncore_imc,
	NULL,
};
662 | ||
/* PCI match table for the Sandy Bridge client IMC. */
static const struct pci_device_id snb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
670 | ||
/* PCI match table for the Ivy Bridge client IMC variants. */
static const struct pci_device_id ivb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
682 | ||
/* PCI match table for the Haswell client IMC variants. */
static const struct pci_device_id hsw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
694 | ||
/* PCI match table for the Broadwell client IMC. */
static const struct pci_device_id bdw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
702 | ||
0e1eb0a1 SE |
703 | static const struct pci_device_id skl_uncore_pci_ids[] = { |
704 | { /* IMC */ | |
d786810b | 705 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC), |
0e1eb0a1 SE |
706 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), |
707 | }, | |
46866b59 KL |
708 | { /* IMC */ |
709 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC), | |
710 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
711 | }, | |
d786810b KL |
712 | { /* IMC */ |
713 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC), | |
714 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
715 | }, | |
716 | { /* IMC */ | |
717 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC), | |
718 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
719 | }, | |
720 | { /* IMC */ | |
721 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC), | |
722 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
723 | }, | |
724 | { /* IMC */ | |
725 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC), | |
726 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
727 | }, | |
e7438304 KL |
728 | { /* IMC */ |
729 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_E3_IMC), | |
730 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
731 | }, | |
c10a8de0 KL |
732 | { /* IMC */ |
733 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC), | |
734 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
735 | }, | |
736 | { /* IMC */ | |
737 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC), | |
738 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
739 | }, | |
740 | { /* IMC */ | |
741 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC), | |
742 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
743 | }, | |
744 | { /* IMC */ | |
745 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC), | |
746 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
747 | }, | |
748 | { /* IMC */ | |
749 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC), | |
750 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
751 | }, | |
6e86d3db GK |
752 | { /* IMC */ |
753 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_HQ_IMC), | |
754 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
755 | }, | |
756 | { /* IMC */ | |
757 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_WQ_IMC), | |
758 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
759 | }, | |
c10a8de0 KL |
760 | { /* IMC */ |
761 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC), | |
762 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
763 | }, | |
764 | { /* IMC */ | |
765 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC), | |
766 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
767 | }, | |
768 | { /* IMC */ | |
769 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC), | |
770 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
771 | }, | |
772 | { /* IMC */ | |
773 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC), | |
774 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
775 | }, | |
776 | { /* IMC */ | |
777 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC), | |
778 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
779 | }, | |
780 | { /* IMC */ | |
781 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC), | |
782 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
783 | }, | |
784 | { /* IMC */ | |
785 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC), | |
786 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
787 | }, | |
788 | { /* IMC */ | |
789 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC), | |
790 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
791 | }, | |
792 | { /* IMC */ | |
793 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC), | |
794 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
795 | }, | |
796 | { /* IMC */ | |
797 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC), | |
798 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
799 | }, | |
800 | { /* IMC */ | |
801 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC), | |
802 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
803 | }, | |
804 | { /* IMC */ | |
805 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC), | |
806 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
807 | }, | |
808 | { /* IMC */ | |
809 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC), | |
810 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
811 | }, | |
812 | { /* IMC */ | |
813 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC), | |
814 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
815 | }, | |
6e86d3db GK |
816 | { /* IMC */ |
817 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AML_YD_IMC), | |
818 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
819 | }, | |
820 | { /* IMC */ | |
821 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AML_YQ_IMC), | |
822 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
823 | }, | |
824 | { /* IMC */ | |
825 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_UQ_IMC), | |
826 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
827 | }, | |
828 | { /* IMC */ | |
829 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC), | |
830 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
831 | }, | |
832 | { /* IMC */ | |
833 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_UD_IMC), | |
834 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
835 | }, | |
bb85429a KL |
836 | { /* IMC */ |
837 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_H1_IMC), | |
838 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
839 | }, | |
840 | { /* IMC */ | |
841 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_H2_IMC), | |
842 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
843 | }, | |
844 | { /* IMC */ | |
845 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_H3_IMC), | |
846 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
847 | }, | |
848 | { /* IMC */ | |
849 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_U1_IMC), | |
850 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
851 | }, | |
852 | { /* IMC */ | |
853 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_U2_IMC), | |
854 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
855 | }, | |
856 | { /* IMC */ | |
857 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_U3_IMC), | |
858 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
859 | }, | |
860 | { /* IMC */ | |
861 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S1_IMC), | |
862 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
863 | }, | |
864 | { /* IMC */ | |
865 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S2_IMC), | |
866 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
867 | }, | |
868 | { /* IMC */ | |
869 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S3_IMC), | |
870 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
871 | }, | |
872 | { /* IMC */ | |
873 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S4_IMC), | |
874 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
875 | }, | |
876 | { /* IMC */ | |
877 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S5_IMC), | |
878 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | |
879 | }, | |
0e1eb0a1 SE |
880 | { /* end: all zeroes */ }, |
881 | }; | |
882 | ||
6e394376 KL |
/* PCI match table for the Ice Lake (ICL) client integrated memory controller. */
static const struct pci_device_id icl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
894 | ||
92807ffd YZ |
/* Sandy Bridge client IMC uncore "driver" (id_table only; no probe — matched manually). */
static struct pci_driver snb_uncore_pci_driver = {
	.name		= "snb_uncore",
	.id_table	= snb_uncore_pci_ids,
};
899 | ||
/* Ivy Bridge client IMC uncore "driver" (id_table only; no probe — matched manually). */
static struct pci_driver ivb_uncore_pci_driver = {
	.name		= "ivb_uncore",
	.id_table	= ivb_uncore_pci_ids,
};
904 | ||
/* Haswell client IMC uncore "driver" (id_table only; no probe — matched manually). */
static struct pci_driver hsw_uncore_pci_driver = {
	.name		= "hsw_uncore",
	.id_table	= hsw_uncore_pci_ids,
};
909 | ||
a41f3c8c SE |
/* Broadwell client IMC uncore "driver" (id_table only; no probe — matched manually). */
static struct pci_driver bdw_uncore_pci_driver = {
	.name		= "bdw_uncore",
	.id_table	= bdw_uncore_pci_ids,
};
914 | ||
0e1eb0a1 SE |
/* Skylake-and-later client IMC uncore "driver"; id_table covers SKL/KBL/CFL/AML/WHL/CML. */
static struct pci_driver skl_uncore_pci_driver = {
	.name		= "skl_uncore",
	.id_table	= skl_uncore_pci_ids,
};
919 | ||
6e394376 KL |
/* Ice Lake client IMC uncore "driver" (id_table only; no probe — matched manually). */
static struct pci_driver icl_uncore_pci_driver = {
	.name		= "icl_uncore",
	.id_table	= icl_uncore_pci_ids,
};
924 | ||
521e8bac SE |
/*
 * Pairs one client IMC PCI device ID with the pci_driver that services it.
 * Tables of these are scanned by imc_uncore_find_dev() to detect which
 * generation's IMC is present on the running system.
 */
struct imc_uncore_pci_dev {
	__u32 pci_id;		/* PCI_DEVICE_ID_INTEL_* of the IMC */
	struct pci_driver *driver;	/* matching *_uncore_pci_driver */
};
/* Shorthand for building imc_uncore_pci_dev table entries. */
#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }
931 | ||
/*
 * Known client/desktop IMC devices, scanned in order by
 * imc_uncore_find_dev(); terminated by an all-zero entry (pci_id == 0).
 */
static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
	IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core Y */
	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
	IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Dual Core */
	IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Quad Core */
	IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Dual Core */
	IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Quad Core */
	IMC_DEV(SKL_E3_IMC, &skl_uncore_pci_driver),  /* Xeon E3 V5 Gen Core processor */
	IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core Y */
	IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U */
	IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U Quad Core */
	IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Dual Core */
	IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Quad Core */
	IMC_DEV(KBL_HQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core H Quad Core */
	IMC_DEV(KBL_WQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S 4 cores Work Station */
	IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 2 Cores */
	IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 4 Cores */
	IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 4 Cores */
	IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 6 Cores */
	IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 2 Cores Desktop */
	IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Desktop */
	IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Desktop */
	IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Desktop */
	IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Work Station */
	IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Work Station */
	IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Work Station */
	IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Server */
	IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Server */
	IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Server */
	IMC_DEV(AML_YD_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core Y Mobile Dual Core */
	IMC_DEV(AML_YQ_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core Y Mobile Quad Core */
	IMC_DEV(WHL_UQ_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U Mobile Quad Core */
	IMC_DEV(WHL_4_UQ_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U Mobile Quad Core */
	IMC_DEV(WHL_UD_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U Mobile Dual Core */
	/* Comet Lake (CML) H/U/S variants — per the CML_* device ID names */
	IMC_DEV(CML_H1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_H2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_H3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S4_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S5_IMC, &skl_uncore_pci_driver),
	IMC_DEV(ICL_U_IMC, &icl_uncore_pci_driver),	/* 10th Gen Core Mobile */
	IMC_DEV(ICL_U2_IMC, &icl_uncore_pci_driver),	/* 10th Gen Core Mobile */
	{  /* end marker */ }
};
987 | ||
988 | ||
/* Iterate cursor @x over zero-terminated imc_uncore_pci_dev table @t. */
#define for_each_imc_pci_id(x, t) \
	for (x = (t); (x)->pci_id; x++)
991 | ||
992 | static struct pci_driver *imc_uncore_find_dev(void) | |
92807ffd | 993 | { |
521e8bac SE |
994 | const struct imc_uncore_pci_dev *p; |
995 | int ret; | |
996 | ||
997 | for_each_imc_pci_id(p, desktop_imc_pci_ids) { | |
998 | ret = snb_pci2phy_map_init(p->pci_id); | |
999 | if (ret == 0) | |
1000 | return p->driver; | |
1001 | } | |
1002 | return NULL; | |
92807ffd YZ |
1003 | } |
1004 | ||
521e8bac | 1005 | static int imc_uncore_pci_init(void) |
92807ffd | 1006 | { |
521e8bac SE |
1007 | struct pci_driver *imc_drv = imc_uncore_find_dev(); |
1008 | ||
1009 | if (!imc_drv) | |
1010 | return -ENODEV; | |
1011 | ||
92807ffd | 1012 | uncore_pci_uncores = snb_pci_uncores; |
521e8bac SE |
1013 | uncore_pci_driver = imc_drv; |
1014 | ||
92807ffd YZ |
1015 | return 0; |
1016 | } | |
1017 | ||
521e8bac SE |
/* Sandy Bridge entry point: defer to the common client-IMC probe. */
int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}
1022 | ||
/* Ivy Bridge entry point: defer to the common client-IMC probe. */
int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}
92807ffd YZ |
/* Haswell entry point: defer to the common client-IMC probe. */
int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}
1031 | ||
a41f3c8c SE |
/* Broadwell entry point: defer to the common client-IMC probe. */
int bdw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}
1036 | ||
0e1eb0a1 SE |
/* Skylake-and-later entry point: defer to the common client-IMC probe. */
int skl_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}
1041 | ||
92807ffd YZ |
1042 | /* end of Sandy Bridge uncore support */ |
1043 | ||
1044 | /* Nehalem uncore support */ | |
/* Globally stop all Nehalem uncore counters by clearing the global control MSR. */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}
1049 | ||
/* Globally enable all Nehalem uncore PMCs and the fixed counter. */
static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}
1054 | ||
1055 | static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | |
1056 | { | |
1057 | struct hw_perf_event *hwc = &event->hw; | |
1058 | ||
1059 | if (hwc->idx < UNCORE_PMC_IDX_FIXED) | |
1060 | wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN); | |
1061 | else | |
1062 | wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN); | |
1063 | } | |
1064 | ||
/* sysfs "format" attributes exposed for the Nehalem uncore PMU. */
static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};
1073 | ||
/* Groups the format attributes under the PMU's "format" sysfs directory. */
static const struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};
1078 | ||
/* Named Nehalem uncore events exposed via sysfs (event/umask encodings). */
static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};
1091 | ||
/* MSR access callbacks for the Nehalem uncore; reuses SNB's per-event disable. */
static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};
1099 | ||
/*
 * Nehalem uncore PMU: eight 48-bit general-purpose counters plus a
 * 48-bit fixed counter, all accessed via MSRs.
 * NOTE(review): .name is deliberately the empty string here — presumably
 * so the PMU registers under a bare "uncore" name; confirm against the
 * uncore core's name construction.
 */
static struct intel_uncore_type nhm_uncore = {
	.name		= "",
	.num_counters   = 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};
1115 | ||
/* NULL-terminated list of MSR-based uncore types for Nehalem. */
static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};
1120 | ||
/* Publish the Nehalem MSR uncore list to the generic uncore core. */
void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}
1125 | ||
1126 | /* end of Nehalem uncore support */ | |
fdb64822 KL |
1127 | |
1128 | /* Tiger Lake MMIO uncore support */ | |
1129 | ||
/*
 * Tiger Lake IMC PCI IDs. No pci_driver is registered for these; the
 * table is only scanned by tgl_uncore_get_mc_dev() to locate the IMC
 * whose config space holds the MCHBAR for the MMIO counters.
 */
static const struct pci_device_id tgl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U1_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U4_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_H_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ }
};
1153 | ||
/* Indices into the TGL free-running counter tables (total/read/write traffic). */
enum perf_tgl_uncore_imc_freerunning_types {
	TGL_MMIO_UNCORE_IMC_DATA_TOTAL,
	TGL_MMIO_UNCORE_IMC_DATA_READ,
	TGL_MMIO_UNCORE_IMC_DATA_WRITE,
	TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX
};
1160 | ||
/*
 * TGL-L free-running IMC counter layout. Initializer order presumably
 * follows struct freerunning_counters: { counter_base, counter_offset,
 * box_offset, num_counters, bits } — confirm against uncore.h.
 */
static struct freerunning_counters tgl_l_uncore_imc_freerunning[] = {
	[TGL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0x5040, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_READ]		= { 0x5058, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0x50A0, 0x0, 0x0, 1, 64 },
};
1166 | ||
/* TGL (non-L) free-running IMC counters; same layout as the TGL-L table
 * but at different MMIO offsets. */
static struct freerunning_counters tgl_uncore_imc_freerunning[] = {
	[TGL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0xd840, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_READ]		= { 0xd858, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0xd8A0, 0x0, 0x0, 1, 64 },
};
1172 | ||
/*
 * Free-running IMC events exposed via sysfs. The scale 6.103515625e-5
 * equals 64 / 2^20, i.e. raw counts are 64-byte transfers reported in MiB.
 */
static struct uncore_event_desc tgl_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_total,         "event=0xff,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(data_total.scale,   "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_total.unit,    "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_read,         "event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(data_read.scale,   "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_read.unit,    "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_write,        "event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(data_write.scale,  "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_write.unit,   "MiB"),

	{ /* end: all zeroes */ }
};
1188 | ||
1189 | static struct pci_dev *tgl_uncore_get_mc_dev(void) | |
1190 | { | |
1191 | const struct pci_device_id *ids = tgl_uncore_pci_ids; | |
1192 | struct pci_dev *mc_dev = NULL; | |
1193 | ||
1194 | while (ids && ids->vendor) { | |
1195 | mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, ids->device, NULL); | |
1196 | if (mc_dev) | |
1197 | return mc_dev; | |
1198 | ids++; | |
1199 | } | |
1200 | ||
1201 | return mc_dev; | |
1202 | } | |
1203 | ||
1204 | #define TGL_UNCORE_MMIO_IMC_MEM_OFFSET 0x10000 | |
2af834f1 | 1205 | #define TGL_UNCORE_PCI_IMC_MAP_SIZE 0xe000 |
fdb64822 KL |
1206 | |
1207 | static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box) | |
1208 | { | |
1209 | struct pci_dev *pdev = tgl_uncore_get_mc_dev(); | |
1210 | struct intel_uncore_pmu *pmu = box->pmu; | |
1b94d31d | 1211 | struct intel_uncore_type *type = pmu->type; |
fdb64822 KL |
1212 | resource_size_t addr; |
1213 | u32 mch_bar; | |
1214 | ||
1215 | if (!pdev) { | |
1216 | pr_warn("perf uncore: Cannot find matched IMC device.\n"); | |
1217 | return; | |
1218 | } | |
1219 | ||
1220 | pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET, &mch_bar); | |
1221 | /* MCHBAR is disabled */ | |
1222 | if (!(mch_bar & BIT(0))) { | |
1223 | pr_warn("perf uncore: MCHBAR is disabled. Failed to map IMC free-running counters.\n"); | |
1224 | return; | |
1225 | } | |
1226 | mch_bar &= ~BIT(0); | |
1227 | addr = (resource_size_t)(mch_bar + TGL_UNCORE_MMIO_IMC_MEM_OFFSET * pmu->pmu_idx); | |
1228 | ||
1229 | #ifdef CONFIG_PHYS_ADDR_T_64BIT | |
1230 | pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET + 4, &mch_bar); | |
1231 | addr |= ((resource_size_t)mch_bar << 32); | |
1232 | #endif | |
1233 | ||
1b94d31d KL |
1234 | box->io_addr = ioremap(addr, type->mmio_map_size); |
1235 | if (!box->io_addr) | |
1236 | pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name); | |
fdb64822 KL |
1237 | } |
1238 | ||
/* Callbacks for the TGL free-running IMC counters (MMIO reads, no enable/disable). */
static struct intel_uncore_ops tgl_uncore_imc_freerunning_ops = {
	.init_box	= tgl_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};
1245 | ||
/* sysfs "format" attributes for the TGL free-running IMC PMU. */
static struct attribute *tgl_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL
};
1251 | ||
/* Groups the TGL IMC format attributes under the PMU's "format" sysfs directory. */
static const struct attribute_group tgl_uncore_imc_format_group = {
	.name = "format",
	.attrs = tgl_uncore_imc_formats_attr,
};
1256 | ||
/*
 * TGL free-running IMC uncore type: two boxes (spaced by
 * TGL_UNCORE_MMIO_IMC_MEM_OFFSET via pmu_idx in init_box), each with
 * the three free-running data counters.
 */
static struct intel_uncore_type tgl_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 2,
	.num_freerunning_types	= TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= TGL_UNCORE_PCI_IMC_MAP_SIZE,
	.freerunning		= tgl_uncore_imc_freerunning,
	.ops			= &tgl_uncore_imc_freerunning_ops,
	.event_descs		= tgl_uncore_imc_events,
	.format_group		= &tgl_uncore_imc_format_group,
};
1268 | ||
/* NULL-terminated list of MMIO-based uncore types for Tiger Lake. */
static struct intel_uncore_type *tgl_mmio_uncores[] = {
	&tgl_uncore_imc_free_running,
	NULL
};
1273 | ||
/* TGL-L setup: swap in the TGL-L counter offsets, then publish the MMIO uncores. */
void tgl_l_uncore_mmio_init(void)
{
	tgl_uncore_imc_free_running.freerunning = tgl_l_uncore_imc_freerunning;
	uncore_mmio_uncores = tgl_mmio_uncores;
}
1279 | ||
/* TGL setup: publish the MMIO uncore list (default counter offsets). */
void tgl_uncore_mmio_init(void)
{
	uncore_mmio_uncores = tgl_mmio_uncores;
}
1284 | ||
1285 | /* end of Tiger Lake MMIO uncore support */ |