Commit | Line | Data |
---|---|---|
b2441318 | 1 | // SPDX-License-Identifier: GPL-2.0 |
46866b59 | 2 | /* Nehalem/SandBridge/Haswell/Broadwell/Skylake uncore support */ |
92553e40 | 3 | #include "uncore.h" |
92807ffd | 4 | |
0140e614 | 5 | /* Uncore IMC PCI IDs */ |
76a16b21 GK |
6 | #define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100 |
7 | #define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154 | |
8 | #define PCI_DEVICE_ID_INTEL_IVB_E3_IMC 0x0150 | |
9 | #define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00 | |
10 | #define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04 | |
11 | #define PCI_DEVICE_ID_INTEL_BDW_IMC 0x1604 | |
12 | #define PCI_DEVICE_ID_INTEL_SKL_U_IMC 0x1904 | |
13 | #define PCI_DEVICE_ID_INTEL_SKL_Y_IMC 0x190c | |
14 | #define PCI_DEVICE_ID_INTEL_SKL_HD_IMC 0x1900 | |
15 | #define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910 | |
16 | #define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f | |
17 | #define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f | |
e7438304 | 18 | #define PCI_DEVICE_ID_INTEL_SKL_E3_IMC 0x1918 |
76a16b21 GK |
19 | #define PCI_DEVICE_ID_INTEL_KBL_Y_IMC 0x590c |
20 | #define PCI_DEVICE_ID_INTEL_KBL_U_IMC 0x5904 | |
21 | #define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC 0x5914 | |
22 | #define PCI_DEVICE_ID_INTEL_KBL_SD_IMC 0x590f | |
23 | #define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC 0x591f | |
6e86d3db GK |
24 | #define PCI_DEVICE_ID_INTEL_KBL_HQ_IMC 0x5910 |
25 | #define PCI_DEVICE_ID_INTEL_KBL_WQ_IMC 0x5918 | |
76a16b21 GK |
26 | #define PCI_DEVICE_ID_INTEL_CFL_2U_IMC 0x3ecc |
27 | #define PCI_DEVICE_ID_INTEL_CFL_4U_IMC 0x3ed0 | |
28 | #define PCI_DEVICE_ID_INTEL_CFL_4H_IMC 0x3e10 | |
29 | #define PCI_DEVICE_ID_INTEL_CFL_6H_IMC 0x3ec4 | |
c10a8de0 KL |
30 | #define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC 0x3e0f |
31 | #define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC 0x3e1f | |
32 | #define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC 0x3ec2 | |
33 | #define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC 0x3e30 | |
34 | #define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC 0x3e18 | |
35 | #define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC 0x3ec6 | |
36 | #define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC 0x3e31 | |
37 | #define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC 0x3e33 | |
38 | #define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC 0x3eca | |
39 | #define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC 0x3e32 | |
6e86d3db GK |
40 | #define PCI_DEVICE_ID_INTEL_AML_YD_IMC 0x590c |
41 | #define PCI_DEVICE_ID_INTEL_AML_YQ_IMC 0x590d | |
42 | #define PCI_DEVICE_ID_INTEL_WHL_UQ_IMC 0x3ed0 | |
43 | #define PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC 0x3e34 | |
44 | #define PCI_DEVICE_ID_INTEL_WHL_UD_IMC 0x3e35 | |
bb85429a KL |
45 | #define PCI_DEVICE_ID_INTEL_CML_H1_IMC 0x9b44 |
46 | #define PCI_DEVICE_ID_INTEL_CML_H2_IMC 0x9b54 | |
47 | #define PCI_DEVICE_ID_INTEL_CML_H3_IMC 0x9b64 | |
48 | #define PCI_DEVICE_ID_INTEL_CML_U1_IMC 0x9b51 | |
49 | #define PCI_DEVICE_ID_INTEL_CML_U2_IMC 0x9b61 | |
50 | #define PCI_DEVICE_ID_INTEL_CML_U3_IMC 0x9b71 | |
51 | #define PCI_DEVICE_ID_INTEL_CML_S1_IMC 0x9b33 | |
52 | #define PCI_DEVICE_ID_INTEL_CML_S2_IMC 0x9b43 | |
53 | #define PCI_DEVICE_ID_INTEL_CML_S3_IMC 0x9b53 | |
54 | #define PCI_DEVICE_ID_INTEL_CML_S4_IMC 0x9b63 | |
55 | #define PCI_DEVICE_ID_INTEL_CML_S5_IMC 0x9b73 | |
6e394376 KL |
56 | #define PCI_DEVICE_ID_INTEL_ICL_U_IMC 0x8a02 |
57 | #define PCI_DEVICE_ID_INTEL_ICL_U2_IMC 0x8a12 | |
fdb64822 KL |
58 | #define PCI_DEVICE_ID_INTEL_TGL_U1_IMC 0x9a02 |
59 | #define PCI_DEVICE_ID_INTEL_TGL_U2_IMC 0x9a04 | |
60 | #define PCI_DEVICE_ID_INTEL_TGL_U3_IMC 0x9a12 | |
61 | #define PCI_DEVICE_ID_INTEL_TGL_U4_IMC 0x9a14 | |
62 | #define PCI_DEVICE_ID_INTEL_TGL_H_IMC 0x9a36 | |
43bc103a KL |
63 | #define PCI_DEVICE_ID_INTEL_RKL_1_IMC 0x4c43 |
64 | #define PCI_DEVICE_ID_INTEL_RKL_2_IMC 0x4c53 | |
772ed05f KL |
65 | #define PCI_DEVICE_ID_INTEL_ADL_1_IMC 0x4660 |
66 | #define PCI_DEVICE_ID_INTEL_ADL_2_IMC 0x4641 | |
6e86d3db | 67 | |
92807ffd YZ |
68 | /* SNB event control */ |
69 | #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff | |
70 | #define SNB_UNC_CTL_UMASK_MASK 0x0000ff00 | |
71 | #define SNB_UNC_CTL_EDGE_DET (1 << 18) | |
72 | #define SNB_UNC_CTL_EN (1 << 22) | |
73 | #define SNB_UNC_CTL_INVERT (1 << 23) | |
74 | #define SNB_UNC_CTL_CMASK_MASK 0x1f000000 | |
75 | #define NHM_UNC_CTL_CMASK_MASK 0xff000000 | |
76 | #define NHM_UNC_FIXED_CTR_CTL_EN (1 << 0) | |
77 | ||
78 | #define SNB_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \ | |
79 | SNB_UNC_CTL_UMASK_MASK | \ | |
80 | SNB_UNC_CTL_EDGE_DET | \ | |
81 | SNB_UNC_CTL_INVERT | \ | |
82 | SNB_UNC_CTL_CMASK_MASK) | |
83 | ||
84 | #define NHM_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \ | |
85 | SNB_UNC_CTL_UMASK_MASK | \ | |
86 | SNB_UNC_CTL_EDGE_DET | \ | |
87 | SNB_UNC_CTL_INVERT | \ | |
88 | NHM_UNC_CTL_CMASK_MASK) | |
89 | ||
90 | /* SNB global control register */ | |
91 | #define SNB_UNC_PERF_GLOBAL_CTL 0x391 | |
92 | #define SNB_UNC_FIXED_CTR_CTRL 0x394 | |
93 | #define SNB_UNC_FIXED_CTR 0x395 | |
94 | ||
95 | /* SNB uncore global control */ | |
96 | #define SNB_UNC_GLOBAL_CTL_CORE_ALL ((1 << 4) - 1) | |
97 | #define SNB_UNC_GLOBAL_CTL_EN (1 << 29) | |
98 | ||
99 | /* SNB Cbo register */ | |
100 | #define SNB_UNC_CBO_0_PERFEVTSEL0 0x700 | |
101 | #define SNB_UNC_CBO_0_PER_CTR0 0x706 | |
102 | #define SNB_UNC_CBO_MSR_OFFSET 0x10 | |
103 | ||
e3a13192 AK |
104 | /* SNB ARB register */ |
105 | #define SNB_UNC_ARB_PER_CTR0 0x3b0 | |
106 | #define SNB_UNC_ARB_PERFEVTSEL0 0x3b2 | |
107 | #define SNB_UNC_ARB_MSR_OFFSET 0x10 | |
108 | ||
92807ffd YZ |
109 | /* NHM global control register */ |
110 | #define NHM_UNC_PERF_GLOBAL_CTL 0x391 | |
111 | #define NHM_UNC_FIXED_CTR 0x394 | |
112 | #define NHM_UNC_FIXED_CTR_CTRL 0x395 | |
113 | ||
114 | /* NHM uncore global control */ | |
115 | #define NHM_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 8) - 1) | |
116 | #define NHM_UNC_GLOBAL_CTL_EN_FC (1ULL << 32) | |
117 | ||
118 | /* NHM uncore register */ | |
119 | #define NHM_UNC_PERFEVTSEL0 0x3c0 | |
120 | #define NHM_UNC_UNCORE_PMC0 0x3b0 | |
121 | ||
46866b59 KL |
122 | /* SKL uncore global control */ |
123 | #define SKL_UNC_PERF_GLOBAL_CTL 0xe01 | |
124 | #define SKL_UNC_GLOBAL_CTL_CORE_ALL ((1 << 5) - 1) | |
125 | ||
6e394376 KL |
126 | /* ICL Cbo register */ |
127 | #define ICL_UNC_CBO_CONFIG 0x396 | |
128 | #define ICL_UNC_NUM_CBO_MASK 0xf | |
129 | #define ICL_UNC_CBO_0_PER_CTR0 0x702 | |
130 | #define ICL_UNC_CBO_MSR_OFFSET 0x8 | |
131 | ||
8f5d41f3 KL |
132 | /* ICL ARB register */ |
133 | #define ICL_UNC_ARB_PER_CTR 0x3b1 | |
134 | #define ICL_UNC_ARB_PERFEVTSEL 0x3b3 | |
135 | ||
772ed05f KL |
136 | /* ADL uncore global control */ |
137 | #define ADL_UNC_PERF_GLOBAL_CTL 0x2ff0 | |
138 | #define ADL_UNC_FIXED_CTR_CTRL 0x2fde | |
139 | #define ADL_UNC_FIXED_CTR 0x2fdf | |
140 | ||
141 | /* ADL Cbo register */ | |
142 | #define ADL_UNC_CBO_0_PER_CTR0 0x2002 | |
143 | #define ADL_UNC_CBO_0_PERFEVTSEL0 0x2000 | |
144 | #define ADL_UNC_CTL_THRESHOLD 0x3f000000 | |
145 | #define ADL_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \ | |
146 | SNB_UNC_CTL_UMASK_MASK | \ | |
147 | SNB_UNC_CTL_EDGE_DET | \ | |
148 | SNB_UNC_CTL_INVERT | \ | |
149 | ADL_UNC_CTL_THRESHOLD) | |
150 | ||
151 | /* ADL ARB register */ | |
152 | #define ADL_UNC_ARB_PER_CTR0 0x2FD2 | |
153 | #define ADL_UNC_ARB_PERFEVTSEL0 0x2FD0 | |
154 | #define ADL_UNC_ARB_MSR_OFFSET 0x8 | |
155 | ||
92807ffd YZ |
156 | DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); |
157 | DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); | |
158 | DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); | |
159 | DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23"); | |
160 | DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28"); | |
161 | DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31"); | |
772ed05f | 162 | DEFINE_UNCORE_FORMAT_ATTR(threshold, threshold, "config:24-29"); |
92807ffd YZ |
163 | |
164 | /* Sandy Bridge uncore support */ | |
165 | static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | |
166 | { | |
167 | struct hw_perf_event *hwc = &event->hw; | |
168 | ||
169 | if (hwc->idx < UNCORE_PMC_IDX_FIXED) | |
170 | wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN); | |
171 | else | |
172 | wrmsrl(hwc->config_base, SNB_UNC_CTL_EN); | |
173 | } | |
174 | ||
175 | static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event) | |
176 | { | |
177 | wrmsrl(event->hw.config_base, 0); | |
178 | } | |
179 | ||
180 | static void snb_uncore_msr_init_box(struct intel_uncore_box *box) | |
181 | { | |
182 | if (box->pmu->pmu_idx == 0) { | |
183 | wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, | |
184 | SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL); | |
185 | } | |
186 | } | |
187 | ||
95f3be79 KL |
188 | static void snb_uncore_msr_enable_box(struct intel_uncore_box *box) |
189 | { | |
190 | wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, | |
191 | SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL); | |
192 | } | |
193 | ||
a46195f1 TG |
/* Teardown: the first PMU instance clears the shared global enable MSR. */
static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}
199 | ||
92807ffd YZ |
/* Named events exported via sysfs for the SNB uncore cbox PMU. */
static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};
204 | ||
/* Event-format attributes for the SNB MSR uncore PMUs. */
static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,	/* SNB cmask is 5 bits (config:24-28) */
	NULL,
};
213 | ||
/* Publishes the SNB format attributes under .../format in sysfs. */
static const struct attribute_group snb_uncore_format_group = {
	.name = "format",
	.attrs = snb_uncore_formats_attr,
};
218 | ||
/* MSR-based counter access callbacks shared by the SNB cbox and ARB types. */
static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box = snb_uncore_msr_init_box,
	.enable_box = snb_uncore_msr_enable_box,
	.exit_box = snb_uncore_msr_exit_box,
	.disable_event = snb_uncore_msr_disable_event,
	.enable_event = snb_uncore_msr_enable_event,
	.read_counter = uncore_msr_read_counter,
};
227 | ||
/*
 * Scheduling constraints for ARB events 0x80 and 0x83 (counter mask 0x1 —
 * presumably restricting them to counter 0; confirm against
 * UNCORE_EVENT_CONSTRAINT).
 */
static struct event_constraint snb_uncore_arb_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};
233 | ||
/*
 * SNB cbox uncore PMU: up to 4 boxes with 2 general 44-bit counters each,
 * plus one shared 48-bit fixed counter (single_fixed).  num_boxes is
 * capped to the real core count in snb_uncore_cpu_init().
 */
static struct intel_uncore_type snb_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 2,
	.num_boxes	= 4,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};
251 | ||
e3a13192 AK |
/*
 * SNB ARB uncore PMU: a single box with 2 general 44-bit counters,
 * subject to snb_uncore_arb_constraints.
 */
static struct intel_uncore_type snb_uncore_arb = {
	.name		= "arb",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= SNB_UNC_ARB_PER_CTR0,
	.event_ctl	= SNB_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};
265 | ||
92807ffd YZ |
/* NULL-terminated list of MSR uncore types registered for SNB. */
static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};
271 | ||
/* Register the SNB MSR uncore types; at most one cbox per core. */
void snb_uncore_cpu_init(void)
{
	uncore_msr_uncores = snb_msr_uncores;
	if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}
278 | ||
46866b59 KL |
/*
 * SKL box setup: the first PMU instance programs the shared global enable
 * MSR; box 7 is flagged because its counters live at different MSR offsets.
 */
static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
	}

	/* The 8th CBOX has different MSR space */
	if (box->pmu->pmu_idx == 7)
		__set_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags);
}
290 | ||
95f3be79 KL |
291 | static void skl_uncore_msr_enable_box(struct intel_uncore_box *box) |
292 | { | |
293 | wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, | |
294 | SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL); | |
295 | } | |
296 | ||
46866b59 KL |
/* Teardown: the first PMU instance clears the SKL global enable MSR. */
static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
}
302 | ||
/*
 * SKL MSR callbacks: SKL-specific box init/enable/exit plus the SNB
 * per-event enable/disable and counter read helpers.
 */
static struct intel_uncore_ops skl_uncore_msr_ops = {
	.init_box = skl_uncore_msr_init_box,
	.enable_box = skl_uncore_msr_enable_box,
	.exit_box = skl_uncore_msr_exit_box,
	.disable_event = snb_uncore_msr_disable_event,
	.enable_event = snb_uncore_msr_enable_event,
	.read_counter = uncore_msr_read_counter,
};
311 | ||
/*
 * SKL cbox uncore PMU: up to 8 boxes (the 8th needs the MSR-offset flag
 * set in skl_uncore_msr_init_box), 4 general 44-bit counters per box and
 * a shared 48-bit fixed counter.  num_boxes is capped to the core count
 * in skl_uncore_cpu_init().
 */
static struct intel_uncore_type skl_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &skl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};
329 | ||
/* MSR uncore types for SKL: SKL cbox plus the SNB ARB (ops swapped at init). */
static struct intel_uncore_type *skl_msr_uncores[] = {
	&skl_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};
335 | ||
/*
 * Register the SKL MSR uncore types.  The shared snb_uncore_arb is
 * switched to the SKL ops so it uses the SKL global-control MSR.
 */
void skl_uncore_cpu_init(void)
{
	uncore_msr_uncores = skl_msr_uncores;
	if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
}
343 | ||
8f5d41f3 KL |
/*
 * ICL MSR callbacks: per-event enable/disable/read only — no box-level
 * init/enable/exit callbacks are installed here.
 */
static struct intel_uncore_ops icl_uncore_msr_ops = {
	.disable_event = snb_uncore_msr_disable_event,
	.enable_event = snb_uncore_msr_enable_event,
	.read_counter = uncore_msr_read_counter,
};
349 | ||
6e394376 KL |
/*
 * ICL cbox uncore PMU: 2 general 44-bit counters per box.  num_boxes is
 * intentionally unset here; it is read from the CBO_CONFIG MSR in
 * icl_uncore_cpu_init().
 */
static struct intel_uncore_type icl_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 2,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ICL_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= ICL_UNC_CBO_MSR_OFFSET,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};
361 | ||
/* Named events for the ICL clock box (fixed clockticks event only). */
static struct uncore_event_desc icl_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
	{ /* end: all zeroes */ },
};
366 | ||
/* The clock box only accepts an event select — no umask/edge/inv/cmask. */
static struct attribute *icl_uncore_clock_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};
371 | ||
/* Sysfs "format" group for the ICL (and ADL) clock box. */
static struct attribute_group icl_uncore_clock_format_group = {
	.name = "format",
	.attrs = icl_uncore_clock_formats_attr,
};
376 | ||
/*
 * ICL clock box: a single box exposing only the shared 48-bit fixed
 * counter (clockticks).
 */
static struct intel_uncore_type icl_uncore_clockbox = {
	.name		= "clock",
	.num_counters	= 1,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
	.format_group	= &icl_uncore_clock_format_group,
	.ops		= &icl_uncore_msr_ops,
	.event_descs	= icl_uncore_events,
};
390 | ||
8f5d41f3 KL |
/* ICL ARB uncore PMU: one box with a single general 44-bit counter. */
static struct intel_uncore_type icl_uncore_arb = {
	.name		= "arb",
	.num_counters   = 1,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ICL_UNC_ARB_PER_CTR,
	.event_ctl	= ICL_UNC_ARB_PERFEVTSEL,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};
402 | ||
6e394376 KL |
/* MSR uncore types registered for ICL. */
static struct intel_uncore_type *icl_msr_uncores[] = {
	&icl_uncore_cbox,
	&icl_uncore_arb,
	&icl_uncore_clockbox,
	NULL,
};
409 | ||
410 | static int icl_get_cbox_num(void) | |
411 | { | |
412 | u64 num_boxes; | |
413 | ||
414 | rdmsrl(ICL_UNC_CBO_CONFIG, num_boxes); | |
415 | ||
416 | return num_boxes & ICL_UNC_NUM_CBO_MASK; | |
417 | } | |
418 | ||
/* Register ICL uncore types; cbox count comes from the CBO config MSR. */
void icl_uncore_cpu_init(void)
{
	uncore_msr_uncores = icl_msr_uncores;
	icl_uncore_cbox.num_boxes = icl_get_cbox_num();
}
424 | ||
8abbcfef KL |
/* TGL reuses the ICL cbox/clock boxes but the SNB ARB description. */
static struct intel_uncore_type *tgl_msr_uncores[] = {
	&icl_uncore_cbox,
	&snb_uncore_arb,
	&icl_uncore_clockbox,
	NULL,
};
431 | ||
43bc103a KL |
/*
 * RKL box setup: set only the global enable bit (no per-core enable mask,
 * unlike the SNB/SKL init_box variants).
 */
static void rkl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}
437 | ||
8abbcfef KL |
/*
 * Register TGL uncore types.  All types are pointed at the SKL ops, and
 * the shared skl_uncore_msr_ops is then mutated to use RKL's init_box —
 * note this affects every type referencing skl_uncore_msr_ops afterwards.
 */
void tgl_uncore_cpu_init(void)
{
	uncore_msr_uncores = tgl_msr_uncores;
	icl_uncore_cbox.num_boxes = icl_get_cbox_num();
	icl_uncore_cbox.ops = &skl_uncore_msr_ops;
	icl_uncore_clockbox.ops = &skl_uncore_msr_ops;
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
	skl_uncore_msr_ops.init_box = rkl_uncore_msr_init_box;
}
447 | ||
772ed05f KL |
/* ADL box setup: first PMU instance sets the ADL global enable bit. */
static void adl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}
453 | ||
454 | static void adl_uncore_msr_enable_box(struct intel_uncore_box *box) | |
455 | { | |
456 | wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN); | |
457 | } | |
458 | ||
/* Clear the ADL global enable (first PMU instance only). */
static void adl_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, 0);
}
464 | ||
/* Teardown mirrors disable_box: clear the ADL global enable MSR. */
static void adl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, 0);
}
470 | ||
/* ADL MSR callbacks: ADL box control plus the SNB per-event helpers. */
static struct intel_uncore_ops adl_uncore_msr_ops = {
	.init_box	= adl_uncore_msr_init_box,
	.enable_box	= adl_uncore_msr_enable_box,
	.disable_box	= adl_uncore_msr_disable_box,
	.exit_box	= adl_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};
480 | ||
/* ADL format attributes: 6-bit threshold field instead of SNB's cmask. */
static struct attribute *adl_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_threshold.attr,	/* config:24-29 */
	NULL,
};
489 | ||
/* Publishes the ADL format attributes under .../format in sysfs. */
static const struct attribute_group adl_uncore_format_group = {
	.name = "format",
	.attrs = adl_uncore_formats_attr,
};
494 | ||
/*
 * ADL cbox uncore PMU: 2 general 44-bit counters per box; num_boxes is
 * filled in by adl_uncore_cpu_init() from the CBO config MSR.
 */
static struct intel_uncore_type adl_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 2,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ADL_UNC_CBO_0_PER_CTR0,
	.event_ctl	= ADL_UNC_CBO_0_PERFEVTSEL0,
	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
	.msr_offset	= ICL_UNC_CBO_MSR_OFFSET,
	.ops		= &adl_uncore_msr_ops,
	.format_group	= &adl_uncore_format_group,
};
506 | ||
/*
 * ADL ARB uncore PMU: 2 boxes, 2 general 44-bit counters each, reusing
 * the SNB ARB event constraints and format group.
 */
static struct intel_uncore_type adl_uncore_arb = {
	.name		= "arb",
	.num_counters   = 2,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ADL_UNC_ARB_PER_CTR0,
	.event_ctl	= ADL_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= ADL_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &adl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};
520 | ||
/*
 * ADL clock box: single box exposing the 48-bit fixed counter at the
 * ADL-specific MSR addresses; reuses the ICL clock format and events.
 */
static struct intel_uncore_type adl_uncore_clockbox = {
	.name		= "clock",
	.num_counters	= 1,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= ADL_UNC_FIXED_CTR,
	.fixed_ctl	= ADL_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
	.format_group	= &icl_uncore_clock_format_group,
	.ops		= &adl_uncore_msr_ops,
	.event_descs	= icl_uncore_events,
};
534 | ||
/* MSR uncore types registered for ADL. */
static struct intel_uncore_type *adl_msr_uncores[] = {
	&adl_uncore_cbox,
	&adl_uncore_arb,
	&adl_uncore_clockbox,
	NULL,
};
541 | ||
/* Register ADL uncore types; cbox count comes from the ICL CBO config MSR. */
void adl_uncore_cpu_init(void)
{
	adl_uncore_cbox.num_boxes = icl_get_cbox_num();
	uncore_msr_uncores = adl_msr_uncores;
}
547 | ||
92807ffd YZ |
/* Index into snb_pci_uncores[] used by the UNCORE_PCI_DEV_DATA entries. */
enum {
	SNB_PCI_UNCORE_IMC,
};
551 | ||
/*
 * Free-running IMC events exported via sysfs.  The scale for each event
 * is 6.103515625e-5 MiB, i.e. 64 bytes per raw count.
 */
static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(gt_requests, "event=0x03"),
	INTEL_UNCORE_EVENT_DESC(gt_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(gt_requests.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(ia_requests, "event=0x04"),
	INTEL_UNCORE_EVENT_DESC(ia_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ia_requests.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(io_requests, "event=0x05"),
	INTEL_UNCORE_EVENT_DESC(io_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(io_requests.unit, "MiB"),

	{ /* end: all zeroes */ },
};
575 | ||
576 | #define SNB_UNCORE_PCI_IMC_EVENT_MASK 0xff | |
577 | #define SNB_UNCORE_PCI_IMC_BAR_OFFSET 0x48 | |
578 | ||
579 | /* page size multiple covering all config regs */ | |
580 | #define SNB_UNCORE_PCI_IMC_MAP_SIZE 0x6000 | |
581 | ||
582 | #define SNB_UNCORE_PCI_IMC_DATA_READS 0x1 | |
583 | #define SNB_UNCORE_PCI_IMC_DATA_READS_BASE 0x5050 | |
584 | #define SNB_UNCORE_PCI_IMC_DATA_WRITES 0x2 | |
585 | #define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE 0x5054 | |
586 | #define SNB_UNCORE_PCI_IMC_CTR_BASE SNB_UNCORE_PCI_IMC_DATA_READS_BASE | |
587 | ||
24633d90 VS |
588 | /* BW break down- legacy counters */ |
589 | #define SNB_UNCORE_PCI_IMC_GT_REQUESTS 0x3 | |
590 | #define SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE 0x5040 | |
591 | #define SNB_UNCORE_PCI_IMC_IA_REQUESTS 0x4 | |
592 | #define SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE 0x5044 | |
593 | #define SNB_UNCORE_PCI_IMC_IO_REQUESTS 0x5 | |
594 | #define SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE 0x5048 | |
595 | ||
/* Index for each SNB IMC free-running counter type. */
enum perf_snb_uncore_imc_freerunning_types {
	SNB_PCI_UNCORE_IMC_DATA_READS		= 0,
	SNB_PCI_UNCORE_IMC_DATA_WRITES,
	SNB_PCI_UNCORE_IMC_GT_REQUESTS,
	SNB_PCI_UNCORE_IMC_IA_REQUESTS,
	SNB_PCI_UNCORE_IMC_IO_REQUESTS,

	SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
};
605 | ||
/*
 * Free-running counter table, indexed by the enum above.  Each entry
 * appears to be { counter base, box offset, counter offset, number of
 * counters, bits } — TODO confirm field order against struct
 * freerunning_counters.  All SNB IMC entries describe one 32-bit
 * counter.
 */
static struct freerunning_counters snb_uncore_imc_freerunning[] = {
	[SNB_PCI_UNCORE_IMC_DATA_READS]		= { SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_DATA_WRITES]	= { SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_GT_REQUESTS]	= { SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_IA_REQUESTS]	= { SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_IO_REQUESTS]	= { SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE,
							0x0, 0x0, 1, 32 },
};
618 | ||
92807ffd YZ |
/* The IMC PMU only takes an event select — no umask or modifiers. */
static struct attribute *snb_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};
623 | ||
/* Sysfs "format" group for the SNB IMC PMU. */
static const struct attribute_group snb_uncore_imc_format_group = {
	.name = "format",
	.attrs = snb_uncore_imc_formats_attr,
};
628 | ||
/*
 * Map the IMC's MMIO counter region.  The BAR is read from PCI config
 * space, page-aligned, then ioremap'd for mmio_map_size bytes; the box
 * hrtimer period is also set here.
 */
static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct pci_dev *pdev = box->pci_dev;
	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
	resource_size_t addr;
	u32 pci_dword;

	/* Low 32 bits of the BAR. */
	pci_read_config_dword(pdev, where, &pci_dword);
	addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	/* Fold in the high half when 64-bit physical addresses are supported. */
	pci_read_config_dword(pdev, where + 4, &pci_dword);
	addr |= ((resource_size_t)pci_dword << 32);
#endif

	/* Strip the BAR's low flag bits down to a page-aligned address. */
	addr &= ~(PAGE_SIZE - 1);

	box->io_addr = ioremap(addr, type->mmio_map_size);
	if (!box->io_addr)
		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);

	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}
653 | ||
/*
 * The IMC counters are free running (see snb_uncore_imc_freerunning),
 * so box and event enable/disable are intentional no-ops.
 */
static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}
665 | ||
/*
 * Keep the custom event_init() function compatible with old event
 * encoding for free running counters.
 *
 * Validates the event (counting mode only, known event code, valid CPU),
 * binds it to the package's designated CPU, and maps the legacy config
 * value onto a free-running counter base plus the standard free-running
 * encoding.  Returns 0 on success, -ENOENT for a foreign PMU type or
 * missing device, -EINVAL otherwise.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
	int idx, base;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * unsupported modes and filters
	 * NOTE(review): this repeats the hwc->sample_period check above and
	 * looks redundant — confirm before removing.
	 */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* check only supported bits are set */
	if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
		return -EINVAL;

	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;

	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;
	/*
	 * check event is known (whitelist, determines counter)
	 */
	switch (cfg) {
	case SNB_UNCORE_PCI_IMC_DATA_READS:
		base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_DATA_WRITES:
		base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_GT_REQUESTS:
		base = SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_IA_REQUESTS:
		base = SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_IO_REQUESTS:
		base = SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	default:
		return -EINVAL;
	}

	/* must be done before validate_group */
	event->hw.event_base = base;
	event->hw.idx = idx;

	/* Convert to standard encoding format for freerunning counters */
	event->hw.config = ((cfg - 1) << 8) | 0x10ff;

	/* no group validation needed, we have free running counters */

	return 0;
}
757 | ||
/* No per-event hardware configuration is needed for the IMC counters. */
static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}
762 | ||
/*
 * Record the IMC device's PCI bus as belonging to die 0 in the
 * segment's pci2phy map.  Returns 0 on success, -ENOTTY when no device
 * with @devid is present, -ENOMEM when the segment map can't be found
 * or allocated.  The device reference is dropped on every path.
 */
int snb_pci2phy_map_init(int devid)
{
	struct pci_dev *dev = NULL;
	struct pci2phy_map *map;
	int bus, segment;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
	if (!dev)
		return -ENOTTY;

	bus = dev->bus->number;
	segment = pci_domain_nr(dev->bus);

	raw_spin_lock(&pci2phy_map_lock);
	map = __find_pci2phy_map(segment);
	if (!map) {
		raw_spin_unlock(&pci2phy_map_lock);
		pci_dev_put(dev);
		return -ENOMEM;
	}
	map->pbus_to_dieid[bus] = 0;
	raw_spin_unlock(&pci2phy_map_lock);

	pci_dev_put(dev);

	return 0;
}
790 | ||
/*
 * Custom perf PMU for the SNB IMC free-running counters.
 * perf_invalid_context: counting only, no per-task context;
 * PERF_PMU_CAP_NO_EXCLUDE: exclusion modifiers are not supported.
 */
static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= snb_uncore_imc_event_init,
	.add		= uncore_pmu_event_add,
	.del		= uncore_pmu_event_del,
	.start		= uncore_pmu_event_start,
	.stop		= uncore_pmu_event_stop,
	.read		= uncore_pmu_event_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};
801 | ||
/*
 * IMC callbacks: MMIO-backed init/read plus the no-op enable/disable
 * stubs (the counters are free running).
 */
static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box	= snb_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.enable_box	= snb_uncore_imc_enable_box,
	.disable_box	= snb_uncore_imc_disable_box,
	.disable_event	= snb_uncore_imc_disable_event,
	.enable_event	= snb_uncore_imc_enable_event,
	.hw_config	= snb_uncore_imc_hw_config,
	.read_counter	= uncore_mmio_read_counter,
};
812 | ||
813 | static struct intel_uncore_type snb_uncore_imc = { | |
814 | .name = "imc", | |
24633d90 | 815 | .num_counters = 5, |
92807ffd | 816 | .num_boxes = 1, |
9aae1780 | 817 | .num_freerunning_types = SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX, |
1b94d31d | 818 | .mmio_map_size = SNB_UNCORE_PCI_IMC_MAP_SIZE, |
9aae1780 | 819 | .freerunning = snb_uncore_imc_freerunning, |
92807ffd YZ |
820 | .event_descs = snb_uncore_imc_events, |
821 | .format_group = &snb_uncore_imc_format_group, | |
92807ffd YZ |
822 | .ops = &snb_uncore_imc_ops, |
823 | .pmu = &snb_uncore_imc_pmu, | |
824 | }; | |
825 | ||
826 | static struct intel_uncore_type *snb_pci_uncores[] = { | |
827 | [SNB_PCI_UNCORE_IMC] = &snb_uncore_imc, | |
828 | NULL, | |
829 | }; | |
830 | ||
/* Sandy Bridge client IMC PCI IDs. */
static const struct pci_device_id snb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

/* Ivy Bridge client and Xeon E3 v2 IMC PCI IDs. */
static const struct pci_device_id ivb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

/* Haswell desktop and ULT mobile IMC PCI IDs. */
static const struct pci_device_id hsw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

/* Broadwell IMC PCI IDs. */
static const struct pci_device_id bdw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
870 | ||
/*
 * Skylake-class client IMC PCI IDs: SKL, KBL, CFL, AML, WHL and CML
 * variants all share the Skylake uncore driver.
 */
static const struct pci_device_id skl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_E3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_HQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_WQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AML_YD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AML_YQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_UQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_UD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_H1_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_H2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_H3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_U1_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_U2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_U3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S1_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S4_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S5_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
1050 | ||
/* Ice Lake and Rocket Lake client IMC PCI IDs (share the ICL driver). */
static const struct pci_device_id icl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RKL_1_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RKL_2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
1070 | ||
92807ffd YZ |
/*
 * Per-generation pci_driver stubs. They only carry an ID table: the
 * devices are matched manually by the uncore core, never bound.
 */
static struct pci_driver snb_uncore_pci_driver = {
	.name		= "snb_uncore",
	.id_table	= snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
	.name		= "ivb_uncore",
	.id_table	= ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
	.name		= "hsw_uncore",
	.id_table	= hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
	.name		= "bdw_uncore",
	.id_table	= bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
	.name		= "skl_uncore",
	.id_table	= skl_uncore_pci_ids,
};

static struct pci_driver icl_uncore_pci_driver = {
	.name		= "icl_uncore",
	.id_table	= icl_uncore_pci_ids,
};
1100 | ||
/* Pairs an IMC PCI device ID with the pci_driver that handles it. */
struct imc_uncore_pci_dev {
	__u32 pci_id;
	struct pci_driver *driver;
};
/* Shorthand table-entry builder; expands the PCI_DEVICE_ID_INTEL_ prefix. */
#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }
1107 | ||
/*
 * All known client IMC device IDs, in probe order, each mapped to the
 * pci_driver providing its uncore support. Terminated by a zero entry.
 */
static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
	IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core Y */
	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
	IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Dual Core */
	IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Quad Core */
	IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Dual Core */
	IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Quad Core */
	IMC_DEV(SKL_E3_IMC, &skl_uncore_pci_driver),  /* Xeon E3 V5 Gen Core processor */
	IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core Y */
	IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U */
	IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U Quad Core */
	IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Dual Core */
	IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Quad Core */
	IMC_DEV(KBL_HQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core H Quad Core */
	IMC_DEV(KBL_WQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S 4 cores Work Station */
	IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 2 Cores */
	IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 4 Cores */
	IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 4 Cores */
	IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 6 Cores */
	IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 2 Cores Desktop */
	IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Desktop */
	IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Desktop */
	IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Desktop */
	IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Work Station */
	IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Work Station */
	IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Work Station */
	IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Server */
	IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Server */
	IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Server */
	IMC_DEV(AML_YD_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core Y Mobile Dual Core */
	IMC_DEV(AML_YQ_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core Y Mobile Quad Core */
	IMC_DEV(WHL_UQ_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U Mobile Quad Core */
	IMC_DEV(WHL_4_UQ_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U Mobile Quad Core */
	IMC_DEV(WHL_UD_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U Mobile Dual Core */
	IMC_DEV(CML_H1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_H2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_H3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S4_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S5_IMC, &skl_uncore_pci_driver),
	IMC_DEV(ICL_U_IMC, &icl_uncore_pci_driver),	/* 10th Gen Core Mobile */
	IMC_DEV(ICL_U2_IMC, &icl_uncore_pci_driver),	/* 10th Gen Core Mobile */
	IMC_DEV(RKL_1_IMC, &icl_uncore_pci_driver),
	IMC_DEV(RKL_2_IMC, &icl_uncore_pci_driver),
	{  /* end marker */ }
};

/* Iterate the table above until the zero end marker. */
#define for_each_imc_pci_id(x, t) \
	for (x = (t); (x)->pci_id; x++)
1169 | ||
1170 | static struct pci_driver *imc_uncore_find_dev(void) | |
92807ffd | 1171 | { |
521e8bac SE |
1172 | const struct imc_uncore_pci_dev *p; |
1173 | int ret; | |
1174 | ||
1175 | for_each_imc_pci_id(p, desktop_imc_pci_ids) { | |
1176 | ret = snb_pci2phy_map_init(p->pci_id); | |
1177 | if (ret == 0) | |
1178 | return p->driver; | |
1179 | } | |
1180 | return NULL; | |
92807ffd YZ |
1181 | } |
1182 | ||
521e8bac | 1183 | static int imc_uncore_pci_init(void) |
92807ffd | 1184 | { |
521e8bac SE |
1185 | struct pci_driver *imc_drv = imc_uncore_find_dev(); |
1186 | ||
1187 | if (!imc_drv) | |
1188 | return -ENODEV; | |
1189 | ||
92807ffd | 1190 | uncore_pci_uncores = snb_pci_uncores; |
521e8bac SE |
1191 | uncore_pci_driver = imc_drv; |
1192 | ||
92807ffd YZ |
1193 | return 0; |
1194 | } | |
1195 | ||
/*
 * Per-generation entry points called from the uncore core; they all
 * share the common client IMC initialization path.
 */
int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}
1219 | ||
/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */

/* Stop all uncore counting by clearing the global control MSR. */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

/* Globally enable all programmable and the fixed uncore counters. */
static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

/*
 * Enable one counter: programmable counters get their config plus the
 * enable bit; the fixed counter only has an enable bit to set.
 */
static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}
1242 | ||
/* Event-format fields exposed in sysfs for the Nehalem uncore PMU. */
static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};

static const struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};
1256 | ||
/* Named Nehalem uncore events exposed via sysfs. */
static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};
1269 | ||
/* MSR-based box operations for the Nehalem uncore. */
static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

/*
 * Nehalem uncore PMU: 8 programmable 48-bit counters plus one fixed
 * counter, all accessed through MSRs. The empty name makes its events
 * appear directly under "uncore" in sysfs.
 */
static struct intel_uncore_type nhm_uncore = {
	.name		= "",
	.num_counters   = 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};

/* NULL-terminated list of MSR uncore types for Nehalem. */
static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};
1298 | ||
/* Install the Nehalem MSR uncore types into the common uncore core. */
void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}

/* end of Nehalem uncore support */
/* Tiger Lake MMIO uncore support */

/* Tiger Lake and Alder Lake IMC PCI IDs (used only to locate MCHBAR). */
static const struct pci_device_id tgl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U1_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U4_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_H_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_1_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ }
};
1339 | ||
/* Free-running counter kinds exposed by the TGL IMC. */
enum perf_tgl_uncore_imc_freerunning_types {
	TGL_MMIO_UNCORE_IMC_DATA_TOTAL,
	TGL_MMIO_UNCORE_IMC_DATA_READ,
	TGL_MMIO_UNCORE_IMC_DATA_WRITE,
	TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX
};

/* Counter MMIO offsets for TGL-L parts (64-bit, one counter each). */
static struct freerunning_counters tgl_l_uncore_imc_freerunning[] = {
	[TGL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0x5040, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_READ]		= { 0x5058, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0x50A0, 0x0, 0x0, 1, 64 },
};

/* Counter MMIO offsets for the other TGL parts. */
static struct freerunning_counters tgl_uncore_imc_freerunning[] = {
	[TGL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0xd840, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_READ]		= { 0xd858, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0xd8A0, 0x0, 0x0, 1, 64 },
};
1358 | ||
/* TGL IMC free-running events with scale/unit so perf reports MiB. */
static struct uncore_event_desc tgl_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_total,         "event=0xff,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(data_total.scale,   "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_total.unit,    "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_read,         "event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(data_read.scale,   "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_read.unit,    "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_write,        "event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(data_write.scale,  "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_write.unit,   "MiB"),

	{ /* end: all zeroes */ }
};
1374 | ||
1375 | static struct pci_dev *tgl_uncore_get_mc_dev(void) | |
1376 | { | |
1377 | const struct pci_device_id *ids = tgl_uncore_pci_ids; | |
1378 | struct pci_dev *mc_dev = NULL; | |
1379 | ||
1380 | while (ids && ids->vendor) { | |
1381 | mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, ids->device, NULL); | |
1382 | if (mc_dev) | |
1383 | return mc_dev; | |
1384 | ids++; | |
1385 | } | |
1386 | ||
1387 | return mc_dev; | |
1388 | } | |
1389 | ||
1390 | #define TGL_UNCORE_MMIO_IMC_MEM_OFFSET 0x10000 | |
2af834f1 | 1391 | #define TGL_UNCORE_PCI_IMC_MAP_SIZE 0xe000 |
fdb64822 KL |
1392 | |
1393 | static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box) | |
1394 | { | |
1395 | struct pci_dev *pdev = tgl_uncore_get_mc_dev(); | |
1396 | struct intel_uncore_pmu *pmu = box->pmu; | |
1b94d31d | 1397 | struct intel_uncore_type *type = pmu->type; |
fdb64822 KL |
1398 | resource_size_t addr; |
1399 | u32 mch_bar; | |
1400 | ||
1401 | if (!pdev) { | |
1402 | pr_warn("perf uncore: Cannot find matched IMC device.\n"); | |
1403 | return; | |
1404 | } | |
1405 | ||
1406 | pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET, &mch_bar); | |
1407 | /* MCHBAR is disabled */ | |
1408 | if (!(mch_bar & BIT(0))) { | |
1409 | pr_warn("perf uncore: MCHBAR is disabled. Failed to map IMC free-running counters.\n"); | |
1410 | return; | |
1411 | } | |
1412 | mch_bar &= ~BIT(0); | |
1413 | addr = (resource_size_t)(mch_bar + TGL_UNCORE_MMIO_IMC_MEM_OFFSET * pmu->pmu_idx); | |
1414 | ||
1415 | #ifdef CONFIG_PHYS_ADDR_T_64BIT | |
1416 | pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET + 4, &mch_bar); | |
1417 | addr |= ((resource_size_t)mch_bar << 32); | |
1418 | #endif | |
1419 | ||
1b94d31d KL |
1420 | box->io_addr = ioremap(addr, type->mmio_map_size); |
1421 | if (!box->io_addr) | |
1422 | pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name); | |
fdb64822 KL |
1423 | } |
1424 | ||
/* Box operations: free-running counters need only map/unmap and read. */
static struct intel_uncore_ops tgl_uncore_imc_freerunning_ops = {
	.init_box	= tgl_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

/* Event-format fields exposed in sysfs for the TGL IMC PMU. */
static struct attribute *tgl_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL
};

static const struct attribute_group tgl_uncore_imc_format_group = {
	.name = "format",
	.attrs = tgl_uncore_imc_formats_attr,
};

/* TGL IMC free-running uncore type: two boxes (memory controllers). */
static struct intel_uncore_type tgl_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 2,
	.num_freerunning_types	= TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= TGL_UNCORE_PCI_IMC_MAP_SIZE,
	.freerunning		= tgl_uncore_imc_freerunning,
	.ops			= &tgl_uncore_imc_freerunning_ops,
	.event_descs		= tgl_uncore_imc_events,
	.format_group		= &tgl_uncore_imc_format_group,
};
1454 | ||
/* NULL-terminated list of MMIO uncore types for Tiger Lake. */
static struct intel_uncore_type *tgl_mmio_uncores[] = {
	&tgl_uncore_imc_free_running,
	NULL
};

/* TGL-L uses different counter offsets; swap in its table first. */
void tgl_l_uncore_mmio_init(void)
{
	tgl_uncore_imc_free_running.freerunning = tgl_l_uncore_imc_freerunning;
	uncore_mmio_uncores = tgl_mmio_uncores;
}

/* Install the Tiger Lake MMIO uncore types into the common uncore core. */
void tgl_uncore_mmio_init(void)
{
	uncore_mmio_uncores = tgl_mmio_uncores;
}

/* end of Tiger Lake MMIO uncore support */