#include "perf_event_intel_uncore.h"

static struct intel_uncore_type *empty_uncore[] = { NULL, };
static struct intel_uncore_type **msr_uncores = empty_uncore;
static struct intel_uncore_type **pci_uncores = empty_uncore;
/* pci bus to socket mapping */
static int pcibus_to_physid[256] = { [0 ... 255] = -1, };

static struct pci_dev *extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];

static DEFINE_RAW_SPINLOCK(uncore_box_lock);

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint constraint_fixed =
	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
static struct event_constraint constraint_empty =
	EVENT_CONSTRAINT(0, 0, 0);

#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))

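/*
 * Example (hypothetical values): __BITS_VALUE(0xabcd, 1, 8) selects the
 * second 8-bit field of 0xabcd, i.e. (0xabcd >> 8) & 0xff == 0xab.  It is
 * used below to read one slice out of a packed atomic_t reference count,
 * e.g. one 6-bit slice per Cbox filter field.
 */
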
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");

static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 count;

	rdmsrl(event->hw.event_base, count);

	return count;
}

/*
 * generic get constraint function for shared match/mask registers.
 */
static struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	unsigned long flags;
	bool ok = false;

	/*
	 * reg->alloc can be set due to existing state, so for fake box we
	 * need to ignore this, otherwise we might fail to allocate proper
	 * fake state for this extra reg constraint.
	 */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;

	er = &box->shared_regs[reg1->idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!atomic_read(&er->ref) ||
	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
		atomic_inc(&er->ref);
		er->config1 = reg1->config;
		er->config2 = reg2->config;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (ok) {
		if (!uncore_box_is_fake(box))
			reg1->alloc = 1;
		return NULL;
	}

	return &constraint_empty;
}

static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	/*
	 * Only put constraint if extra reg was actually allocated. Also
	 * takes care of events which do not use an extra shared reg.
	 *
	 * Also, if this is a fake box we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent box
	 * state either since it will be thrown out.
	 */
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	er = &box->shared_regs[reg1->idx];
	atomic_dec(&er->ref);
	reg1->alloc = 0;
}

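/*
 * The get/put pair above implements a simple first-claimer policy: the first
 * event to take a reference programs config1/config2, and later events may
 * share the register only if they request identical values; anything else
 * gets constraint_empty and fails to schedule.  The SNB-EP/IVT QPI boxes use
 * this generic version for their packet match/mask registers.
 */
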
static u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	er = &box->shared_regs[idx];

	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return config;
}

/* Sandy Bridge-EP uncore support */
static struct intel_uncore_type snbep_uncore_cbox;
static struct intel_uncore_type snbep_uncore_pcu;

static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}

static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
}

static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
					   struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}

static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};

static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	{ /* end: all zeroes */ },
};

static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};

static struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()		\
	.init_box = snbep_uncore_msr_init_box,		\
	.disable_box = snbep_uncore_msr_disable_box,	\
	.enable_box = snbep_uncore_msr_enable_box,	\
	.disable_event = snbep_uncore_msr_disable_event,	\
	.enable_event = snbep_uncore_msr_enable_event,	\
	.read_counter = uncore_msr_read_counter

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()		\
	.init_box = snbep_uncore_pci_init_box,		\
	.disable_box = snbep_uncore_pci_disable_box,	\
	.enable_box = snbep_uncore_pci_enable_box,	\
	.disable_event = snbep_uncore_pci_disable_event,	\
	.read_counter = snbep_uncore_pci_read_counter

static struct intel_uncore_ops snbep_uncore_pci_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event = snbep_uncore_pci_enable_event,
};

static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snbep_uncore_ubox = {
	.name = "ubox",
	.num_counters = 2,
	.num_boxes = 1,
	.perf_ctr_bits = 44,
	.fixed_ctr_bits = 48,
	.perf_ctr = SNBEP_U_MSR_PMON_CTR0,
	.event_ctl = SNBEP_U_MSR_PMON_CTL0,
	.event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops = &snbep_uncore_msr_ops,
	.format_group = &snbep_uncore_ubox_format_group,
};

static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};

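/*
 * The Cbox filter register is shared by all counters in a box.  Allocation is
 * tracked per filter field (tid, nid/link, state, opc) rather than for the
 * whole register: each field gets its own 6-bit slice of the box's atomic
 * reference count (hence the 1 << (i * 6) arithmetic and
 * __BITS_VALUE(..., i, 6) below), so events that use different fields, or
 * identical values in the same field, can share the register.
 */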
static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i;

	if (uncore_box_is_fake(box))
		return;

	for (i = 0; i < 5; i++) {
		if (reg1->alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	reg1->alloc = 0;
}

static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &constraint_empty;
}

static u64 snbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x4)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

	return mask;
}

static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}

static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config = snbep_cbox_hw_config,
	.get_constraint = snbep_cbox_get_constraint,
	.put_constraint = snbep_cbox_put_constraint,
};

static struct intel_uncore_type snbep_uncore_cbox = {
	.name = "cbox",
	.num_counters = 4,
	.num_boxes = 8,
	.perf_ctr_bits = 44,
	.event_ctl = SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
	.event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset = SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs = 1,
	.constraints = snbep_uncore_cbox_constraints,
	.ops = &snbep_uncore_cbox_ops,
	.format_group = &snbep_uncore_cbox_format_group,
};

static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 config = reg1->config;

	if (new_idx > reg1->idx)
		config <<= 8 * (new_idx - reg1->idx);
	else
		config >>= 8 * (reg1->idx - new_idx);

	if (modify) {
		hwc->config += new_idx - reg1->idx;
		reg1->config = config;
		reg1->idx = new_idx;
	}
	return config;
}

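/*
 * PCU occupancy filtering: event selects 0xb-0xe are the four occupancy band
 * events, and each owns one byte of the shared filter register (filter_band0-3
 * in config1).  If an event's preferred byte is already taken with a different
 * threshold, snbep_pcu_alter_er() above slides the event to a free band by
 * shifting its threshold in 8-bit steps and bumping the event select by the
 * same number of bands.
 */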
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}

static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	atomic_sub(1 << (reg1->idx * 8), &er->ref);
	reg1->alloc = 0;
}

static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << reg1->idx);
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config = snbep_pcu_hw_config,
	.get_constraint = snbep_pcu_get_constraint,
	.put_constraint = snbep_pcu_put_constraint,
};

static struct intel_uncore_type snbep_uncore_pcu = {
	.name = "pcu",
	.num_counters = 4,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	.perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs = 1,
	.ops = &snbep_uncore_pcu_ops,
	.format_group = &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};

enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
};

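/*
 * The QPI packet match/mask registers do not live in the PMON PCI function
 * itself but in a companion "filter" device (one per port); those devices are
 * recorded in extra_pci_dev[] and looked up by socket and port when the event
 * is enabled.
 */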
static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
		reg1->idx = 0;
		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
		reg1->config = event->attr.config1;
		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
		reg2->config = event->attr.config2;
	}
	return 0;
}

static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		struct pci_dev *filter_pdev = extra_pci_dev[box->phys_id][idx];
		WARN_ON_ONCE(!filter_pdev);
		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event = snbep_qpi_enable_event,
	.hw_config = snbep_qpi_hw_config,
	.get_constraint = uncore_get_constraint,
	.put_constraint = uncore_put_constraint,
};

#define SNBEP_UNCORE_PCI_COMMON_INIT()			\
	.perf_ctr = SNBEP_PCI_PMON_CTR0,		\
	.event_ctl = SNBEP_PCI_PMON_CTL0,		\
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,	\
	.box_ctl = SNBEP_PCI_PMON_BOX_CTL,		\
	.ops = &snbep_uncore_pci_ops,			\
	.format_group = &snbep_uncore_format_group

static struct intel_uncore_type snbep_uncore_ha = {
	.name = "ha",
	.num_counters = 4,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_imc = {
	.name = "imc",
	.num_counters = 4,
	.num_boxes = 4,
	.perf_ctr_bits = 48,
	.fixed_ctr_bits = 48,
	.fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs = snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_qpi = {
	.name = "qpi",
	.num_counters = 4,
	.num_boxes = 2,
	.perf_ctr_bits = 48,
	.perf_ctr = SNBEP_PCI_PMON_CTR0,
	.event_ctl = SNBEP_PCI_PMON_CTL0,
	.event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl = SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs = 1,
	.ops = &snbep_uncore_qpi_ops,
	.event_descs = snbep_uncore_qpi_events,
	.format_group = &snbep_uncore_qpi_format_group,
};


static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name = "r2pcie",
	.num_counters = 4,
	.num_boxes = 1,
	.perf_ctr_bits = 44,
	.constraints = snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name = "r3qpi",
	.num_counters = 3,
	.num_boxes = 2,
	.perf_ctr_bits = 44,
	.constraints = snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};

static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snbep_uncore_pci_driver = {
	.name = "snbep_uncore",
	.id_table = snbep_uncore_pci_ids,
};

/*
 * build pci bus to socket mapping
 */
static int snbep_pci2phy_map_init(int devid)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/* get the Node ID of the local register */
		err = pci_read_config_dword(ubox_dev, 0x40, &config);
		if (err)
			break;
		nodeid = config;
		/* get the Node ID mapping */
		err = pci_read_config_dword(ubox_dev, 0x54, &config);
		if (err)
			break;
		/*
		 * every three bits in the Node ID mapping register maps
		 * to a particular node.
		 */
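		/*
		 * Example (hypothetical value): if the mapping register reads
		 * 0x00fac688, each 3-bit field i holds node id i, so a bus
		 * whose local node id is 2 maps to physical socket 2.
		 */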
		for (i = 0; i < 8; i++) {
			if (nodeid == ((config >> (3 * i)) & 0x7)) {
				pcibus_to_physid[bus] = i;
				break;
			}
		}
	}

	if (!err) {
		/*
		 * For PCI buses with no UBOX device, find the next bus
		 * that has a UBOX device and use its mapping.
		 */
		i = -1;
		for (bus = 255; bus >= 0; bus--) {
			if (pcibus_to_physid[bus] >= 0)
				i = pcibus_to_physid[bus];
			else
				pcibus_to_physid[bus] = i;
		}
	}

	if (ubox_dev)
		pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}
/* end of Sandy Bridge-EP uncore support */

/* IvyTown uncore support */
static void ivt_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	if (msr)
		wrmsrl(msr, IVT_PMON_BOX_CTL_INT);
}

static void ivt_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVT_PMON_BOX_CTL_INT);
}

#define IVT_UNCORE_MSR_OPS_COMMON_INIT()		\
	.init_box = ivt_uncore_msr_init_box,		\
	.disable_box = snbep_uncore_msr_disable_box,	\
	.enable_box = snbep_uncore_msr_enable_box,	\
	.disable_event = snbep_uncore_msr_disable_event,	\
	.enable_event = snbep_uncore_msr_enable_event,	\
	.read_counter = uncore_msr_read_counter

static struct intel_uncore_ops ivt_uncore_msr_ops = {
	IVT_UNCORE_MSR_OPS_COMMON_INIT(),
};

static struct intel_uncore_ops ivt_uncore_pci_ops = {
	.init_box = ivt_uncore_pci_init_box,
	.disable_box = snbep_uncore_pci_disable_box,
	.enable_box = snbep_uncore_pci_enable_box,
	.disable_event = snbep_uncore_pci_disable_event,
	.enable_event = snbep_uncore_pci_enable_event,
	.read_counter = snbep_uncore_pci_read_counter,
};

#define IVT_UNCORE_PCI_COMMON_INIT()			\
	.perf_ctr = SNBEP_PCI_PMON_CTR0,		\
	.event_ctl = SNBEP_PCI_PMON_CTL0,		\
	.event_mask = IVT_PMON_RAW_EVENT_MASK,		\
	.box_ctl = SNBEP_PCI_PMON_BOX_CTL,		\
	.ops = &ivt_uncore_pci_ops,			\
	.format_group = &ivt_uncore_format_group

static struct attribute *ivt_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *ivt_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *ivt_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	NULL,
};

static struct attribute *ivt_uncore_pcu_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *ivt_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static struct attribute_group ivt_uncore_format_group = {
	.name = "format",
	.attrs = ivt_uncore_formats_attr,
};

static struct attribute_group ivt_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivt_uncore_ubox_formats_attr,
};

static struct attribute_group ivt_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivt_uncore_cbox_formats_attr,
};

static struct attribute_group ivt_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivt_uncore_pcu_formats_attr,
};

static struct attribute_group ivt_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivt_uncore_qpi_formats_attr,
};

static struct intel_uncore_type ivt_uncore_ubox = {
	.name = "ubox",
	.num_counters = 2,
	.num_boxes = 1,
	.perf_ctr_bits = 44,
	.fixed_ctr_bits = 48,
	.perf_ctr = SNBEP_U_MSR_PMON_CTR0,
	.event_ctl = SNBEP_U_MSR_PMON_CTL0,
	.event_mask = IVT_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops = &ivt_uncore_msr_ops,
	.format_group = &ivt_uncore_ubox_format_group,
};

static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};

static u64 ivt_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= IVT_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= IVT_CB0_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= IVT_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= IVT_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x10)
		mask |= IVT_CB0_MSR_PMON_BOX_FILTER_OPC;

	return mask;
}

static struct event_constraint *
ivt_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivt_cbox_filter_mask);
}

static int ivt_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = ivt_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & ivt_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static void ivt_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops ivt_uncore_cbox_ops = {
	.init_box = ivt_uncore_msr_init_box,
	.disable_box = snbep_uncore_msr_disable_box,
	.enable_box = snbep_uncore_msr_enable_box,
	.disable_event = snbep_uncore_msr_disable_event,
	.enable_event = ivt_cbox_enable_event,
	.read_counter = uncore_msr_read_counter,
	.hw_config = ivt_cbox_hw_config,
	.get_constraint = ivt_cbox_get_constraint,
	.put_constraint = snbep_cbox_put_constraint,
};

static struct intel_uncore_type ivt_uncore_cbox = {
	.name = "cbox",
	.num_counters = 4,
	.num_boxes = 15,
	.perf_ctr_bits = 44,
	.event_ctl = SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
	.event_mask = IVT_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset = SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs = 1,
	.constraints = snbep_uncore_cbox_constraints,
	.ops = &ivt_uncore_cbox_ops,
	.format_group = &ivt_uncore_cbox_format_group,
};

static struct intel_uncore_ops ivt_uncore_pcu_ops = {
	IVT_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config = snbep_pcu_hw_config,
	.get_constraint = snbep_pcu_get_constraint,
	.put_constraint = snbep_pcu_put_constraint,
};

static struct intel_uncore_type ivt_uncore_pcu = {
	.name = "pcu",
	.num_counters = 4,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	.perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask = IVT_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs = 1,
	.ops = &ivt_uncore_pcu_ops,
	.format_group = &ivt_uncore_pcu_format_group,
};

static struct intel_uncore_type *ivt_msr_uncores[] = {
	&ivt_uncore_ubox,
	&ivt_uncore_cbox,
	&ivt_uncore_pcu,
	NULL,
};

static struct intel_uncore_type ivt_uncore_ha = {
	.name = "ha",
	.num_counters = 4,
	.num_boxes = 2,
	.perf_ctr_bits = 48,
	IVT_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type ivt_uncore_imc = {
	.name = "imc",
	.num_counters = 4,
	.num_boxes = 8,
	.perf_ctr_bits = 48,
	.fixed_ctr_bits = 48,
	.fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	IVT_UNCORE_PCI_COMMON_INIT(),
};

/* registers in IRP boxes are not properly aligned */
static unsigned ivt_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivt_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};

static void ivt_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx],
			       hwc->config | SNBEP_PMON_CTL_EN);
}

static void ivt_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx], hwc->config);
}

static u64 ivt_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}

static struct intel_uncore_ops ivt_uncore_irp_ops = {
	.init_box = ivt_uncore_pci_init_box,
	.disable_box = snbep_uncore_pci_disable_box,
	.enable_box = snbep_uncore_pci_enable_box,
	.disable_event = ivt_uncore_irp_disable_event,
	.enable_event = ivt_uncore_irp_enable_event,
	.read_counter = ivt_uncore_irp_read_counter,
};

static struct intel_uncore_type ivt_uncore_irp = {
	.name = "irp",
	.num_counters = 4,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	.event_mask = IVT_PMON_RAW_EVENT_MASK,
	.box_ctl = SNBEP_PCI_PMON_BOX_CTL,
	.ops = &ivt_uncore_irp_ops,
	.format_group = &ivt_uncore_format_group,
};

static struct intel_uncore_ops ivt_uncore_qpi_ops = {
	.init_box = ivt_uncore_pci_init_box,
	.disable_box = snbep_uncore_pci_disable_box,
	.enable_box = snbep_uncore_pci_enable_box,
	.disable_event = snbep_uncore_pci_disable_event,
	.enable_event = snbep_qpi_enable_event,
	.read_counter = snbep_uncore_pci_read_counter,
	.hw_config = snbep_qpi_hw_config,
	.get_constraint = uncore_get_constraint,
	.put_constraint = uncore_put_constraint,
};

static struct intel_uncore_type ivt_uncore_qpi = {
	.name = "qpi",
	.num_counters = 4,
	.num_boxes = 3,
	.perf_ctr_bits = 48,
	.perf_ctr = SNBEP_PCI_PMON_CTR0,
	.event_ctl = SNBEP_PCI_PMON_CTL0,
	.event_mask = IVT_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl = SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs = 1,
	.ops = &ivt_uncore_qpi_ops,
	.format_group = &ivt_uncore_qpi_format_group,
};

static struct intel_uncore_type ivt_uncore_r2pcie = {
	.name = "r2pcie",
	.num_counters = 4,
	.num_boxes = 1,
	.perf_ctr_bits = 44,
	.constraints = snbep_uncore_r2pcie_constraints,
	IVT_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type ivt_uncore_r3qpi = {
	.name = "r3qpi",
	.num_counters = 3,
	.num_boxes = 2,
	.perf_ctr_bits = 44,
	.constraints = snbep_uncore_r3qpi_constraints,
	IVT_UNCORE_PCI_COMMON_INIT(),
};

enum {
	IVT_PCI_UNCORE_HA,
	IVT_PCI_UNCORE_IMC,
	IVT_PCI_UNCORE_IRP,
	IVT_PCI_UNCORE_QPI,
	IVT_PCI_UNCORE_R2PCIE,
	IVT_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *ivt_pci_uncores[] = {
	[IVT_PCI_UNCORE_HA]	= &ivt_uncore_ha,
	[IVT_PCI_UNCORE_IMC]	= &ivt_uncore_imc,
	[IVT_PCI_UNCORE_IRP]	= &ivt_uncore_irp,
	[IVT_PCI_UNCORE_QPI]	= &ivt_uncore_qpi,
	[IVT_PCI_UNCORE_R2PCIE]	= &ivt_uncore_r2pcie,
	[IVT_PCI_UNCORE_R3QPI]	= &ivt_uncore_r3qpi,
	NULL,
};

static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver ivt_uncore_pci_driver = {
	.name = "ivt_uncore",
	.id_table = ivt_uncore_pci_ids,
};
/* end of IvyTown uncore support */

/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

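/*
 * The fixed counter (hwc->idx >= UNCORE_PMC_IDX_FIXED) has no event select
 * field of its own; its control register only takes an enable bit, which is
 * why the else branch above writes SNB_UNC_CTL_EN alone.
 */
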
static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};

static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};

static struct attribute_group snb_uncore_format_group = {
	.name = "format",
	.attrs = snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box = snb_uncore_msr_init_box,
	.disable_event = snb_uncore_msr_disable_event,
	.enable_event = snb_uncore_msr_enable_event,
	.read_counter = uncore_msr_read_counter,
};

static struct event_constraint snb_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snb_uncore_cbox = {
	.name = "cbox",
	.num_counters = 2,
	.num_boxes = 4,
	.perf_ctr_bits = 44,
	.fixed_ctr_bits = 48,
	.perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr = SNB_UNC_FIXED_CTR,
	.fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed = 1,
	.event_mask = SNB_UNC_RAW_EVENT_MASK,
	.msr_offset = SNB_UNC_CBO_MSR_OFFSET,
	.constraints = snb_uncore_cbox_constraints,
	.ops = &snb_uncore_msr_ops,
	.format_group = &snb_uncore_format_group,
	.event_descs = snb_uncore_events,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	NULL,
};
/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}

static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};

static struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};

static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box = nhm_uncore_msr_disable_box,
	.enable_box = nhm_uncore_msr_enable_box,
	.disable_event = snb_uncore_msr_disable_event,
	.enable_event = nhm_uncore_msr_enable_event,
	.read_counter = uncore_msr_read_counter,
};

static struct intel_uncore_type nhm_uncore = {
	.name = "",
	.num_counters = 8,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	.fixed_ctr_bits = 48,
	.event_ctl = NHM_UNC_PERFEVTSEL0,
	.perf_ctr = NHM_UNC_UNCORE_PMC0,
	.fixed_ctr = NHM_UNC_FIXED_CTR,
	.fixed_ctl = NHM_UNC_FIXED_CTR_CTRL,
	.event_mask = NHM_UNC_RAW_EVENT_MASK,
	.event_descs = nhm_uncore_events,
	.ops = &nhm_uncore_msr_ops,
	.format_group = &nhm_uncore_format_group,
};

static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};
/* end of Nehalem uncore support */

/* Nehalem-EX uncore support */
DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");

static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
{
	wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
}

static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	u64 config;

	if (msr) {
		rdmsrl(msr, config);
		config &= ~((1ULL << uncore_num_counters(box)) - 1);
		/* WBox has a fixed counter */
		if (uncore_msr_fixed_ctl(box))
			config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
		wrmsrl(msr, config);
	}
}

static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
1749{
1750 unsigned msr = uncore_msr_box_ctl(box);
1751 u64 config;
1752
1753 if (msr) {
1754 rdmsrl(msr, config);
1755 config |= (1ULL << uncore_num_counters(box)) - 1;
1756 /* WBox has a fixed counter */
1757 if (uncore_msr_fixed_ctl(box))
1758 config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
1759 wrmsrl(msr, config);
1760 }
1761}
1762
1763static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1764{
1765 wrmsrl(event->hw.config_base, 0);
1766}
1767
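/*
 * Fixed counters only need the enable bit.  For the generic counters
 * the enable bit is normally bit 0 of the control register; box types
 * whose event_mask already claims bit 0 for the event encoding use
 * bit 22 as the enable bit instead, which is what the event_mask test
 * below selects.
 */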
1768static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1769{
1770 struct hw_perf_event *hwc = &event->hw;
1771
1772 if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
1773 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
1774 else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
1775 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
1776 else
1777 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
1778}
1779
1780#define NHMEX_UNCORE_OPS_COMMON_INIT() \
1781 .init_box = nhmex_uncore_msr_init_box, \
1782 .disable_box = nhmex_uncore_msr_disable_box, \
1783 .enable_box = nhmex_uncore_msr_enable_box, \
1784 .disable_event = nhmex_uncore_msr_disable_event, \
1785 .read_counter = uncore_msr_read_counter
1786
1787static struct intel_uncore_ops nhmex_uncore_ops = {
1788 NHMEX_UNCORE_OPS_COMMON_INIT(),
1789 .enable_event = nhmex_uncore_msr_enable_event,
1790};
1791
1792static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
1793 &format_attr_event.attr,
1794 &format_attr_edge.attr,
1795 NULL,
1796};
1797
1798static struct attribute_group nhmex_uncore_ubox_format_group = {
1799 .name = "format",
1800 .attrs = nhmex_uncore_ubox_formats_attr,
1801};
1802
1803static struct intel_uncore_type nhmex_uncore_ubox = {
1804 .name = "ubox",
1805 .num_counters = 1,
1806 .num_boxes = 1,
1807 .perf_ctr_bits = 48,
1808 .event_ctl = NHMEX_U_MSR_PMON_EV_SEL,
1809 .perf_ctr = NHMEX_U_MSR_PMON_CTR,
1810 .event_mask = NHMEX_U_PMON_RAW_EVENT_MASK,
1811 .box_ctl = NHMEX_U_MSR_PMON_GLOBAL_CTL,
1812 .ops = &nhmex_uncore_ops,
1813 .format_group = &nhmex_uncore_ubox_format_group
1814};
1815
1816static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
1817 &format_attr_event.attr,
1818 &format_attr_umask.attr,
1819 &format_attr_edge.attr,
1820 &format_attr_inv.attr,
1821 &format_attr_thresh8.attr,
1822 NULL,
1823};
1824
1825static struct attribute_group nhmex_uncore_cbox_format_group = {
1826 .name = "format",
1827 .attrs = nhmex_uncore_cbox_formats_attr,
1828};
1829
cb37af77
YZ
1830/* msr offset for each instance of cbox */
1831static unsigned nhmex_cbox_msr_offsets[] = {
1832 0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
1833};
1834
254298c7
YZ
1835static struct intel_uncore_type nhmex_uncore_cbox = {
1836 .name = "cbox",
1837 .num_counters = 6,
cb37af77 1838 .num_boxes = 10,
254298c7
YZ
1839 .perf_ctr_bits = 48,
1840 .event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0,
1841 .perf_ctr = NHMEX_C0_MSR_PMON_CTR0,
1842 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
1843 .box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL,
cb37af77 1844 .msr_offsets = nhmex_cbox_msr_offsets,
254298c7
YZ
1845 .pair_ctr_ctl = 1,
1846 .ops = &nhmex_uncore_ops,
1847 .format_group = &nhmex_uncore_cbox_format_group
1848};
1849
1850static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
1851 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
1852 { /* end: all zeroes */ },
1853};
1854
1855static struct intel_uncore_type nhmex_uncore_wbox = {
1856 .name = "wbox",
1857 .num_counters = 4,
1858 .num_boxes = 1,
1859 .perf_ctr_bits = 48,
1860 .event_ctl = NHMEX_W_MSR_PMON_CNT0,
1861 .perf_ctr = NHMEX_W_MSR_PMON_EVT_SEL0,
1862 .fixed_ctr = NHMEX_W_MSR_PMON_FIXED_CTR,
1863 .fixed_ctl = NHMEX_W_MSR_PMON_FIXED_CTL,
1864 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
1865 .box_ctl = NHMEX_W_MSR_GLOBAL_CTL,
1866 .pair_ctr_ctl = 1,
1867 .event_descs = nhmex_uncore_wbox_events,
1868 .ops = &nhmex_uncore_ops,
1869 .format_group = &nhmex_uncore_cbox_format_group
1870};
1871
1872static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1873{
1874 struct hw_perf_event *hwc = &event->hw;
1875 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1876 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1877 int ctr, ev_sel;
1878
1879 ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
1880 NHMEX_B_PMON_CTR_SHIFT;
1881 ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
1882 NHMEX_B_PMON_CTL_EV_SEL_SHIFT;
1883
1884 /* events that do not use the match/mask registers */
1885 if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
1886 (ctr == 2 && ev_sel != 0x4) || ctr == 3)
1887 return 0;
1888
1889 if (box->pmu->pmu_idx == 0)
1890 reg1->reg = NHMEX_B0_MSR_MATCH;
1891 else
1892 reg1->reg = NHMEX_B1_MSR_MATCH;
1893 reg1->idx = 0;
1894 reg1->config = event->attr.config1;
1895 reg2->config = event->attr.config2;
1896 return 0;
1897}
1898
1899static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1900{
1901 struct hw_perf_event *hwc = &event->hw;
1902 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1903 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1904
1905 if (reg1->idx != EXTRA_REG_NONE) {
1906 wrmsrl(reg1->reg, reg1->config);
1907 wrmsrl(reg1->reg + 1, reg2->config);
1908 }
1909 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
1910 (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
1911}
1912
1913/*
1914 * The Bbox has 4 counters, but each counter monitors different events.
 1915 * Use bits 6-7 in the event config to select the counter.
1916 */
1917static struct event_constraint nhmex_uncore_bbox_constraints[] = {
1918 EVENT_CONSTRAINT(0 , 1, 0xc0),
1919 EVENT_CONSTRAINT(0x40, 2, 0xc0),
1920 EVENT_CONSTRAINT(0x80, 4, 0xc0),
1921 EVENT_CONSTRAINT(0xc0, 8, 0xc0),
1922 EVENT_CONSTRAINT_END,
1923};
1924
1925static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
1926 &format_attr_event5.attr,
1927 &format_attr_counter.attr,
1928 &format_attr_match.attr,
1929 &format_attr_mask.attr,
1930 NULL,
1931};
1932
1933static struct attribute_group nhmex_uncore_bbox_format_group = {
1934 .name = "format",
1935 .attrs = nhmex_uncore_bbox_formats_attr,
1936};
1937
1938static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
1939 NHMEX_UNCORE_OPS_COMMON_INIT(),
1940 .enable_event = nhmex_bbox_msr_enable_event,
1941 .hw_config = nhmex_bbox_hw_config,
1942 .get_constraint = uncore_get_constraint,
1943 .put_constraint = uncore_put_constraint,
1944};
1945
1946static struct intel_uncore_type nhmex_uncore_bbox = {
1947 .name = "bbox",
1948 .num_counters = 4,
1949 .num_boxes = 2,
1950 .perf_ctr_bits = 48,
1951 .event_ctl = NHMEX_B0_MSR_PMON_CTL0,
1952 .perf_ctr = NHMEX_B0_MSR_PMON_CTR0,
1953 .event_mask = NHMEX_B_PMON_RAW_EVENT_MASK,
1954 .box_ctl = NHMEX_B0_MSR_PMON_GLOBAL_CTL,
1955 .msr_offset = NHMEX_B_MSR_OFFSET,
1956 .pair_ctr_ctl = 1,
1957 .num_shared_regs = 1,
1958 .constraints = nhmex_uncore_bbox_constraints,
1959 .ops = &nhmex_uncore_bbox_ops,
1960 .format_group = &nhmex_uncore_bbox_format_group
1961};
1962
1963static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1964{
ebb6cc03
YZ
1965 struct hw_perf_event *hwc = &event->hw;
1966 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1967 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
254298c7 1968
ebb6cc03
YZ
1969 /* only TO_R_PROG_EV event uses the match/mask register */
1970 if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
1971 NHMEX_S_EVENT_TO_R_PROG_EV)
1972 return 0;
254298c7
YZ
1973
1974 if (box->pmu->pmu_idx == 0)
1975 reg1->reg = NHMEX_S0_MSR_MM_CFG;
1976 else
1977 reg1->reg = NHMEX_S1_MSR_MM_CFG;
254298c7 1978 reg1->idx = 0;
ebb6cc03
YZ
1979 reg1->config = event->attr.config1;
1980 reg2->config = event->attr.config2;
254298c7
YZ
1981 return 0;
1982}
1983
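/*
 * Program the Sbox match/mask pair behind MM_CFG: disable MM_CFG first,
 * write the match value (MM_CFG + 1) and the mask value (MM_CFG + 2),
 * re-enable matching, and only then enable the counter itself.
 */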
1984static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1985{
1986 struct hw_perf_event *hwc = &event->hw;
1987 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1988 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1989
ebb6cc03
YZ
1990 if (reg1->idx != EXTRA_REG_NONE) {
1991 wrmsrl(reg1->reg, 0);
254298c7
YZ
1992 wrmsrl(reg1->reg + 1, reg1->config);
1993 wrmsrl(reg1->reg + 2, reg2->config);
1994 wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
1995 }
1996 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
1997}
1998
1999static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
2000 &format_attr_event.attr,
2001 &format_attr_umask.attr,
2002 &format_attr_edge.attr,
2003 &format_attr_inv.attr,
2004 &format_attr_thresh8.attr,
254298c7
YZ
2005 &format_attr_match.attr,
2006 &format_attr_mask.attr,
2007 NULL,
2008};
2009
2010static struct attribute_group nhmex_uncore_sbox_format_group = {
2011 .name = "format",
2012 .attrs = nhmex_uncore_sbox_formats_attr,
2013};
2014
2015static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
2016 NHMEX_UNCORE_OPS_COMMON_INIT(),
2017 .enable_event = nhmex_sbox_msr_enable_event,
2018 .hw_config = nhmex_sbox_hw_config,
2019 .get_constraint = uncore_get_constraint,
2020 .put_constraint = uncore_put_constraint,
2021};
2022
2023static struct intel_uncore_type nhmex_uncore_sbox = {
2024 .name = "sbox",
2025 .num_counters = 4,
2026 .num_boxes = 2,
2027 .perf_ctr_bits = 48,
2028 .event_ctl = NHMEX_S0_MSR_PMON_CTL0,
2029 .perf_ctr = NHMEX_S0_MSR_PMON_CTR0,
2030 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
2031 .box_ctl = NHMEX_S0_MSR_PMON_GLOBAL_CTL,
2032 .msr_offset = NHMEX_S_MSR_OFFSET,
2033 .pair_ctr_ctl = 1,
2034 .num_shared_regs = 1,
2035 .ops = &nhmex_uncore_sbox_ops,
2036 .format_group = &nhmex_uncore_sbox_format_group
2037};
2038
2039enum {
2040 EXTRA_REG_NHMEX_M_FILTER,
2041 EXTRA_REG_NHMEX_M_DSP,
2042 EXTRA_REG_NHMEX_M_ISS,
2043 EXTRA_REG_NHMEX_M_MAP,
2044 EXTRA_REG_NHMEX_M_MSC_THR,
2045 EXTRA_REG_NHMEX_M_PGT,
2046 EXTRA_REG_NHMEX_M_PLD,
2047 EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
2048};
2049
2050static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
2051 MBOX_INC_SEL_EXTAR_REG(0x0, DSP),
2052 MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR),
2053 MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR),
2054 MBOX_INC_SEL_EXTAR_REG(0x9, ISS),
2055 /* event 0xa uses two extra registers */
2056 MBOX_INC_SEL_EXTAR_REG(0xa, ISS),
2057 MBOX_INC_SEL_EXTAR_REG(0xa, PLD),
2058 MBOX_INC_SEL_EXTAR_REG(0xb, PLD),
2059 /* events 0xd ~ 0x10 use the same extra register */
2060 MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC),
2061 MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC),
2062 MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC),
2063 MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC),
2064 MBOX_INC_SEL_EXTAR_REG(0x16, PGT),
2065 MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
2066 MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
2067 MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
2068 MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
2069 EVENT_EXTRA_END
2070};
2071
cb37af77 2072/* Nehalem-EX or Westmere-EX ? */
46bdd905 2073static bool uncore_nhmex;
cb37af77 2074
254298c7
YZ
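/*
 * Acquire a reference on an Mbox extra register.  Most extra registers
 * are exclusive: they can be taken if unused or if the requested config
 * matches what is already programmed.  The ZDP_CTL_FVC register is
 * special: events 0xd ~ 0x10 each own one field of it while the rest of
 * the register is shared, so er->ref packs four 8-bit reference counts
 * (one per field), extracted with __BITS_VALUE(atomic_read(&er->ref), idx, 8).
 */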
2075static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
2076{
2077 struct intel_uncore_extra_reg *er;
2078 unsigned long flags;
2079 bool ret = false;
2080 u64 mask;
2081
2082 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2083 er = &box->shared_regs[idx];
2084 raw_spin_lock_irqsave(&er->lock, flags);
2085 if (!atomic_read(&er->ref) || er->config == config) {
2086 atomic_inc(&er->ref);
2087 er->config = config;
2088 ret = true;
2089 }
2090 raw_spin_unlock_irqrestore(&er->lock, flags);
2091
2092 return ret;
2093 }
2094 /*
2095 * The ZDP_CTL_FVC MSR has 4 fields which are used to control
2096 * events 0xd ~ 0x10. Besides these 4 fields, there are additional
2097 * fields which are shared.
2098 */
2099 idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2100 if (WARN_ON_ONCE(idx >= 4))
2101 return false;
2102
2103 /* mask of the shared fields */
cb37af77
YZ
2104 if (uncore_nhmex)
2105 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
2106 else
2107 mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
254298c7
YZ
2108 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2109
2110 raw_spin_lock_irqsave(&er->lock, flags);
2111 /* add mask of the non-shared field if it's in use */
cb37af77
YZ
2112 if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
2113 if (uncore_nhmex)
2114 mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2115 else
2116 mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2117 }
254298c7
YZ
2118
2119 if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
2120 atomic_add(1 << (idx * 8), &er->ref);
cb37af77
YZ
2121 if (uncore_nhmex)
2122 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
2123 NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2124 else
2125 mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
2126 WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
254298c7
YZ
2127 er->config &= ~mask;
2128 er->config |= (config & mask);
2129 ret = true;
2130 }
2131 raw_spin_unlock_irqrestore(&er->lock, flags);
2132
2133 return ret;
2134}
2135
2136static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
2137{
2138 struct intel_uncore_extra_reg *er;
2139
2140 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2141 er = &box->shared_regs[idx];
2142 atomic_dec(&er->ref);
2143 return;
2144 }
2145
2146 idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2147 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2148 atomic_sub(1 << (idx * 8), &er->ref);
2149}
2150
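/*
 * Events 0xd ~ 0x10 are interchangeable, so an event can be moved to a
 * different ZDP_CTL_FVC field when its preferred one is busy.  This
 * shifts the event's non-shared control bits to the new field, keeps
 * the shared bits, and (when 'modify' is set) also rewrites the main
 * event selector and the cached extra-register index.
 */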
46bdd905 2151static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
254298c7
YZ
2152{
2153 struct hw_perf_event *hwc = &event->hw;
2154 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
13acac30 2155 u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
254298c7
YZ
2156 u64 config = reg1->config;
2157
2158 /* get the non-shared control bits and shift them */
2159 idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
cb37af77
YZ
2160 if (uncore_nhmex)
2161 config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
2162 else
2163 config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
254298c7
YZ
2164 if (new_idx > orig_idx) {
2165 idx = new_idx - orig_idx;
2166 config <<= 3 * idx;
2167 } else {
2168 idx = orig_idx - new_idx;
2169 config >>= 3 * idx;
2170 }
2171
2172 /* add the shared control bits back */
cb37af77
YZ
2173 if (uncore_nhmex)
2174 config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2175 else
2176 config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
254298c7
YZ
2177 config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
2178 if (modify) {
2179 /* adjust the main event selector */
2180 if (new_idx > orig_idx)
2181 hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
2182 else
2183 hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
2184 reg1->config = config;
2185 reg1->idx = ~0xff | new_idx;
2186 }
2187 return config;
2188}
2189
2190static struct event_constraint *
2191nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2192{
2193 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2194 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2195 int i, idx[2], alloc = 0;
2196 u64 config1 = reg1->config;
2197
2198 idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
2199 idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
2200again:
2201 for (i = 0; i < 2; i++) {
2202 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
2203 idx[i] = 0xff;
2204
2205 if (idx[i] == 0xff)
2206 continue;
2207
2208 if (!nhmex_mbox_get_shared_reg(box, idx[i],
2209 __BITS_VALUE(config1, i, 32)))
2210 goto fail;
2211 alloc |= (0x1 << i);
2212 }
2213
2214 /* for the match/mask registers */
ebb6cc03
YZ
2215 if (reg2->idx != EXTRA_REG_NONE &&
2216 (uncore_box_is_fake(box) || !reg2->alloc) &&
254298c7
YZ
2217 !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
2218 goto fail;
2219
2220 /*
2221 * If it's a fake box -- as per validate_{group,event}() we
2222 * shouldn't touch event state and we can avoid doing so
2223 * since both will only call get_event_constraints() once
2224 * on each event, this avoids the need for reg->alloc.
2225 */
2226 if (!uncore_box_is_fake(box)) {
2227 if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
2228 nhmex_mbox_alter_er(event, idx[0], true);
2229 reg1->alloc |= alloc;
ebb6cc03
YZ
2230 if (reg2->idx != EXTRA_REG_NONE)
2231 reg2->alloc = 1;
254298c7
YZ
2232 }
2233 return NULL;
2234fail:
2235 if (idx[0] != 0xff && !(alloc & 0x1) &&
2236 idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
2237 /*
 2238 * events 0xd ~ 0x10 are functionally identical, but are
 2239 * controlled by different fields in the ZDP_CTL_FVC
 2240 * register. If we failed to take one field, try the
 2241 * other 3 choices.
7c94ee2e 2242 */
254298c7
YZ
2243 BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
2244 idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2245 idx[0] = (idx[0] + 1) % 4;
2246 idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
2247 if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
2248 config1 = nhmex_mbox_alter_er(event, idx[0], false);
2249 goto again;
7c94ee2e 2250 }
254298c7 2251 }
7c94ee2e 2252
254298c7
YZ
2253 if (alloc & 0x1)
2254 nhmex_mbox_put_shared_reg(box, idx[0]);
2255 if (alloc & 0x2)
2256 nhmex_mbox_put_shared_reg(box, idx[1]);
2257 return &constraint_empty;
2258}
fcde10e9 2259
254298c7 2260static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
fcde10e9 2261{
254298c7
YZ
2262 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2263 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
fcde10e9 2264
254298c7
YZ
2265 if (uncore_box_is_fake(box))
2266 return;
2267
2268 if (reg1->alloc & 0x1)
2269 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
2270 if (reg1->alloc & 0x2)
2271 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
2272 reg1->alloc = 0;
2273
2274 if (reg2->alloc) {
2275 nhmex_mbox_put_shared_reg(box, reg2->idx);
2276 reg2->alloc = 0;
2277 }
fcde10e9
YZ
2278}
2279
254298c7 2280static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
fcde10e9 2281{
254298c7
YZ
2282 if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
2283 return er->idx;
2284 return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
fcde10e9
YZ
2285}
2286
254298c7 2287static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
fcde10e9 2288{
254298c7
YZ
2289 struct intel_uncore_type *type = box->pmu->type;
2290 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2291 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
2292 struct extra_reg *er;
2293 unsigned msr;
2294 int reg_idx = 0;
254298c7
YZ
2295 /*
 2296 * The mbox events may require at most 2 extra MSRs. But only
 2297 * the lower 32 bits in these MSRs are significant, so we can use
 2298 * config1 to pass both MSRs' configs.
2299 */
2300 for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
2301 if (er->event != (event->hw.config & er->config_mask))
2302 continue;
2303 if (event->attr.config1 & ~er->valid_mask)
2304 return -EINVAL;
254298c7
YZ
2305
2306 msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
2307 if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
2308 return -EINVAL;
2309
2310 /* always use the 32~63 bits to pass the PLD config */
2311 if (er->idx == EXTRA_REG_NHMEX_M_PLD)
2312 reg_idx = 1;
ebb6cc03
YZ
2313 else if (WARN_ON_ONCE(reg_idx > 0))
2314 return -EINVAL;
254298c7
YZ
2315
2316 reg1->idx &= ~(0xff << (reg_idx * 8));
2317 reg1->reg &= ~(0xffff << (reg_idx * 16));
2318 reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
2319 reg1->reg |= msr << (reg_idx * 16);
2320 reg1->config = event->attr.config1;
2321 reg_idx++;
2322 }
ebb6cc03
YZ
2323 /*
 2324 * The mbox only provides the ability to perform address matching
2325 * for the PLD events.
2326 */
2327 if (reg_idx == 2) {
2328 reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
2329 if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
2330 reg2->config = event->attr.config2;
2331 else
2332 reg2->config = ~0ULL;
2333 if (box->pmu->pmu_idx == 0)
2334 reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
2335 else
2336 reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
2337 }
254298c7 2338 return 0;
fcde10e9
YZ
2339}
2340
254298c7 2341static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
fcde10e9 2342{
254298c7
YZ
2343 struct intel_uncore_extra_reg *er;
2344 unsigned long flags;
2345 u64 config;
2346
2347 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
2348 return box->shared_regs[idx].config;
2349
2350 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
2351 raw_spin_lock_irqsave(&er->lock, flags);
2352 config = er->config;
2353 raw_spin_unlock_irqrestore(&er->lock, flags);
2354 return config;
2355}
2356
2357static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2358{
2359 struct hw_perf_event *hwc = &event->hw;
2360 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2361 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2362 int idx;
2363
2364 idx = __BITS_VALUE(reg1->idx, 0, 8);
2365 if (idx != 0xff)
2366 wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
2367 nhmex_mbox_shared_reg_config(box, idx));
2368 idx = __BITS_VALUE(reg1->idx, 1, 8);
2369 if (idx != 0xff)
2370 wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
2371 nhmex_mbox_shared_reg_config(box, idx));
2372
ebb6cc03
YZ
2373 if (reg2->idx != EXTRA_REG_NONE) {
2374 wrmsrl(reg2->reg, 0);
2375 if (reg2->config != ~0ULL) {
2376 wrmsrl(reg2->reg + 1,
2377 reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
2378 wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
2379 (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
2380 wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
2381 }
fcde10e9 2382 }
254298c7
YZ
2383
2384 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
fcde10e9
YZ
2385}
2386
ebb6cc03
YZ
2387DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3");
2388DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5");
2389DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6");
2390DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7");
2391DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13");
2392DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21");
2393DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63");
2394DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33");
2395DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61");
2396DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31");
2397DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31");
2398DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31");
2399DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
2400DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31");
2401DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31");
2402DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63");
254298c7
YZ
2403
2404static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
2405 &format_attr_count_mode.attr,
2406 &format_attr_storage_mode.attr,
2407 &format_attr_wrap_mode.attr,
2408 &format_attr_flag_mode.attr,
2409 &format_attr_inc_sel.attr,
2410 &format_attr_set_flag_sel.attr,
ebb6cc03 2411 &format_attr_filter_cfg_en.attr,
254298c7
YZ
2412 &format_attr_filter_match.attr,
2413 &format_attr_filter_mask.attr,
2414 &format_attr_dsp.attr,
2415 &format_attr_thr.attr,
2416 &format_attr_fvc.attr,
2417 &format_attr_pgt.attr,
2418 &format_attr_map.attr,
2419 &format_attr_iss.attr,
2420 &format_attr_pld.attr,
fcde10e9
YZ
2421 NULL,
2422};
2423
254298c7
YZ
2424static struct attribute_group nhmex_uncore_mbox_format_group = {
2425 .name = "format",
2426 .attrs = nhmex_uncore_mbox_formats_attr,
fcde10e9
YZ
2427};
2428
254298c7
YZ
2429static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
2430 INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
2431 INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
2432 { /* end: all zeroes */ },
fcde10e9
YZ
2433};
2434
cb37af77
YZ
2435static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
2436 INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
2437 INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
2438 { /* end: all zeroes */ },
2439};
2440
254298c7
YZ
2441static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
2442 NHMEX_UNCORE_OPS_COMMON_INIT(),
2443 .enable_event = nhmex_mbox_msr_enable_event,
2444 .hw_config = nhmex_mbox_hw_config,
2445 .get_constraint = nhmex_mbox_get_constraint,
2446 .put_constraint = nhmex_mbox_put_constraint,
fcde10e9
YZ
2447};
2448
254298c7
YZ
2449static struct intel_uncore_type nhmex_uncore_mbox = {
2450 .name = "mbox",
2451 .num_counters = 6,
2452 .num_boxes = 2,
2453 .perf_ctr_bits = 48,
2454 .event_ctl = NHMEX_M0_MSR_PMU_CTL0,
2455 .perf_ctr = NHMEX_M0_MSR_PMU_CNT0,
2456 .event_mask = NHMEX_M_PMON_RAW_EVENT_MASK,
2457 .box_ctl = NHMEX_M0_MSR_GLOBAL_CTL,
2458 .msr_offset = NHMEX_M_MSR_OFFSET,
2459 .pair_ctr_ctl = 1,
2460 .num_shared_regs = 8,
2461 .event_descs = nhmex_uncore_mbox_events,
2462 .ops = &nhmex_uncore_mbox_ops,
2463 .format_group = &nhmex_uncore_mbox_format_group,
fcde10e9
YZ
2464};
2465
46bdd905 2466static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
254298c7
YZ
2467{
2468 struct hw_perf_event *hwc = &event->hw;
2469 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
fcde10e9 2470
ebb6cc03 2471 /* adjust the main event selector and extra register index */
254298c7
YZ
2472 if (reg1->idx % 2) {
2473 reg1->idx--;
2474 hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2475 } else {
2476 reg1->idx++;
2477 hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2478 }
2479
ebb6cc03 2480 /* adjust extra register config */
254298c7 2481 switch (reg1->idx % 6) {
254298c7 2482 case 2:
ebb6cc03 2483 /* shift bits 8~15 down to bits 0~7 */
254298c7
YZ
2484 reg1->config >>= 8;
2485 break;
2486 case 3:
ebb6cc03 2487 /* shift bits 0~7 up to bits 8~15 */
254298c7
YZ
2488 reg1->config <<= 8;
2489 break;
254298c7
YZ
2490 };
2491}
2492
2493/*
 2494 * Each rbox has 4 event sets which monitor QPI ports 0~3 or 4~7.
 2495 * An event set consists of 6 events; the 3rd and 4th events in
 2496 * an event set use the same extra register, so an event set uses
 2497 * 5 extra registers.
2498 */
2499static struct event_constraint *
2500nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
fcde10e9 2501{
254298c7
YZ
2502 struct hw_perf_event *hwc = &event->hw;
2503 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2504 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
2505 struct intel_uncore_extra_reg *er;
2506 unsigned long flags;
2507 int idx, er_idx;
2508 u64 config1;
2509 bool ok = false;
2510
2511 if (!uncore_box_is_fake(box) && reg1->alloc)
2512 return NULL;
2513
2514 idx = reg1->idx % 6;
2515 config1 = reg1->config;
2516again:
2517 er_idx = idx;
2518 /* the 3rd and 4th events use the same extra register */
2519 if (er_idx > 2)
2520 er_idx--;
2521 er_idx += (reg1->idx / 6) * 5;
2522
2523 er = &box->shared_regs[er_idx];
2524 raw_spin_lock_irqsave(&er->lock, flags);
2525 if (idx < 2) {
2526 if (!atomic_read(&er->ref) || er->config == reg1->config) {
2527 atomic_inc(&er->ref);
2528 er->config = reg1->config;
2529 ok = true;
2530 }
2531 } else if (idx == 2 || idx == 3) {
2532 /*
 2533 * these two events use different fields in an extra register,
 2534 * bits 0~7 and bits 8~15 respectively.
2535 */
2536 u64 mask = 0xff << ((idx - 2) * 8);
2537 if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
2538 !((er->config ^ config1) & mask)) {
2539 atomic_add(1 << ((idx - 2) * 8), &er->ref);
2540 er->config &= ~mask;
2541 er->config |= config1 & mask;
2542 ok = true;
2543 }
2544 } else {
2545 if (!atomic_read(&er->ref) ||
2546 (er->config == (hwc->config >> 32) &&
2547 er->config1 == reg1->config &&
2548 er->config2 == reg2->config)) {
2549 atomic_inc(&er->ref);
2550 er->config = (hwc->config >> 32);
2551 er->config1 = reg1->config;
2552 er->config2 = reg2->config;
2553 ok = true;
2554 }
2555 }
2556 raw_spin_unlock_irqrestore(&er->lock, flags);
2557
2558 if (!ok) {
2559 /*
2560 * The Rbox events are always in pairs. The paired
2561 * events are functional identical, but use different
2562 * extra registers. If we failed to take an extra
2563 * register, try the alternative.
2564 */
2565 if (idx % 2)
2566 idx--;
2567 else
2568 idx++;
2569 if (idx != reg1->idx % 6) {
2570 if (idx == 2)
2571 config1 >>= 8;
2572 else if (idx == 3)
2573 config1 <<= 8;
2574 goto again;
2575 }
2576 } else {
2577 if (!uncore_box_is_fake(box)) {
2578 if (idx != reg1->idx % 6)
2579 nhmex_rbox_alter_er(box, event);
2580 reg1->alloc = 1;
2581 }
2582 return NULL;
2583 }
2584 return &constraint_empty;
fcde10e9
YZ
2585}
2586
254298c7 2587static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
fcde10e9 2588{
254298c7
YZ
2589 struct intel_uncore_extra_reg *er;
2590 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2591 int idx, er_idx;
2592
2593 if (uncore_box_is_fake(box) || !reg1->alloc)
2594 return;
2595
2596 idx = reg1->idx % 6;
2597 er_idx = idx;
2598 if (er_idx > 2)
2599 er_idx--;
2600 er_idx += (reg1->idx / 6) * 5;
2601
2602 er = &box->shared_regs[er_idx];
2603 if (idx == 2 || idx == 3)
2604 atomic_sub(1 << ((idx - 2) * 8), &er->ref);
2605 else
2606 atomic_dec(&er->ref);
2607
2608 reg1->alloc = 0;
fcde10e9
YZ
2609}
2610
254298c7 2611static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
fcde10e9
YZ
2612{
2613 struct hw_perf_event *hwc = &event->hw;
254298c7
YZ
2614 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2615 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
ebb6cc03 2616 int idx;
fcde10e9 2617
254298c7
YZ
2618 idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
2619 NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
2620 if (idx >= 0x18)
2621 return -EINVAL;
2622
2623 reg1->idx = idx;
2624 reg1->config = event->attr.config1;
2625
ebb6cc03 2626 switch (idx % 6) {
254298c7
YZ
2627 case 4:
2628 case 5:
254298c7 2629 hwc->config |= event->attr.config & (~0ULL << 32);
ebb6cc03 2630 reg2->config = event->attr.config2;
254298c7
YZ
2631 break;
2632 };
2633 return 0;
fcde10e9
YZ
2634}
2635
254298c7
YZ
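/*
 * Each Rbox instance covers 4 ports, so the port is derived from the
 * event-set index and the PMU instance (idx / 6 + pmu_idx * 4).  The
 * slot within the set (idx % 6) then selects which of the port's
 * IPERF/QLX/XBR configuration MSRs to program before the counter is
 * enabled.
 */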
2636static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
2637{
2638 struct hw_perf_event *hwc = &event->hw;
2639 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2640 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
ebb6cc03 2641 int idx, port;
254298c7 2642
ebb6cc03
YZ
2643 idx = reg1->idx;
2644 port = idx / 6 + box->pmu->pmu_idx * 4;
254298c7 2645
ebb6cc03 2646 switch (idx % 6) {
254298c7 2647 case 0:
ebb6cc03
YZ
2648 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
2649 break;
254298c7 2650 case 1:
ebb6cc03 2651 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
254298c7
YZ
2652 break;
2653 case 2:
2654 case 3:
ebb6cc03 2655 wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
46bdd905 2656 uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
254298c7
YZ
2657 break;
2658 case 4:
ebb6cc03
YZ
2659 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
2660 hwc->config >> 32);
2661 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
2662 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
2663 break;
254298c7 2664 case 5:
ebb6cc03
YZ
2665 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
2666 hwc->config >> 32);
2667 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
2668 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
254298c7
YZ
2669 break;
2670 };
2671
2672 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
2673 (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
2674}
2675
ebb6cc03
YZ
2676DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
2677DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
254298c7
YZ
2678DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
2679DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
2680DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
2681
2682static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
2683 &format_attr_event5.attr,
2684 &format_attr_xbr_mm_cfg.attr,
2685 &format_attr_xbr_match.attr,
2686 &format_attr_xbr_mask.attr,
2687 &format_attr_qlx_cfg.attr,
2688 &format_attr_iperf_cfg.attr,
fcde10e9
YZ
2689 NULL,
2690};
2691
254298c7 2692static struct attribute_group nhmex_uncore_rbox_format_group = {
fcde10e9 2693 .name = "format",
254298c7 2694 .attrs = nhmex_uncore_rbox_formats_attr,
fcde10e9
YZ
2695};
2696
254298c7
YZ
2697static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
2698 INTEL_UNCORE_EVENT_DESC(qpi0_flit_send, "event=0x0,iperf_cfg=0x80000000"),
2699 INTEL_UNCORE_EVENT_DESC(qpi1_filt_send, "event=0x6,iperf_cfg=0x80000000"),
2700 INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt, "event=0x0,iperf_cfg=0x40000000"),
2701 INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt, "event=0x6,iperf_cfg=0x40000000"),
2702 INTEL_UNCORE_EVENT_DESC(qpi0_date_response, "event=0x0,iperf_cfg=0xc4"),
2703 INTEL_UNCORE_EVENT_DESC(qpi1_date_response, "event=0x6,iperf_cfg=0xc4"),
fcde10e9
YZ
2704 { /* end: all zeroes */ },
2705};
2706
254298c7
YZ
2707static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
2708 NHMEX_UNCORE_OPS_COMMON_INIT(),
2709 .enable_event = nhmex_rbox_msr_enable_event,
2710 .hw_config = nhmex_rbox_hw_config,
2711 .get_constraint = nhmex_rbox_get_constraint,
2712 .put_constraint = nhmex_rbox_put_constraint,
fcde10e9
YZ
2713};
2714
254298c7
YZ
2715static struct intel_uncore_type nhmex_uncore_rbox = {
2716 .name = "rbox",
2717 .num_counters = 8,
2718 .num_boxes = 2,
2719 .perf_ctr_bits = 48,
2720 .event_ctl = NHMEX_R_MSR_PMON_CTL0,
2721 .perf_ctr = NHMEX_R_MSR_PMON_CNT0,
2722 .event_mask = NHMEX_R_PMON_RAW_EVENT_MASK,
2723 .box_ctl = NHMEX_R_MSR_GLOBAL_CTL,
2724 .msr_offset = NHMEX_R_MSR_OFFSET,
2725 .pair_ctr_ctl = 1,
2726 .num_shared_regs = 20,
2727 .event_descs = nhmex_uncore_rbox_events,
2728 .ops = &nhmex_uncore_rbox_ops,
2729 .format_group = &nhmex_uncore_rbox_format_group
fcde10e9
YZ
2730};
2731
254298c7
YZ
2732static struct intel_uncore_type *nhmex_msr_uncores[] = {
2733 &nhmex_uncore_ubox,
2734 &nhmex_uncore_cbox,
2735 &nhmex_uncore_bbox,
2736 &nhmex_uncore_sbox,
2737 &nhmex_uncore_mbox,
2738 &nhmex_uncore_rbox,
2739 &nhmex_uncore_wbox,
fcde10e9
YZ
2740 NULL,
2741};
254298c7 2742/* end of Nehalem-EX uncore support */
fcde10e9 2743
254298c7 2744static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
087bfbb0
YZ
2745{
2746 struct hw_perf_event *hwc = &event->hw;
2747
2748 hwc->idx = idx;
2749 hwc->last_tag = ++box->tags[idx];
2750
2751 if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
14371cce
YZ
2752 hwc->event_base = uncore_fixed_ctr(box);
2753 hwc->config_base = uncore_fixed_ctl(box);
087bfbb0
YZ
2754 return;
2755 }
2756
14371cce
YZ
2757 hwc->config_base = uncore_event_ctl(box, hwc->idx);
2758 hwc->event_base = uncore_perf_ctr(box, hwc->idx);
087bfbb0
YZ
2759}
2760
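/*
 * Uncore counters are narrower than 64 bits, so both values are shifted
 * up to the top of a u64 before the delta is taken and shifted back
 * down afterwards; this truncates the delta to the counter width and
 * makes a single wrap between two reads come out right.  For example,
 * with 48-bit counters shift is 16 and the delta is computed modulo 2^48.
 * The xchg retry loop guards against the hrtimer poll updating
 * prev_count concurrently.
 */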
254298c7 2761static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
087bfbb0
YZ
2762{
2763 u64 prev_count, new_count, delta;
2764 int shift;
2765
2766 if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
2767 shift = 64 - uncore_fixed_ctr_bits(box);
2768 else
2769 shift = 64 - uncore_perf_ctr_bits(box);
2770
2771 /* the hrtimer might modify the previous event value */
2772again:
2773 prev_count = local64_read(&event->hw.prev_count);
2774 new_count = uncore_read_counter(box, event);
2775 if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
2776 goto again;
2777
2778 delta = (new_count << shift) - (prev_count << shift);
2779 delta >>= shift;
2780
2781 local64_add(delta, &event->count);
2782}
2783
2784/*
 2785 * The overflow interrupt is unavailable for SandyBridge-EP and broken
 2786 * for SandyBridge, so we use a hrtimer to periodically poll the counters
 2787 * to avoid overflow.
2788 */
2789static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
2790{
2791 struct intel_uncore_box *box;
2792 unsigned long flags;
2793 int bit;
2794
2795 box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
2796 if (!box->n_active || box->cpu != smp_processor_id())
2797 return HRTIMER_NORESTART;
2798 /*
 2799 * disable local interrupts to prevent uncore_pmu_event_start/stop
 2800 * from interrupting the update process
2801 */
2802 local_irq_save(flags);
2803
2804 for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
2805 uncore_perf_event_update(box, box->events[bit]);
2806
2807 local_irq_restore(flags);
2808
2809 hrtimer_forward_now(hrtimer, ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL));
2810 return HRTIMER_RESTART;
2811}
2812
2813static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
2814{
2815 __hrtimer_start_range_ns(&box->hrtimer,
2816 ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL), 0,
2817 HRTIMER_MODE_REL_PINNED, 0);
2818}
2819
2820static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
2821{
2822 hrtimer_cancel(&box->hrtimer);
2823}
2824
2825static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
2826{
2827 hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2828 box->hrtimer.function = uncore_pmu_hrtimer;
2829}
2830
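/*
 * A box is allocated together with its shared extra registers, which
 * appear to live in an array directly behind the struct (hence the
 * size computation below); allocating on the requested NUMA node keeps
 * the box data local to the package it will monitor.
 */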
73c4427c 2831static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
087bfbb0
YZ
2832{
2833 struct intel_uncore_box *box;
6a67943a 2834 int i, size;
087bfbb0 2835
254298c7 2836 size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
6a67943a 2837
73c4427c 2838 box = kzalloc_node(size, GFP_KERNEL, node);
087bfbb0
YZ
2839 if (!box)
2840 return NULL;
2841
6a67943a
YZ
2842 for (i = 0; i < type->num_shared_regs; i++)
2843 raw_spin_lock_init(&box->shared_regs[i].lock);
2844
087bfbb0
YZ
2845 uncore_pmu_init_hrtimer(box);
2846 atomic_set(&box->refcnt, 1);
2847 box->cpu = -1;
2848 box->phys_id = -1;
2849
2850 return box;
2851}
2852
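/*
 * Look up the box a cpu should use: the per-cpu pointer acts as a
 * cache, and on a miss the pmu's box list is searched for the box of
 * this cpu's physical package, taking a reference and caching the
 * result for later lookups.
 */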
2853static struct intel_uncore_box *
2854uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
2855{
402537fd 2856 struct intel_uncore_box *box;
14371cce
YZ
2857
2858 box = *per_cpu_ptr(pmu->box, cpu);
2859 if (box)
2860 return box;
2861
2862 raw_spin_lock(&uncore_box_lock);
2863 list_for_each_entry(box, &pmu->box_list, list) {
2864 if (box->phys_id == topology_physical_package_id(cpu)) {
2865 atomic_inc(&box->refcnt);
2866 *per_cpu_ptr(pmu->box, cpu) = box;
2867 break;
2868 }
2869 }
2870 raw_spin_unlock(&uncore_box_lock);
2871
087bfbb0
YZ
2872 return *per_cpu_ptr(pmu->box, cpu);
2873}
2874
2875static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
2876{
2877 return container_of(event->pmu, struct intel_uncore_pmu, pmu);
2878}
2879
2880static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
2881{
2882 /*
 2883 * The perf core schedules events on a per-cpu basis; uncore events are
 2884 * collected by one of the cpus inside a physical package.
2885 */
254298c7 2886 return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
087bfbb0
YZ
2887}
2888
254298c7
YZ
2889static int
2890uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
087bfbb0
YZ
2891{
2892 struct perf_event *event;
2893 int n, max_count;
2894
2895 max_count = box->pmu->type->num_counters;
2896 if (box->pmu->type->fixed_ctl)
2897 max_count++;
2898
2899 if (box->n_events >= max_count)
2900 return -EINVAL;
2901
2902 n = box->n_events;
2903 box->event_list[n] = leader;
2904 n++;
2905 if (!dogrp)
2906 return n;
2907
2908 list_for_each_entry(event, &leader->sibling_list, group_entry) {
2909 if (event->state <= PERF_EVENT_STATE_OFF)
2910 continue;
2911
2912 if (n >= max_count)
2913 return -EINVAL;
2914
2915 box->event_list[n] = event;
2916 n++;
2917 }
2918 return n;
2919}
2920
2921static struct event_constraint *
254298c7 2922uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
087bfbb0 2923{
6a67943a 2924 struct intel_uncore_type *type = box->pmu->type;
087bfbb0
YZ
2925 struct event_constraint *c;
2926
6a67943a
YZ
2927 if (type->ops->get_constraint) {
2928 c = type->ops->get_constraint(box, event);
2929 if (c)
2930 return c;
2931 }
2932
dbc33f70 2933 if (event->attr.config == UNCORE_FIXED_EVENT)
087bfbb0
YZ
2934 return &constraint_fixed;
2935
2936 if (type->constraints) {
2937 for_each_event_constraint(c, type->constraints) {
2938 if ((event->hw.config & c->cmask) == c->code)
2939 return c;
2940 }
2941 }
2942
2943 return &type->unconstrainted;
2944}
2945
254298c7 2946static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
6a67943a
YZ
2947{
2948 if (box->pmu->type->ops->put_constraint)
2949 box->pmu->type->ops->put_constraint(box, event);
2950}
2951
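/*
 * Assign counters to the collected events, much like the core PMU
 * scheduler: gather each event's constraint, try the fast path of
 * keeping every event on the counter it already used, and fall back to
 * perf_assign_events() otherwise.  A NULL assign array just validates
 * that a valid schedule exists.
 */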
254298c7 2952static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
087bfbb0
YZ
2953{
2954 unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
43b45780 2955 struct event_constraint *c;
6a67943a 2956 int i, wmin, wmax, ret = 0;
087bfbb0
YZ
2957 struct hw_perf_event *hwc;
2958
2959 bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
2960
2961 for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
43b45780 2962 hwc = &box->event_list[i]->hw;
6a67943a 2963 c = uncore_get_event_constraint(box, box->event_list[i]);
43b45780 2964 hwc->constraint = c;
087bfbb0
YZ
2965 wmin = min(wmin, c->weight);
2966 wmax = max(wmax, c->weight);
2967 }
2968
2969 /* fastpath, try to reuse previous register */
2970 for (i = 0; i < n; i++) {
2971 hwc = &box->event_list[i]->hw;
43b45780 2972 c = hwc->constraint;
087bfbb0
YZ
2973
2974 /* never assigned */
2975 if (hwc->idx == -1)
2976 break;
2977
2978 /* constraint still honored */
2979 if (!test_bit(hwc->idx, c->idxmsk))
2980 break;
2981
2982 /* not already used */
2983 if (test_bit(hwc->idx, used_mask))
2984 break;
2985
2986 __set_bit(hwc->idx, used_mask);
6a67943a
YZ
2987 if (assign)
2988 assign[i] = hwc->idx;
087bfbb0 2989 }
087bfbb0 2990 /* slow path */
6a67943a 2991 if (i != n)
43b45780
AH
2992 ret = perf_assign_events(box->event_list, n,
2993 wmin, wmax, assign);
6a67943a
YZ
2994
2995 if (!assign || ret) {
2996 for (i = 0; i < n; i++)
2997 uncore_put_event_constraint(box, box->event_list[i]);
2998 }
087bfbb0
YZ
2999 return ret ? -EINVAL : 0;
3000}
3001
3002static void uncore_pmu_event_start(struct perf_event *event, int flags)
3003{
3004 struct intel_uncore_box *box = uncore_event_to_box(event);
3005 int idx = event->hw.idx;
3006
3007 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
3008 return;
3009
3010 if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
3011 return;
3012
3013 event->hw.state = 0;
3014 box->events[idx] = event;
3015 box->n_active++;
3016 __set_bit(idx, box->active_mask);
3017
3018 local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
3019 uncore_enable_event(box, event);
3020
3021 if (box->n_active == 1) {
3022 uncore_enable_box(box);
3023 uncore_pmu_start_hrtimer(box);
3024 }
3025}
3026
3027static void uncore_pmu_event_stop(struct perf_event *event, int flags)
3028{
3029 struct intel_uncore_box *box = uncore_event_to_box(event);
3030 struct hw_perf_event *hwc = &event->hw;
3031
3032 if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
3033 uncore_disable_event(box, event);
3034 box->n_active--;
3035 box->events[hwc->idx] = NULL;
3036 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
3037 hwc->state |= PERF_HES_STOPPED;
3038
3039 if (box->n_active == 0) {
3040 uncore_disable_box(box);
3041 uncore_pmu_cancel_hrtimer(box);
3042 }
3043 }
3044
3045 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
3046 /*
 3047 * Drain the remaining delta count out of an event
3048 * that we are disabling:
3049 */
3050 uncore_perf_event_update(box, event);
3051 hwc->state |= PERF_HES_UPTODATE;
3052 }
3053}
3054
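/*
 * Adding an event reruns counter assignment for everything already on
 * the box: events that move to a different counter are stopped first
 * (draining their counts) and then restarted on the new counter, while
 * events that keep their counter are left running.
 */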
3055static int uncore_pmu_event_add(struct perf_event *event, int flags)
3056{
3057 struct intel_uncore_box *box = uncore_event_to_box(event);
3058 struct hw_perf_event *hwc = &event->hw;
3059 int assign[UNCORE_PMC_IDX_MAX];
3060 int i, n, ret;
3061
3062 if (!box)
3063 return -ENODEV;
3064
3065 ret = n = uncore_collect_events(box, event, false);
3066 if (ret < 0)
3067 return ret;
3068
3069 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
3070 if (!(flags & PERF_EF_START))
3071 hwc->state |= PERF_HES_ARCH;
3072
3073 ret = uncore_assign_events(box, assign, n);
3074 if (ret)
3075 return ret;
3076
3077 /* save events moving to new counters */
3078 for (i = 0; i < box->n_events; i++) {
3079 event = box->event_list[i];
3080 hwc = &event->hw;
3081
3082 if (hwc->idx == assign[i] &&
3083 hwc->last_tag == box->tags[assign[i]])
3084 continue;
3085 /*
3086 * Ensure we don't accidentally enable a stopped
3087 * counter simply because we rescheduled.
3088 */
3089 if (hwc->state & PERF_HES_STOPPED)
3090 hwc->state |= PERF_HES_ARCH;
3091
3092 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
3093 }
3094
3095 /* reprogram moved events into new counters */
3096 for (i = 0; i < n; i++) {
3097 event = box->event_list[i];
3098 hwc = &event->hw;
3099
3100 if (hwc->idx != assign[i] ||
3101 hwc->last_tag != box->tags[assign[i]])
3102 uncore_assign_hw_event(box, event, assign[i]);
3103 else if (i < box->n_events)
3104 continue;
3105
3106 if (hwc->state & PERF_HES_ARCH)
3107 continue;
3108
3109 uncore_pmu_event_start(event, 0);
3110 }
3111 box->n_events = n;
3112
3113 return 0;
3114}
3115
3116static void uncore_pmu_event_del(struct perf_event *event, int flags)
3117{
3118 struct intel_uncore_box *box = uncore_event_to_box(event);
3119 int i;
3120
3121 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
3122
3123 for (i = 0; i < box->n_events; i++) {
3124 if (event == box->event_list[i]) {
6a67943a
YZ
3125 uncore_put_event_constraint(box, event);
3126
087bfbb0
YZ
3127 while (++i < box->n_events)
3128 box->event_list[i - 1] = box->event_list[i];
3129
3130 --box->n_events;
3131 break;
3132 }
3133 }
3134
3135 event->hw.idx = -1;
3136 event->hw.last_tag = ~0ULL;
3137}
3138
3139static void uncore_pmu_event_read(struct perf_event *event)
3140{
3141 struct intel_uncore_box *box = uncore_event_to_box(event);
3142 uncore_perf_event_update(box, event);
3143}
3144
3145/*
3146 * validation ensures the group can be loaded onto the
3147 * PMU if it was the only group available.
3148 */
3149static int uncore_validate_group(struct intel_uncore_pmu *pmu,
3150 struct perf_event *event)
3151{
3152 struct perf_event *leader = event->group_leader;
3153 struct intel_uncore_box *fake_box;
087bfbb0
YZ
3154 int ret = -EINVAL, n;
3155
73c4427c 3156 fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
087bfbb0
YZ
3157 if (!fake_box)
3158 return -ENOMEM;
3159
3160 fake_box->pmu = pmu;
3161 /*
3162 * the event is not yet connected with its
3163 * siblings therefore we must first collect
3164 * existing siblings, then add the new event
3165 * before we can simulate the scheduling
3166 */
3167 n = uncore_collect_events(fake_box, leader, true);
3168 if (n < 0)
3169 goto out;
3170
3171 fake_box->n_events = n;
3172 n = uncore_collect_events(fake_box, event, false);
3173 if (n < 0)
3174 goto out;
3175
3176 fake_box->n_events = n;
3177
6a67943a 3178 ret = uncore_assign_events(fake_box, NULL, n);
087bfbb0
YZ
3179out:
3180 kfree(fake_box);
3181 return ret;
3182}
3183
46bdd905 3184static int uncore_pmu_event_init(struct perf_event *event)
087bfbb0
YZ
3185{
3186 struct intel_uncore_pmu *pmu;
3187 struct intel_uncore_box *box;
3188 struct hw_perf_event *hwc = &event->hw;
3189 int ret;
3190
3191 if (event->attr.type != event->pmu->type)
3192 return -ENOENT;
3193
3194 pmu = uncore_event_to_pmu(event);
3195 /* no device found for this pmu */
3196 if (pmu->func_id < 0)
3197 return -ENOENT;
3198
3199 /*
 3200 * The uncore PMU counts at all privilege levels all the time,
 3201 * so it doesn't make sense to specify any exclude bits.
3202 */
3203 if (event->attr.exclude_user || event->attr.exclude_kernel ||
3204 event->attr.exclude_hv || event->attr.exclude_idle)
3205 return -EINVAL;
3206
3207 /* Sampling not supported yet */
3208 if (hwc->sample_period)
3209 return -EINVAL;
3210
3211 /*
3212 * Place all uncore events for a particular physical package
3213 * onto a single cpu
3214 */
3215 if (event->cpu < 0)
3216 return -EINVAL;
3217 box = uncore_pmu_to_box(pmu, event->cpu);
3218 if (!box || box->cpu < 0)
3219 return -EINVAL;
3220 event->cpu = box->cpu;
3221
6a67943a
YZ
3222 event->hw.idx = -1;
3223 event->hw.last_tag = ~0ULL;
3224 event->hw.extra_reg.idx = EXTRA_REG_NONE;
ebb6cc03 3225 event->hw.branch_reg.idx = EXTRA_REG_NONE;
6a67943a 3226
087bfbb0
YZ
3227 if (event->attr.config == UNCORE_FIXED_EVENT) {
3228 /* no fixed counter */
3229 if (!pmu->type->fixed_ctl)
3230 return -EINVAL;
3231 /*
3232 * if there is only one fixed counter, only the first pmu
3233 * can access the fixed counter
3234 */
3235 if (pmu->type->single_fixed && pmu->pmu_idx > 0)
3236 return -EINVAL;
dbc33f70
SE
3237
3238 /* fixed counters have event field hardcoded to zero */
3239 hwc->config = 0ULL;
087bfbb0
YZ
3240 } else {
3241 hwc->config = event->attr.config & pmu->type->event_mask;
6a67943a
YZ
3242 if (pmu->type->ops->hw_config) {
3243 ret = pmu->type->ops->hw_config(box, event);
3244 if (ret)
3245 return ret;
3246 }
087bfbb0
YZ
3247 }
3248
087bfbb0
YZ
3249 if (event->group_leader != event)
3250 ret = uncore_validate_group(pmu, event);
3251 else
3252 ret = 0;
3253
3254 return ret;
3255}
3256
314d9f63
YZ
3257static ssize_t uncore_get_attr_cpumask(struct device *dev,
3258 struct device_attribute *attr, char *buf)
3259{
3260 int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &uncore_cpu_mask);
3261
3262 buf[n++] = '\n';
3263 buf[n] = '\0';
3264 return n;
3265}
3266
3267static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);
3268
3269static struct attribute *uncore_pmu_attrs[] = {
3270 &dev_attr_cpumask.attr,
3271 NULL,
3272};
3273
3274static struct attribute_group uncore_pmu_attr_group = {
3275 .attrs = uncore_pmu_attrs,
3276};
3277
087bfbb0
YZ
3278static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
3279{
3280 int ret;
3281
3282 pmu->pmu = (struct pmu) {
3283 .attr_groups = pmu->type->attr_groups,
3284 .task_ctx_nr = perf_invalid_context,
3285 .event_init = uncore_pmu_event_init,
3286 .add = uncore_pmu_event_add,
3287 .del = uncore_pmu_event_del,
3288 .start = uncore_pmu_event_start,
3289 .stop = uncore_pmu_event_stop,
3290 .read = uncore_pmu_event_read,
3291 };
3292
3293 if (pmu->type->num_boxes == 1) {
3294 if (strlen(pmu->type->name) > 0)
3295 sprintf(pmu->name, "uncore_%s", pmu->type->name);
3296 else
3297 sprintf(pmu->name, "uncore");
3298 } else {
3299 sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
3300 pmu->pmu_idx);
3301 }
3302
3303 ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
3304 return ret;
3305}
3306
3307static void __init uncore_type_exit(struct intel_uncore_type *type)
3308{
3309 int i;
3310
3311 for (i = 0; i < type->num_boxes; i++)
3312 free_percpu(type->pmus[i].box);
3313 kfree(type->pmus);
3314 type->pmus = NULL;
314d9f63
YZ
3315 kfree(type->events_group);
3316 type->events_group = NULL;
087bfbb0
YZ
3317}
3318
cffa59ba 3319static void __init uncore_types_exit(struct intel_uncore_type **types)
14371cce
YZ
3320{
3321 int i;
3322 for (i = 0; types[i]; i++)
3323 uncore_type_exit(types[i]);
3324}
3325
087bfbb0
YZ
3326static int __init uncore_type_init(struct intel_uncore_type *type)
3327{
3328 struct intel_uncore_pmu *pmus;
1b0dac2a 3329 struct attribute_group *attr_group;
087bfbb0
YZ
3330 struct attribute **attrs;
3331 int i, j;
3332
3333 pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
3334 if (!pmus)
3335 return -ENOMEM;
3336
b7b4839d
DJ
3337 type->pmus = pmus;
3338
087bfbb0
YZ
3339 type->unconstrainted = (struct event_constraint)
3340 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
9fac2cf3 3341 0, type->num_counters, 0, 0);
087bfbb0
YZ
3342
3343 for (i = 0; i < type->num_boxes; i++) {
3344 pmus[i].func_id = -1;
3345 pmus[i].pmu_idx = i;
3346 pmus[i].type = type;
14371cce 3347 INIT_LIST_HEAD(&pmus[i].box_list);
087bfbb0
YZ
3348 pmus[i].box = alloc_percpu(struct intel_uncore_box *);
3349 if (!pmus[i].box)
3350 goto fail;
3351 }
3352
3353 if (type->event_descs) {
3354 i = 0;
3355 while (type->event_descs[i].attr.attr.name)
3356 i++;
3357
1b0dac2a
JSM
3358 attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
3359 sizeof(*attr_group), GFP_KERNEL);
3360 if (!attr_group)
087bfbb0
YZ
3361 goto fail;
3362
1b0dac2a
JSM
3363 attrs = (struct attribute **)(attr_group + 1);
3364 attr_group->name = "events";
3365 attr_group->attrs = attrs;
087bfbb0
YZ
3366
3367 for (j = 0; j < i; j++)
3368 attrs[j] = &type->event_descs[j].attr.attr;
3369
1b0dac2a 3370 type->events_group = attr_group;
087bfbb0
YZ
3371 }
3372
314d9f63 3373 type->pmu_group = &uncore_pmu_attr_group;
087bfbb0
YZ
3374 return 0;
3375fail:
3376 uncore_type_exit(type);
3377 return -ENOMEM;
3378}
3379
3380static int __init uncore_types_init(struct intel_uncore_type **types)
3381{
3382 int i, ret;
3383
3384 for (i = 0; types[i]; i++) {
3385 ret = uncore_type_init(types[i]);
3386 if (ret)
3387 goto fail;
3388 }
3389 return 0;
3390fail:
3391 while (--i >= 0)
3392 uncore_type_exit(types[i]);
3393 return ret;
3394}
3395
14371cce
YZ
3396static struct pci_driver *uncore_pci_driver;
3397static bool pcidrv_registered;
3398
3399/*
3400 * add a pci uncore device
3401 */
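/*
 * The bus number maps to the physical package via pcibus_to_physid.
 * Devices flagged as UNCORE_EXTRA_PCI_DEV (such as the QPI port filter
 * devices in the id tables above) are only stashed in extra_pci_dev[]
 * for use by other boxes; every other device gets a freshly allocated
 * box that is linked into the owning pmu's box list.
 */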
899396cf 3402static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
14371cce
YZ
3403{
3404 struct intel_uncore_pmu *pmu;
3405 struct intel_uncore_box *box;
899396cf
YZ
3406 struct intel_uncore_type *type;
3407 int phys_id;
14371cce
YZ
3408
3409 phys_id = pcibus_to_physid[pdev->bus->number];
3410 if (phys_id < 0)
3411 return -ENODEV;
3412
899396cf
YZ
3413 if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
3414 extra_pci_dev[phys_id][UNCORE_PCI_DEV_IDX(id->driver_data)] = pdev;
3415 pci_set_drvdata(pdev, NULL);
3416 return 0;
3417 }
3418
3419 type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
73c4427c 3420 box = uncore_alloc_box(type, NUMA_NO_NODE);
14371cce
YZ
3421 if (!box)
3422 return -ENOMEM;
3423
3424 /*
3425 * for performance monitoring unit with multiple boxes,
3426 * each box has a different function id.
3427 */
899396cf
YZ
3428 pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
3429 if (pmu->func_id < 0)
3430 pmu->func_id = pdev->devfn;
3431 else
3432 WARN_ON_ONCE(pmu->func_id != pdev->devfn);
14371cce
YZ
3433
3434 box->phys_id = phys_id;
3435 box->pci_dev = pdev;
3436 box->pmu = pmu;
3437 uncore_box_init(box);
3438 pci_set_drvdata(pdev, box);
3439
3440 raw_spin_lock(&uncore_box_lock);
3441 list_add_tail(&box->list, &pmu->box_list);
3442 raw_spin_unlock(&uncore_box_lock);
3443
3444 return 0;
3445}
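
/*
 * Note: devices matched as UNCORE_EXTRA_PCI_DEV are not PMU boxes of
 * their own; they are only stashed in extra_pci_dev[] so that boxes on
 * the same socket can reach their config space (on SNB-EP/IvyTown this
 * is how match/mask registers that sit on separate PCI functions are
 * programmed).
 */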

static void uncore_pci_remove(struct pci_dev *pdev)
{
        struct intel_uncore_box *box = pci_get_drvdata(pdev);
        struct intel_uncore_pmu *pmu;
        int i, cpu, phys_id = pcibus_to_physid[pdev->bus->number];

        box = pci_get_drvdata(pdev);
        if (!box) {
                for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
                        if (extra_pci_dev[phys_id][i] == pdev) {
                                extra_pci_dev[phys_id][i] = NULL;
                                break;
                        }
                }
                WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
                return;
        }

        pmu = box->pmu;
        if (WARN_ON_ONCE(phys_id != box->phys_id))
                return;

        pci_set_drvdata(pdev, NULL);

        raw_spin_lock(&uncore_box_lock);
        list_del(&box->list);
        raw_spin_unlock(&uncore_box_lock);

        for_each_possible_cpu(cpu) {
                if (*per_cpu_ptr(pmu->box, cpu) == box) {
                        *per_cpu_ptr(pmu->box, cpu) = NULL;
                        atomic_dec(&box->refcnt);
                }
        }

        WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
        kfree(box);
}

static int __init uncore_pci_init(void)
{
        int ret;

        switch (boot_cpu_data.x86_model) {
        case 45: /* Sandy Bridge-EP */
                ret = snbep_pci2phy_map_init(0x3ce0);
                if (ret)
                        return ret;
                pci_uncores = snbep_pci_uncores;
                uncore_pci_driver = &snbep_uncore_pci_driver;
                break;
        case 62: /* IvyTown */
                ret = snbep_pci2phy_map_init(0x0e1e);
                if (ret)
                        return ret;
                pci_uncores = ivt_pci_uncores;
                uncore_pci_driver = &ivt_uncore_pci_driver;
                break;
        default:
                return 0;
        }

        ret = uncore_types_init(pci_uncores);
        if (ret)
                return ret;

        uncore_pci_driver->probe = uncore_pci_probe;
        uncore_pci_driver->remove = uncore_pci_remove;

        ret = pci_register_driver(uncore_pci_driver);
        if (ret == 0)
                pcidrv_registered = true;
        else
                uncore_types_exit(pci_uncores);

        return ret;
}

static void __init uncore_pci_exit(void)
{
        if (pcidrv_registered) {
                pcidrv_registered = false;
                pci_unregister_driver(uncore_pci_driver);
                uncore_types_exit(pci_uncores);
        }
}

/* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */
static LIST_HEAD(boxes_to_free);

static void uncore_kfree_boxes(void)
{
        struct intel_uncore_box *box;

        while (!list_empty(&boxes_to_free)) {
                box = list_entry(boxes_to_free.next,
                                 struct intel_uncore_box, list);
                list_del(&box->list);
                kfree(box);
        }
}
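
/*
 * Note: boxes released from the CPU_DYING/CPU_STARTING paths (which run
 * on the affected CPU with interrupts disabled) are only queued on
 * boxes_to_free; the actual kfree() is deferred to the CPU_ONLINE and
 * CPU_DEAD notifications via uncore_kfree_boxes() above.
 */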

static void uncore_cpu_dying(int cpu)
{
        struct intel_uncore_type *type;
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
        int i, j;

        for (i = 0; msr_uncores[i]; i++) {
                type = msr_uncores[i];
                for (j = 0; j < type->num_boxes; j++) {
                        pmu = &type->pmus[j];
                        box = *per_cpu_ptr(pmu->box, cpu);
                        *per_cpu_ptr(pmu->box, cpu) = NULL;
                        if (box && atomic_dec_and_test(&box->refcnt))
                                list_add(&box->list, &boxes_to_free);
                }
        }
}

static int uncore_cpu_starting(int cpu)
{
        struct intel_uncore_type *type;
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box, *exist;
        int i, j, k, phys_id;

        phys_id = topology_physical_package_id(cpu);

        for (i = 0; msr_uncores[i]; i++) {
                type = msr_uncores[i];
                for (j = 0; j < type->num_boxes; j++) {
                        pmu = &type->pmus[j];
                        box = *per_cpu_ptr(pmu->box, cpu);
                        /* called by uncore_cpu_init? */
                        if (box && box->phys_id >= 0) {
                                uncore_box_init(box);
                                continue;
                        }

                        for_each_online_cpu(k) {
                                exist = *per_cpu_ptr(pmu->box, k);
                                if (exist && exist->phys_id == phys_id) {
                                        atomic_inc(&exist->refcnt);
                                        *per_cpu_ptr(pmu->box, cpu) = exist;
                                        if (box) {
                                                list_add(&box->list,
                                                         &boxes_to_free);
                                                box = NULL;
                                        }
                                        break;
                                }
                        }

                        if (box) {
                                box->phys_id = phys_id;
                                uncore_box_init(box);
                        }
                }
        }
        return 0;
}

static int uncore_cpu_prepare(int cpu, int phys_id)
{
        struct intel_uncore_type *type;
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
        int i, j;

        for (i = 0; msr_uncores[i]; i++) {
                type = msr_uncores[i];
                for (j = 0; j < type->num_boxes; j++) {
                        pmu = &type->pmus[j];
                        if (pmu->func_id < 0)
                                pmu->func_id = j;

                        box = uncore_alloc_box(type, cpu_to_node(cpu));
                        if (!box)
                                return -ENOMEM;

                        box->pmu = pmu;
                        box->phys_id = phys_id;
                        *per_cpu_ptr(pmu->box, cpu) = box;
                }
        }
        return 0;
}

static void
uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
{
        struct intel_uncore_type *type;
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
        int i, j;

        for (i = 0; uncores[i]; i++) {
                type = uncores[i];
                for (j = 0; j < type->num_boxes; j++) {
                        pmu = &type->pmus[j];
                        if (old_cpu < 0)
                                box = uncore_pmu_to_box(pmu, new_cpu);
                        else
                                box = uncore_pmu_to_box(pmu, old_cpu);
                        if (!box)
                                continue;

                        if (old_cpu < 0) {
                                WARN_ON_ONCE(box->cpu != -1);
                                box->cpu = new_cpu;
                                continue;
                        }

                        WARN_ON_ONCE(box->cpu != old_cpu);
                        if (new_cpu >= 0) {
                                uncore_pmu_cancel_hrtimer(box);
                                perf_pmu_migrate_context(&pmu->pmu,
                                                old_cpu, new_cpu);
                                box->cpu = new_cpu;
                        } else {
                                box->cpu = -1;
                        }
                }
        }
}
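
/*
 * Note: uncore_change_context() is called with old_cpu == -1 when a
 * package gains its first event-collecting CPU (the boxes are simply
 * bound to new_cpu), and with new_cpu == -1 when the last one goes away;
 * otherwise it migrates the perf context from old_cpu to new_cpu.
 */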

static void uncore_event_exit_cpu(int cpu)
{
        int i, phys_id, target;

        /* if exiting cpu is used for collecting uncore events */
        if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
                return;

        /* find a new cpu to collect uncore events */
        phys_id = topology_physical_package_id(cpu);
        target = -1;
        for_each_online_cpu(i) {
                if (i == cpu)
                        continue;
                if (phys_id == topology_physical_package_id(i)) {
                        target = i;
                        break;
                }
        }

        /* migrate uncore events to the new cpu */
        if (target >= 0)
                cpumask_set_cpu(target, &uncore_cpu_mask);

        uncore_change_context(msr_uncores, cpu, target);
        uncore_change_context(pci_uncores, cpu, target);
}

static void uncore_event_init_cpu(int cpu)
{
        int i, phys_id;

        phys_id = topology_physical_package_id(cpu);
        for_each_cpu(i, &uncore_cpu_mask) {
                if (phys_id == topology_physical_package_id(i))
                        return;
        }

        cpumask_set_cpu(cpu, &uncore_cpu_mask);

        uncore_change_context(msr_uncores, -1, cpu);
        uncore_change_context(pci_uncores, -1, cpu);
}

static int uncore_cpu_notifier(struct notifier_block *self,
                               unsigned long action, void *hcpu)
{
        unsigned int cpu = (long)hcpu;

        /* allocate/free data structure for uncore box */
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
                uncore_cpu_prepare(cpu, -1);
                break;
        case CPU_STARTING:
                uncore_cpu_starting(cpu);
                break;
        case CPU_UP_CANCELED:
        case CPU_DYING:
                uncore_cpu_dying(cpu);
                break;
        case CPU_ONLINE:
        case CPU_DEAD:
                uncore_kfree_boxes();
                break;
        default:
                break;
        }

        /* select the cpu that collects uncore events */
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DOWN_FAILED:
        case CPU_STARTING:
                uncore_event_init_cpu(cpu);
                break;
        case CPU_DOWN_PREPARE:
                uncore_event_exit_cpu(cpu);
                break;
        default:
                break;
        }

        return NOTIFY_OK;
}
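
/*
 * Note: the two switch statements above deliberately look at the same
 * notification twice: the first one manages box allocation and teardown,
 * the second one re-elects the CPU that collects uncore events for the
 * affected package.
 */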

static struct notifier_block uncore_cpu_nb = {
        .notifier_call = uncore_cpu_notifier,
        /*
         * to migrate uncore events, our notifier should be executed
         * before perf core's notifier.
         */
        .priority = CPU_PRI_PERF + 1,
};

static void __init uncore_cpu_setup(void *dummy)
{
        uncore_cpu_starting(smp_processor_id());
}

static int __init uncore_cpu_init(void)
{
        int ret, cpu, max_cores;

        max_cores = boot_cpu_data.x86_max_cores;
        switch (boot_cpu_data.x86_model) {
        case 26: /* Nehalem */
        case 30:
        case 37: /* Westmere */
        case 44:
                msr_uncores = nhm_msr_uncores;
                break;
        case 42: /* Sandy Bridge */
        case 58: /* Ivy Bridge */
                if (snb_uncore_cbox.num_boxes > max_cores)
                        snb_uncore_cbox.num_boxes = max_cores;
                msr_uncores = snb_msr_uncores;
                break;
        case 45: /* Sandy Bridge-EP */
                if (snbep_uncore_cbox.num_boxes > max_cores)
                        snbep_uncore_cbox.num_boxes = max_cores;
                msr_uncores = snbep_msr_uncores;
                break;
        case 46: /* Nehalem-EX */
                uncore_nhmex = true;
                /* fall through */
        case 47: /* Westmere-EX aka. Xeon E7 */
                if (!uncore_nhmex)
                        nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
                if (nhmex_uncore_cbox.num_boxes > max_cores)
                        nhmex_uncore_cbox.num_boxes = max_cores;
                msr_uncores = nhmex_msr_uncores;
                break;
        case 62: /* IvyTown */
                if (ivt_uncore_cbox.num_boxes > max_cores)
                        ivt_uncore_cbox.num_boxes = max_cores;
                msr_uncores = ivt_msr_uncores;
                break;

        default:
                return 0;
        }

        ret = uncore_types_init(msr_uncores);
        if (ret)
                return ret;

        get_online_cpus();

        for_each_online_cpu(cpu) {
                int i, phys_id = topology_physical_package_id(cpu);

                for_each_cpu(i, &uncore_cpu_mask) {
                        if (phys_id == topology_physical_package_id(i)) {
                                phys_id = -1;
                                break;
                        }
                }
                if (phys_id < 0)
                        continue;

                uncore_cpu_prepare(cpu, phys_id);
                uncore_event_init_cpu(cpu);
        }
        on_each_cpu(uncore_cpu_setup, NULL, 1);

        register_cpu_notifier(&uncore_cpu_nb);

        put_online_cpus();

        return 0;
}
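
/*
 * Note: the loop above picks one online CPU per physical package as the
 * initial uncore event collector, and on_each_cpu(uncore_cpu_setup, ...)
 * then lets every CPU attach to (or initialize) the boxes of its package
 * before the hotplug notifier is registered.
 */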

static int __init uncore_pmus_register(void)
{
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_type *type;
        int i, j;

        for (i = 0; msr_uncores[i]; i++) {
                type = msr_uncores[i];
                for (j = 0; j < type->num_boxes; j++) {
                        pmu = &type->pmus[j];
                        uncore_pmu_register(pmu);
                }
        }

        for (i = 0; pci_uncores[i]; i++) {
                type = pci_uncores[i];
                for (j = 0; j < type->num_boxes; j++) {
                        pmu = &type->pmus[j];
                        uncore_pmu_register(pmu);
                }
        }

        return 0;
}

static int __init intel_uncore_init(void)
{
        int ret;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
                return -ENODEV;

        if (cpu_has_hypervisor)
                return -ENODEV;

        ret = uncore_pci_init();
        if (ret)
                goto fail;
        ret = uncore_cpu_init();
        if (ret) {
                uncore_pci_exit();
                goto fail;
        }

        uncore_pmus_register();
        return 0;
fail:
        return ret;
}
device_initcall(intel_uncore_init);