arch/x86/kernel/cpu/perf_event_intel_uncore.c
1#include "perf_event_intel_uncore.h"
2
3static struct intel_uncore_type *empty_uncore[] = { NULL, };
4static struct intel_uncore_type **msr_uncores = empty_uncore;
5static struct intel_uncore_type **pci_uncores = empty_uncore;
6/* pci bus to socket mapping */
7static int pcibus_to_physid[256] = { [0 ... 255] = -1, };
8
9static DEFINE_RAW_SPINLOCK(uncore_box_lock);
10
11/* mask of cpus that collect uncore events */
12static cpumask_t uncore_cpu_mask;
13
14/* constraint for the fixed counter */
15static struct event_constraint constraint_fixed =
16 EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
17static struct event_constraint constraint_empty =
18 EVENT_CONSTRAINT(0, 0, 0);
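/*
 * Note: constraint_empty has an all-zero counter mask; the get_constraint
 * callbacks below return it when a shared extra register cannot be
 * allocated, so the conflicting event is not scheduled.
 */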
19
20DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
21DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
22DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
23DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
24DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
25DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
26DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
27DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
28DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
29DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
30DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
31DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
32DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
33DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
34DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
35DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
36DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
37DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
38DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
39DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
40DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
41
42static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
43{
44 u64 count;
45
46 rdmsrl(event->hw.event_base, count);
47
48 return count;
49}
50
51/*
52 * generic get constraint function for shared match/mask registers.
53 */
54static struct event_constraint *
55uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
56{
57 struct intel_uncore_extra_reg *er;
58 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
59 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
60 unsigned long flags;
61 bool ok = false;
62
63 /*
64 * reg->alloc can be set due to existing state, so for fake box we
65 * need to ignore this, otherwise we might fail to allocate proper
66 * fake state for this extra reg constraint.
67 */
68 if (reg1->idx == EXTRA_REG_NONE ||
69 (!uncore_box_is_fake(box) && reg1->alloc))
70 return NULL;
71
72 er = &box->shared_regs[reg1->idx];
73 raw_spin_lock_irqsave(&er->lock, flags);
74 if (!atomic_read(&er->ref) ||
75 (er->config1 == reg1->config && er->config2 == reg2->config)) {
76 atomic_inc(&er->ref);
77 er->config1 = reg1->config;
78 er->config2 = reg2->config;
79 ok = true;
80 }
81 raw_spin_unlock_irqrestore(&er->lock, flags);
82
83 if (ok) {
84 if (!uncore_box_is_fake(box))
85 reg1->alloc = 1;
86 return NULL;
87 }
88
89 return &constraint_empty;
90}
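/*
 * Shared-register protocol used above: the first event to claim the
 * register bumps er->ref and records config1/config2; later events may
 * share it only if both values match, otherwise they get constraint_empty.
 */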
91
92static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
93{
94 struct intel_uncore_extra_reg *er;
95 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
96
97 /*
98 * Only put constraint if extra reg was actually allocated. Also
99 * takes care of events which do not use an extra shared reg.
100 *
101 * Also, if this is a fake box we shouldn't touch any event state
102 * (reg->alloc) and we don't care about leaving inconsistent box
103 * state either since it will be thrown out.
104 */
105 if (uncore_box_is_fake(box) || !reg1->alloc)
106 return;
107
108 er = &box->shared_regs[reg1->idx];
109 atomic_dec(&er->ref);
110 reg1->alloc = 0;
111}
112
113/* Sandy Bridge-EP uncore support */
114static struct intel_uncore_type snbep_uncore_cbox;
115static struct intel_uncore_type snbep_uncore_pcu;
116
117static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
118{
119 struct pci_dev *pdev = box->pci_dev;
120 int box_ctl = uncore_pci_box_ctl(box);
121 u32 config;
122
123 pci_read_config_dword(pdev, box_ctl, &config);
124 config |= SNBEP_PMON_BOX_CTL_FRZ;
125 pci_write_config_dword(pdev, box_ctl, config);
126}
127
128static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
129{
130 struct pci_dev *pdev = box->pci_dev;
131 int box_ctl = uncore_pci_box_ctl(box);
132 u32 config;
133
134 pci_read_config_dword(pdev, box_ctl, &config);
135 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
136 pci_write_config_dword(pdev, box_ctl, config);
137}
138
139static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
140{
141 struct pci_dev *pdev = box->pci_dev;
142 struct hw_perf_event *hwc = &event->hw;
143
144 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
145}
146
147static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
148{
149 struct pci_dev *pdev = box->pci_dev;
150 struct hw_perf_event *hwc = &event->hw;
151
152 pci_write_config_dword(pdev, hwc->config_base, hwc->config);
153}
154
155static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
156{
157 struct pci_dev *pdev = box->pci_dev;
158 struct hw_perf_event *hwc = &event->hw;
159 u64 count;
160
161 pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
162 pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
163
164 return count;
165}
166
167static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
168{
169 struct pci_dev *pdev = box->pci_dev;
170
171 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
172}
173
174static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
175{
176 u64 config;
177 unsigned msr;
178
179 msr = uncore_msr_box_ctl(box);
180 if (msr) {
181 rdmsrl(msr, config);
182 config |= SNBEP_PMON_BOX_CTL_FRZ;
183 wrmsrl(msr, config);
184 }
185}
186
187static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
188{
189 u64 config;
190 unsigned msr;
191
192 msr = uncore_msr_box_ctl(box);
193 if (msr) {
194 rdmsrl(msr, config);
195 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
196 wrmsrl(msr, config);
197 }
198}
199
200static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
201{
202 struct hw_perf_event *hwc = &event->hw;
203 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
204
205 if (reg1->idx != EXTRA_REG_NONE)
206 wrmsrl(reg1->reg, reg1->config);
207
208 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
209}
210
211static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
212 struct perf_event *event)
213{
214 struct hw_perf_event *hwc = &event->hw;
215
216 wrmsrl(hwc->config_base, hwc->config);
217}
218
219static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
220{
221 unsigned msr = uncore_msr_box_ctl(box);
222
223 if (msr)
224 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
225}
226
227static int snbep_uncore_hw_config(struct intel_uncore_box *box, struct perf_event *event)
228{
229 struct hw_perf_event *hwc = &event->hw;
230 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
231
232 if (box->pmu->type == &snbep_uncore_cbox) {
233 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
234 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
235 reg1->config = event->attr.config1 &
236 SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK;
237 } else {
238 if (box->pmu->type == &snbep_uncore_pcu) {
239 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
240 reg1->config = event->attr.config1 & SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK;
241 } else {
242 return 0;
243 }
244 }
245 reg1->idx = 0;
246
247 return 0;
248}
249
250static struct attribute *snbep_uncore_formats_attr[] = {
251 &format_attr_event.attr,
252 &format_attr_umask.attr,
253 &format_attr_edge.attr,
254 &format_attr_inv.attr,
255 &format_attr_thresh8.attr,
256 NULL,
257};
258
259static struct attribute *snbep_uncore_ubox_formats_attr[] = {
260 &format_attr_event.attr,
261 &format_attr_umask.attr,
262 &format_attr_edge.attr,
263 &format_attr_inv.attr,
264 &format_attr_thresh5.attr,
265 NULL,
266};
267
268static struct attribute *snbep_uncore_cbox_formats_attr[] = {
269 &format_attr_event.attr,
270 &format_attr_umask.attr,
271 &format_attr_edge.attr,
272 &format_attr_tid_en.attr,
273 &format_attr_inv.attr,
274 &format_attr_thresh8.attr,
275 &format_attr_filter_tid.attr,
276 &format_attr_filter_nid.attr,
277 &format_attr_filter_state.attr,
278 &format_attr_filter_opc.attr,
279 NULL,
280};
281
282static struct attribute *snbep_uncore_pcu_formats_attr[] = {
283 &format_attr_event.attr,
284 &format_attr_occ_sel.attr,
285 &format_attr_edge.attr,
286 &format_attr_inv.attr,
287 &format_attr_thresh5.attr,
288 &format_attr_occ_invert.attr,
289 &format_attr_occ_edge.attr,
290 &format_attr_filter_band0.attr,
291 &format_attr_filter_band1.attr,
292 &format_attr_filter_band2.attr,
293 &format_attr_filter_band3.attr,
294 NULL,
295};
296
297static struct attribute *snbep_uncore_qpi_formats_attr[] = {
298 &format_attr_event_ext.attr,
299 &format_attr_umask.attr,
300 &format_attr_edge.attr,
301 &format_attr_inv.attr,
302 &format_attr_thresh8.attr,
303 NULL,
304};
305
306static struct uncore_event_desc snbep_uncore_imc_events[] = {
307 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
308 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
309 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
310 { /* end: all zeroes */ },
311};
312
313static struct uncore_event_desc snbep_uncore_qpi_events[] = {
314 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
315 INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
316 INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x02,umask=0x08"),
317 INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x03,umask=0x04"),
318 { /* end: all zeroes */ },
319};
320
321static struct attribute_group snbep_uncore_format_group = {
322 .name = "format",
323 .attrs = snbep_uncore_formats_attr,
324};
325
326static struct attribute_group snbep_uncore_ubox_format_group = {
327 .name = "format",
328 .attrs = snbep_uncore_ubox_formats_attr,
329};
330
331static struct attribute_group snbep_uncore_cbox_format_group = {
332 .name = "format",
333 .attrs = snbep_uncore_cbox_formats_attr,
334};
335
336static struct attribute_group snbep_uncore_pcu_format_group = {
337 .name = "format",
338 .attrs = snbep_uncore_pcu_formats_attr,
339};
340
341static struct attribute_group snbep_uncore_qpi_format_group = {
342 .name = "format",
343 .attrs = snbep_uncore_qpi_formats_attr,
344};
345
346static struct intel_uncore_ops snbep_uncore_msr_ops = {
347 .init_box = snbep_uncore_msr_init_box,
348 .disable_box = snbep_uncore_msr_disable_box,
349 .enable_box = snbep_uncore_msr_enable_box,
350 .disable_event = snbep_uncore_msr_disable_event,
351 .enable_event = snbep_uncore_msr_enable_event,
352 .read_counter = uncore_msr_read_counter,
353 .get_constraint = uncore_get_constraint,
354 .put_constraint = uncore_put_constraint,
355 .hw_config = snbep_uncore_hw_config,
356};
357
358static struct intel_uncore_ops snbep_uncore_pci_ops = {
359 .init_box = snbep_uncore_pci_init_box,
360 .disable_box = snbep_uncore_pci_disable_box,
361 .enable_box = snbep_uncore_pci_enable_box,
362 .disable_event = snbep_uncore_pci_disable_event,
363 .enable_event = snbep_uncore_pci_enable_event,
364 .read_counter = snbep_uncore_pci_read_counter,
365};
366
367static struct event_constraint snbep_uncore_cbox_constraints[] = {
368 UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
369 UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
370 UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
371 UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
372 UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
373 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
374 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
375 UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
376 UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
377 UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
378 UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
379 UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
380 EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
381 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
382 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
383 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
384 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
385 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
386 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
387 UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
388 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
389 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
390 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
391 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
392 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
393 EVENT_CONSTRAINT_END
394};
395
396static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
397 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
398 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
399 UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
400 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
401 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
402 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
403 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
404 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
405 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
406 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
407 EVENT_CONSTRAINT_END
408};
409
410static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
411 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
412 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
413 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
414 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
415 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
416 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
417 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
418 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
419 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
420 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
421 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
422 UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
423 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
424 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
425 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
426 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
427 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
428 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
429 EVENT_CONSTRAINT_END
430};
431
432static struct intel_uncore_type snbep_uncore_ubox = {
433 .name = "ubox",
434 .num_counters = 2,
435 .num_boxes = 1,
436 .perf_ctr_bits = 44,
437 .fixed_ctr_bits = 48,
438 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
439 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
440 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
441 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
442 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
443 .ops = &snbep_uncore_msr_ops,
444 .format_group = &snbep_uncore_ubox_format_group,
445};
446
447static struct intel_uncore_type snbep_uncore_cbox = {
448 .name = "cbox",
449 .num_counters = 4,
450 .num_boxes = 8,
451 .perf_ctr_bits = 44,
452 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
453 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
454 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
455 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
456 .msr_offset = SNBEP_CBO_MSR_OFFSET,
457 .num_shared_regs = 1,
458 .constraints = snbep_uncore_cbox_constraints,
459 .ops = &snbep_uncore_msr_ops,
460 .format_group = &snbep_uncore_cbox_format_group,
461};
462
463static struct intel_uncore_type snbep_uncore_pcu = {
464 .name = "pcu",
465 .num_counters = 4,
466 .num_boxes = 1,
467 .perf_ctr_bits = 48,
468 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
469 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
470 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
471 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
472 .num_shared_regs = 1,
473 .ops = &snbep_uncore_msr_ops,
474 .format_group = &snbep_uncore_pcu_format_group,
475};
476
477static struct intel_uncore_type *snbep_msr_uncores[] = {
478 &snbep_uncore_ubox,
479 &snbep_uncore_cbox,
480 &snbep_uncore_pcu,
481 NULL,
482};
483
484#define SNBEP_UNCORE_PCI_COMMON_INIT() \
485 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
486 .event_ctl = SNBEP_PCI_PMON_CTL0, \
487 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \
488 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
489 .ops = &snbep_uncore_pci_ops, \
490 .format_group = &snbep_uncore_format_group
491
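/*
 * SNBEP_UNCORE_PCI_COMMON_INIT() fills in the PCI PMON register layout
 * shared by the HA, IMC, R2PCIe and R3QPI uncore types defined below;
 * the QPI type spells its fields out instead because it needs a wider
 * event mask and its own format group.
 */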
492static struct intel_uncore_type snbep_uncore_ha = {
493 .name = "ha",
494 .num_counters = 4,
495 .num_boxes = 1,
496 .perf_ctr_bits = 48,
497 SNBEP_UNCORE_PCI_COMMON_INIT(),
498};
499
500static struct intel_uncore_type snbep_uncore_imc = {
501 .name = "imc",
502 .num_counters = 4,
503 .num_boxes = 4,
504 .perf_ctr_bits = 48,
505 .fixed_ctr_bits = 48,
506 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
507 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
508 .event_descs = snbep_uncore_imc_events,
509 SNBEP_UNCORE_PCI_COMMON_INIT(),
510};
511
512static struct intel_uncore_type snbep_uncore_qpi = {
513 .name = "qpi",
514 .num_counters = 4,
515 .num_boxes = 2,
516 .perf_ctr_bits = 48,
517 .perf_ctr = SNBEP_PCI_PMON_CTR0,
518 .event_ctl = SNBEP_PCI_PMON_CTL0,
519 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
520 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
521 .ops = &snbep_uncore_pci_ops,
522 .event_descs = snbep_uncore_qpi_events,
523 .format_group = &snbep_uncore_qpi_format_group,
524};
525
526
527static struct intel_uncore_type snbep_uncore_r2pcie = {
528 .name = "r2pcie",
529 .num_counters = 4,
530 .num_boxes = 1,
531 .perf_ctr_bits = 44,
532 .constraints = snbep_uncore_r2pcie_constraints,
533 SNBEP_UNCORE_PCI_COMMON_INIT(),
534};
535
536static struct intel_uncore_type snbep_uncore_r3qpi = {
537 .name = "r3qpi",
538 .num_counters = 3,
539 .num_boxes = 2,
540 .perf_ctr_bits = 44,
541 .constraints = snbep_uncore_r3qpi_constraints,
542 SNBEP_UNCORE_PCI_COMMON_INIT(),
543};
544
545static struct intel_uncore_type *snbep_pci_uncores[] = {
546 &snbep_uncore_ha,
547 &snbep_uncore_imc,
548 &snbep_uncore_qpi,
549 &snbep_uncore_r2pcie,
550 &snbep_uncore_r3qpi,
551 NULL,
552};
553
554static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
555 { /* Home Agent */
556 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
557 .driver_data = (unsigned long)&snbep_uncore_ha,
558 },
559 { /* MC Channel 0 */
560 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
561 .driver_data = (unsigned long)&snbep_uncore_imc,
562 },
563 { /* MC Channel 1 */
564 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
565 .driver_data = (unsigned long)&snbep_uncore_imc,
566 },
567 { /* MC Channel 2 */
568 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
569 .driver_data = (unsigned long)&snbep_uncore_imc,
570 },
571 { /* MC Channel 3 */
572 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
573 .driver_data = (unsigned long)&snbep_uncore_imc,
574 },
575 { /* QPI Port 0 */
576 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
577 .driver_data = (unsigned long)&snbep_uncore_qpi,
578 },
579 { /* QPI Port 1 */
580 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
581 .driver_data = (unsigned long)&snbep_uncore_qpi,
582 },
583 { /* P2PCIe */
584 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
585 .driver_data = (unsigned long)&snbep_uncore_r2pcie,
586 },
587 { /* R3QPI Link 0 */
588 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
589 .driver_data = (unsigned long)&snbep_uncore_r3qpi,
590 },
591 { /* R3QPI Link 1 */
592 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
593 .driver_data = (unsigned long)&snbep_uncore_r3qpi,
594 },
595 { /* end: all zeroes */ }
596};
597
598static struct pci_driver snbep_uncore_pci_driver = {
599 .name = "snbep_uncore",
600 .id_table = snbep_uncore_pci_ids,
601};
602
603/*
604 * build pci bus to socket mapping
605 */
606static void snbep_pci2phy_map_init(void)
607{
608 struct pci_dev *ubox_dev = NULL;
609 int i, bus, nodeid;
610 u32 config;
611
612 while (1) {
613 /* find the UBOX device */
614 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
615 PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX,
616 ubox_dev);
617 if (!ubox_dev)
618 break;
619 bus = ubox_dev->bus->number;
620 /* get the Node ID of the local register */
621 pci_read_config_dword(ubox_dev, 0x40, &config);
622 nodeid = config;
623 /* get the Node ID mapping */
624 pci_read_config_dword(ubox_dev, 0x54, &config);
625 /*
626 * every three bits in the Node ID mapping register maps
627 * to a particular node.
628 */
629 for (i = 0; i < 8; i++) {
630 if (nodeid == ((config >> (3 * i)) & 0x7)) {
631 pcibus_to_physid[bus] = i;
632 break;
633 }
634 }
635 };
636 return;
637}
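/*
 * In other words: the UBox config register at offset 0x40 holds this
 * socket's node ID, and each 3-bit field i of the mapping register at
 * offset 0x54 holds the node ID of physical socket i; the matching i is
 * recorded in pcibus_to_physid[] for this bus. With an identity mapping
 * (field i == i), a UBox reporting node ID n simply maps its bus to
 * socket n.
 */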
638/* end of Sandy Bridge-EP uncore support */
639
640/* Sandy Bridge uncore support */
641static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
642{
643 struct hw_perf_event *hwc = &event->hw;
644
645 if (hwc->idx < UNCORE_PMC_IDX_FIXED)
646 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
647 else
648 wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
649}
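/*
 * Note: the fixed uncore counter has no event select field, so enabling
 * it above only writes the SNB_UNC_CTL_EN bit.
 */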
650
651static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
652{
653 wrmsrl(event->hw.config_base, 0);
654}
655
656static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
657{
658 if (box->pmu->pmu_idx == 0) {
659 wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
660 SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
661 }
662}
663
664static struct uncore_event_desc snb_uncore_events[] = {
665 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
666 { /* end: all zeroes */ },
667};
668
669static struct attribute *snb_uncore_formats_attr[] = {
670 &format_attr_event.attr,
671 &format_attr_umask.attr,
672 &format_attr_edge.attr,
673 &format_attr_inv.attr,
674 &format_attr_cmask5.attr,
675 NULL,
676};
677
678static struct attribute_group snb_uncore_format_group = {
679 .name = "format",
680 .attrs = snb_uncore_formats_attr,
681};
682
683static struct intel_uncore_ops snb_uncore_msr_ops = {
684 .init_box = snb_uncore_msr_init_box,
685 .disable_event = snb_uncore_msr_disable_event,
686 .enable_event = snb_uncore_msr_enable_event,
687 .read_counter = uncore_msr_read_counter,
688};
689
690static struct event_constraint snb_uncore_cbox_constraints[] = {
691 UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
692 UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
693 EVENT_CONSTRAINT_END
694};
695
696static struct intel_uncore_type snb_uncore_cbox = {
697 .name = "cbox",
698 .num_counters = 2,
699 .num_boxes = 4,
700 .perf_ctr_bits = 44,
701 .fixed_ctr_bits = 48,
702 .perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
703 .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
704 .fixed_ctr = SNB_UNC_FIXED_CTR,
705 .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
706 .single_fixed = 1,
707 .event_mask = SNB_UNC_RAW_EVENT_MASK,
708 .msr_offset = SNB_UNC_CBO_MSR_OFFSET,
709 .constraints = snb_uncore_cbox_constraints,
710 .ops = &snb_uncore_msr_ops,
711 .format_group = &snb_uncore_format_group,
712 .event_descs = snb_uncore_events,
713};
714
715static struct intel_uncore_type *snb_msr_uncores[] = {
716 &snb_uncore_cbox,
717 NULL,
718};
719/* end of Sandy Bridge uncore support */
720
721/* Nehalem uncore support */
722static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
723{
724 wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
725}
726
727static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
728{
729 wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
730}
731
732static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
733{
734 struct hw_perf_event *hwc = &event->hw;
735
736 if (hwc->idx < UNCORE_PMC_IDX_FIXED)
737 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
738 else
739 wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
740}
741
742static struct attribute *nhm_uncore_formats_attr[] = {
743 &format_attr_event.attr,
744 &format_attr_umask.attr,
745 &format_attr_edge.attr,
746 &format_attr_inv.attr,
747 &format_attr_cmask8.attr,
748 NULL,
749};
750
751static struct attribute_group nhm_uncore_format_group = {
752 .name = "format",
753 .attrs = nhm_uncore_formats_attr,
754};
755
756static struct uncore_event_desc nhm_uncore_events[] = {
757 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
758 INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"),
759 INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"),
760 INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, "event=0x20,umask=0x01"),
761 INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"),
762 INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"),
763 INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
764 INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"),
765 INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"),
766 { /* end: all zeroes */ },
767};
768
769static struct intel_uncore_ops nhm_uncore_msr_ops = {
770 .disable_box = nhm_uncore_msr_disable_box,
771 .enable_box = nhm_uncore_msr_enable_box,
772 .disable_event = snb_uncore_msr_disable_event,
773 .enable_event = nhm_uncore_msr_enable_event,
774 .read_counter = uncore_msr_read_counter,
775};
776
777static struct intel_uncore_type nhm_uncore = {
778 .name = "",
779 .num_counters = 8,
780 .num_boxes = 1,
781 .perf_ctr_bits = 48,
782 .fixed_ctr_bits = 48,
783 .event_ctl = NHM_UNC_PERFEVTSEL0,
784 .perf_ctr = NHM_UNC_UNCORE_PMC0,
785 .fixed_ctr = NHM_UNC_FIXED_CTR,
786 .fixed_ctl = NHM_UNC_FIXED_CTR_CTRL,
787 .event_mask = NHM_UNC_RAW_EVENT_MASK,
788 .event_descs = nhm_uncore_events,
789 .ops = &nhm_uncore_msr_ops,
790 .format_group = &nhm_uncore_format_group,
791};
792
793static struct intel_uncore_type *nhm_msr_uncores[] = {
794 &nhm_uncore,
795 NULL,
796};
797/* end of Nehalem uncore support */
798
799/* Nehalem-EX uncore support */
800#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
801 ((1ULL << (n)) - 1)))
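/* e.g. __BITS_VALUE(0xabcd, 1, 8) extracts field 1 of 8-bit fields: 0xab. */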
802
803DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
804DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
805DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
806DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
807
808static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
809{
810 wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
811}
812
813static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
814{
815 unsigned msr = uncore_msr_box_ctl(box);
816 u64 config;
817
818 if (msr) {
819 rdmsrl(msr, config);
820 config &= ~((1ULL << uncore_num_counters(box)) - 1);
821 /* WBox has a fixed counter */
822 if (uncore_msr_fixed_ctl(box))
823 config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
824 wrmsrl(msr, config);
825 }
826}
827
828static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
829{
830 unsigned msr = uncore_msr_box_ctl(box);
831 u64 config;
832
833 if (msr) {
834 rdmsrl(msr, config);
835 config |= (1ULL << uncore_num_counters(box)) - 1;
836 /* WBox has a fixed counter */
837 if (uncore_msr_fixed_ctl(box))
838 config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
839 wrmsrl(msr, config);
840 }
841}
842
843static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
844{
845 wrmsrl(event->hw.config_base, 0);
846}
847
848static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
849{
850 struct hw_perf_event *hwc = &event->hw;
851
852 if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
853 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
854 else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
855 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
856 else
857 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
858}
859
860#define NHMEX_UNCORE_OPS_COMMON_INIT() \
861 .init_box = nhmex_uncore_msr_init_box, \
862 .disable_box = nhmex_uncore_msr_disable_box, \
863 .enable_box = nhmex_uncore_msr_enable_box, \
864 .disable_event = nhmex_uncore_msr_disable_event, \
865 .read_counter = uncore_msr_read_counter
866
867static struct intel_uncore_ops nhmex_uncore_ops = {
868 NHMEX_UNCORE_OPS_COMMON_INIT(),
869 .enable_event = nhmex_uncore_msr_enable_event,
870};
871
872static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
873 &format_attr_event.attr,
874 &format_attr_edge.attr,
875 NULL,
876};
877
878static struct attribute_group nhmex_uncore_ubox_format_group = {
879 .name = "format",
880 .attrs = nhmex_uncore_ubox_formats_attr,
881};
882
883static struct intel_uncore_type nhmex_uncore_ubox = {
884 .name = "ubox",
885 .num_counters = 1,
886 .num_boxes = 1,
887 .perf_ctr_bits = 48,
888 .event_ctl = NHMEX_U_MSR_PMON_EV_SEL,
889 .perf_ctr = NHMEX_U_MSR_PMON_CTR,
890 .event_mask = NHMEX_U_PMON_RAW_EVENT_MASK,
891 .box_ctl = NHMEX_U_MSR_PMON_GLOBAL_CTL,
892 .ops = &nhmex_uncore_ops,
893 .format_group = &nhmex_uncore_ubox_format_group
894};
895
896static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
897 &format_attr_event.attr,
898 &format_attr_umask.attr,
899 &format_attr_edge.attr,
900 &format_attr_inv.attr,
901 &format_attr_thresh8.attr,
902 NULL,
903};
904
905static struct attribute_group nhmex_uncore_cbox_format_group = {
906 .name = "format",
907 .attrs = nhmex_uncore_cbox_formats_attr,
908};
909
910/* msr offset for each instance of cbox */
911static unsigned nhmex_cbox_msr_offsets[] = {
912 0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
913};
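/*
 * The C-box MSR blocks are not evenly spaced on these parts, so the type
 * below uses this per-box offset table (.msr_offsets) rather than a
 * single uniform .msr_offset stride.
 */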
914
915static struct intel_uncore_type nhmex_uncore_cbox = {
916 .name = "cbox",
917 .num_counters = 6,
918 .num_boxes = 10,
919 .perf_ctr_bits = 48,
920 .event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0,
921 .perf_ctr = NHMEX_C0_MSR_PMON_CTR0,
922 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
923 .box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL,
924 .msr_offsets = nhmex_cbox_msr_offsets,
925 .pair_ctr_ctl = 1,
926 .ops = &nhmex_uncore_ops,
927 .format_group = &nhmex_uncore_cbox_format_group
928};
929
930static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
931 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
932 { /* end: all zeroes */ },
933};
934
935static struct intel_uncore_type nhmex_uncore_wbox = {
936 .name = "wbox",
937 .num_counters = 4,
938 .num_boxes = 1,
939 .perf_ctr_bits = 48,
940 .event_ctl = NHMEX_W_MSR_PMON_CNT0,
941 .perf_ctr = NHMEX_W_MSR_PMON_EVT_SEL0,
942 .fixed_ctr = NHMEX_W_MSR_PMON_FIXED_CTR,
943 .fixed_ctl = NHMEX_W_MSR_PMON_FIXED_CTL,
944 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
945 .box_ctl = NHMEX_W_MSR_GLOBAL_CTL,
946 .pair_ctr_ctl = 1,
947 .event_descs = nhmex_uncore_wbox_events,
948 .ops = &nhmex_uncore_ops,
949 .format_group = &nhmex_uncore_cbox_format_group
950};
951
952static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
953{
954 struct hw_perf_event *hwc = &event->hw;
955 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
956 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
957 int ctr, ev_sel;
958
959 ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
960 NHMEX_B_PMON_CTR_SHIFT;
961 ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
962 NHMEX_B_PMON_CTL_EV_SEL_SHIFT;
963
964 /* events that do not use the match/mask registers */
965 if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
966 (ctr == 2 && ev_sel != 0x4) || ctr == 3)
967 return 0;
968
969 if (box->pmu->pmu_idx == 0)
970 reg1->reg = NHMEX_B0_MSR_MATCH;
971 else
972 reg1->reg = NHMEX_B1_MSR_MATCH;
973 reg1->idx = 0;
974 reg1->config = event->attr.config1;
975 reg2->config = event->attr.config2;
976 return 0;
977}
978
979static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
980{
981 struct hw_perf_event *hwc = &event->hw;
982 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
983 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
984
985 if (reg1->idx != EXTRA_REG_NONE) {
986 wrmsrl(reg1->reg, reg1->config);
987 wrmsrl(reg1->reg + 1, reg2->config);
988 }
989 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
990 (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
991}
992
993/*
994 * The Bbox has 4 counters, but each counter monitors different events.
995 * Use bits 6-7 in the event config to select counter.
996 */
997static struct event_constraint nhmex_uncore_bbox_constraints[] = {
998 EVENT_CONSTRAINT(0 , 1, 0xc0),
999 EVENT_CONSTRAINT(0x40, 2, 0xc0),
1000 EVENT_CONSTRAINT(0x80, 4, 0xc0),
1001 EVENT_CONSTRAINT(0xc0, 8, 0xc0),
1002 EVENT_CONSTRAINT_END,
1003};
1004
1005static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
1006 &format_attr_event5.attr,
1007 &format_attr_counter.attr,
1008 &format_attr_match.attr,
1009 &format_attr_mask.attr,
1010 NULL,
1011};
1012
1013static struct attribute_group nhmex_uncore_bbox_format_group = {
1014 .name = "format",
1015 .attrs = nhmex_uncore_bbox_formats_attr,
1016};
1017
1018static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
1019 NHMEX_UNCORE_OPS_COMMON_INIT(),
1020 .enable_event = nhmex_bbox_msr_enable_event,
1021 .hw_config = nhmex_bbox_hw_config,
1022 .get_constraint = uncore_get_constraint,
1023 .put_constraint = uncore_put_constraint,
1024};
1025
1026static struct intel_uncore_type nhmex_uncore_bbox = {
1027 .name = "bbox",
1028 .num_counters = 4,
1029 .num_boxes = 2,
1030 .perf_ctr_bits = 48,
1031 .event_ctl = NHMEX_B0_MSR_PMON_CTL0,
1032 .perf_ctr = NHMEX_B0_MSR_PMON_CTR0,
1033 .event_mask = NHMEX_B_PMON_RAW_EVENT_MASK,
1034 .box_ctl = NHMEX_B0_MSR_PMON_GLOBAL_CTL,
1035 .msr_offset = NHMEX_B_MSR_OFFSET,
1036 .pair_ctr_ctl = 1,
1037 .num_shared_regs = 1,
1038 .constraints = nhmex_uncore_bbox_constraints,
1039 .ops = &nhmex_uncore_bbox_ops,
1040 .format_group = &nhmex_uncore_bbox_format_group
1041};
1042
1043static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1044{
1045 struct hw_perf_event *hwc = &event->hw;
1046 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1047 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1048
1049 /* only TO_R_PROG_EV event uses the match/mask register */
1050 if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
1051 NHMEX_S_EVENT_TO_R_PROG_EV)
1052 return 0;
1053
1054 if (box->pmu->pmu_idx == 0)
1055 reg1->reg = NHMEX_S0_MSR_MM_CFG;
1056 else
1057 reg1->reg = NHMEX_S1_MSR_MM_CFG;
1058 reg1->idx = 0;
1059 reg1->config = event->attr.config1;
1060 reg2->config = event->attr.config2;
1061 return 0;
1062}
1063
1064static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1065{
1066 struct hw_perf_event *hwc = &event->hw;
1067 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1068 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1069
1070 if (reg1->idx != EXTRA_REG_NONE) {
1071 wrmsrl(reg1->reg, 0);
1072 wrmsrl(reg1->reg + 1, reg1->config);
1073 wrmsrl(reg1->reg + 2, reg2->config);
1074 wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
1075 }
1076 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
1077}
1078
1079static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
1080 &format_attr_event.attr,
1081 &format_attr_umask.attr,
1082 &format_attr_edge.attr,
1083 &format_attr_inv.attr,
1084 &format_attr_thresh8.attr,
1085 &format_attr_match.attr,
1086 &format_attr_mask.attr,
1087 NULL,
1088};
1089
1090static struct attribute_group nhmex_uncore_sbox_format_group = {
1091 .name = "format",
1092 .attrs = nhmex_uncore_sbox_formats_attr,
1093};
1094
1095static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
1096 NHMEX_UNCORE_OPS_COMMON_INIT(),
1097 .enable_event = nhmex_sbox_msr_enable_event,
1098 .hw_config = nhmex_sbox_hw_config,
1099 .get_constraint = uncore_get_constraint,
1100 .put_constraint = uncore_put_constraint,
1101};
1102
1103static struct intel_uncore_type nhmex_uncore_sbox = {
1104 .name = "sbox",
1105 .num_counters = 4,
1106 .num_boxes = 2,
1107 .perf_ctr_bits = 48,
1108 .event_ctl = NHMEX_S0_MSR_PMON_CTL0,
1109 .perf_ctr = NHMEX_S0_MSR_PMON_CTR0,
1110 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
1111 .box_ctl = NHMEX_S0_MSR_PMON_GLOBAL_CTL,
1112 .msr_offset = NHMEX_S_MSR_OFFSET,
1113 .pair_ctr_ctl = 1,
1114 .num_shared_regs = 1,
1115 .ops = &nhmex_uncore_sbox_ops,
1116 .format_group = &nhmex_uncore_sbox_format_group
1117};
1118
1119enum {
1120 EXTRA_REG_NHMEX_M_FILTER,
1121 EXTRA_REG_NHMEX_M_DSP,
1122 EXTRA_REG_NHMEX_M_ISS,
1123 EXTRA_REG_NHMEX_M_MAP,
1124 EXTRA_REG_NHMEX_M_MSC_THR,
1125 EXTRA_REG_NHMEX_M_PGT,
1126 EXTRA_REG_NHMEX_M_PLD,
1127 EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
1128};
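/*
 * Each value above indexes box->shared_regs[] for the mbox. The single
 * ZDP_CTL_FVC entry is further split into four byte-wide reference counts
 * (one per FVC sub-field for events 0xd-0x10), manipulated below with
 * atomic_add/atomic_sub of 1 << (idx * 8).
 */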
1129
1130static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
1131 MBOX_INC_SEL_EXTAR_REG(0x0, DSP),
1132 MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR),
1133 MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR),
1134 MBOX_INC_SEL_EXTAR_REG(0x9, ISS),
1135 /* event 0xa uses two extra registers */
1136 MBOX_INC_SEL_EXTAR_REG(0xa, ISS),
1137 MBOX_INC_SEL_EXTAR_REG(0xa, PLD),
1138 MBOX_INC_SEL_EXTAR_REG(0xb, PLD),
1139 /* events 0xd ~ 0x10 use the same extra register */
1140 MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC),
1141 MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC),
1142 MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC),
1143 MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC),
1144 MBOX_INC_SEL_EXTAR_REG(0x16, PGT),
1145 MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
1146 MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
1147 MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
1148 MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
1149 EVENT_EXTRA_END
1150};
1151
1152/* Nehalem-EX or Westmere-EX ? */
1153bool uncore_nhmex;
1154
1155static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
1156{
1157 struct intel_uncore_extra_reg *er;
1158 unsigned long flags;
1159 bool ret = false;
1160 u64 mask;
1161
1162 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
1163 er = &box->shared_regs[idx];
1164 raw_spin_lock_irqsave(&er->lock, flags);
1165 if (!atomic_read(&er->ref) || er->config == config) {
1166 atomic_inc(&er->ref);
1167 er->config = config;
1168 ret = true;
1169 }
1170 raw_spin_unlock_irqrestore(&er->lock, flags);
1171
1172 return ret;
1173 }
1174 /*
1175 * The ZDP_CTL_FVC MSR has 4 fields which are used to control
1176 * events 0xd ~ 0x10. Besides these 4 fields, there are additional
1177 * fields which are shared.
1178 */
1179 idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1180 if (WARN_ON_ONCE(idx >= 4))
1181 return false;
1182
1183 /* mask of the shared fields */
1184 if (uncore_nhmex)
1185 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
1186 else
1187 mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
1188 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
1189
1190 raw_spin_lock_irqsave(&er->lock, flags);
1191 /* add mask of the non-shared field if it's in use */
1192 if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
1193 if (uncore_nhmex)
1194 mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1195 else
1196 mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1197 }
1198
1199 if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
1200 atomic_add(1 << (idx * 8), &er->ref);
1201 if (uncore_nhmex)
1202 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
1203 NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1204 else
1205 mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
1206 WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1207 er->config &= ~mask;
1208 er->config |= (config & mask);
1209 ret = true;
1210 }
1211 raw_spin_unlock_irqrestore(&er->lock, flags);
1212
1213 return ret;
1214}
1215
1216static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
1217{
1218 struct intel_uncore_extra_reg *er;
1219
1220 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
1221 er = &box->shared_regs[idx];
1222 atomic_dec(&er->ref);
1223 return;
1224 }
1225
1226 idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1227 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
1228 atomic_sub(1 << (idx * 8), &er->ref);
1229}
1230
1231u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
1232{
1233 struct hw_perf_event *hwc = &event->hw;
1234 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1235 int idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
1236 u64 config = reg1->config;
1237
1238 /* get the non-shared control bits and shift them */
1239 idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1240 if (uncore_nhmex)
1241 config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1242 else
1243 config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1244 if (new_idx > orig_idx) {
1245 idx = new_idx - orig_idx;
1246 config <<= 3 * idx;
1247 } else {
1248 idx = orig_idx - new_idx;
1249 config >>= 3 * idx;
1250 }
1251
1252 /* add the shared control bits back */
1253 if (uncore_nhmex)
1254 config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
1255 else
1256 config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
1257 config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
1258 if (modify) {
1259 /* adjust the main event selector */
1260 if (new_idx > orig_idx)
1261 hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
1262 else
1263 hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
1264 reg1->config = config;
1265 reg1->idx = ~0xff | new_idx;
1266 }
1267 return config;
1268}
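/*
 * nhmex_mbox_alter_er() migrates an event between the four equivalent
 * ZDP_CTL_FVC sub-fields: it shifts the non-shared FVC bits by 3 bits per
 * step, re-ORs the shared bits, and (when modify is set) adjusts inc_sel
 * in the main event config and records the new sub-field index.
 */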
1269
1270static struct event_constraint *
1271nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1272{
1273 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1274 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
1275 int i, idx[2], alloc = 0;
1276 u64 config1 = reg1->config;
1277
1278 idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
1279 idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
1280again:
1281 for (i = 0; i < 2; i++) {
1282 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
1283 idx[i] = 0xff;
1284
1285 if (idx[i] == 0xff)
1286 continue;
1287
1288 if (!nhmex_mbox_get_shared_reg(box, idx[i],
1289 __BITS_VALUE(config1, i, 32)))
1290 goto fail;
1291 alloc |= (0x1 << i);
1292 }
1293
1294 /* for the match/mask registers */
1295 if (reg2->idx != EXTRA_REG_NONE &&
1296 (uncore_box_is_fake(box) || !reg2->alloc) &&
1297 !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
1298 goto fail;
1299
1300 /*
1301 * If it's a fake box -- as per validate_{group,event}() we
1302 * shouldn't touch event state and we can avoid doing so
1303 * since both will only call get_event_constraints() once
1304 * on each event, this avoids the need for reg->alloc.
1305 */
1306 if (!uncore_box_is_fake(box)) {
1307 if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
1308 nhmex_mbox_alter_er(event, idx[0], true);
1309 reg1->alloc |= alloc;
1310 if (reg2->idx != EXTRA_REG_NONE)
1311 reg2->alloc = 1;
1312 }
1313 return NULL;
1314fail:
1315 if (idx[0] != 0xff && !(alloc & 0x1) &&
1316 idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
1317 /*
1318 * events 0xd ~ 0x10 are functionally identical, but are
1319 * controlled by different fields in the ZDP_CTL_FVC
1320 * register. If we failed to take one field, try the
1321 * other 3 choices.
1322 */
1323 BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
1324 idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1325 idx[0] = (idx[0] + 1) % 4;
1326 idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1327 if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
1328 config1 = nhmex_mbox_alter_er(event, idx[0], false);
1329 goto again;
1330 }
1331 }
1332
1333 if (alloc & 0x1)
1334 nhmex_mbox_put_shared_reg(box, idx[0]);
1335 if (alloc & 0x2)
1336 nhmex_mbox_put_shared_reg(box, idx[1]);
1337 return &constraint_empty;
1338}
1339
1340static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
1341{
1342 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1343 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
1344
1345 if (uncore_box_is_fake(box))
1346 return;
1347
1348 if (reg1->alloc & 0x1)
1349 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
1350 if (reg1->alloc & 0x2)
1351 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
1352 reg1->alloc = 0;
1353
1354 if (reg2->alloc) {
1355 nhmex_mbox_put_shared_reg(box, reg2->idx);
1356 reg2->alloc = 0;
1357 }
1358}
1359
1360static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
1361{
1362 if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
1363 return er->idx;
1364 return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
1365}
1366
1367static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1368{
1369 struct intel_uncore_type *type = box->pmu->type;
1370 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1371 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
1372 struct extra_reg *er;
1373 unsigned msr;
1374 int reg_idx = 0;
1375 /*
1376 * The mbox events may require 2 extra MSRs at the most. But only
1377 * the lower 32 bits in these MSRs are significant, so we can use
1378 * config1 to pass two MSRs' config.
1379 */
1380 for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
1381 if (er->event != (event->hw.config & er->config_mask))
1382 continue;
1383 if (event->attr.config1 & ~er->valid_mask)
1384 return -EINVAL;
1385
1386 msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
1387 if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
1388 return -EINVAL;
1389
1390 /* always use the 32~63 bits to pass the PLD config */
1391 if (er->idx == EXTRA_REG_NHMEX_M_PLD)
1392 reg_idx = 1;
1393 else if (WARN_ON_ONCE(reg_idx > 0))
1394 return -EINVAL;
1395
1396 reg1->idx &= ~(0xff << (reg_idx * 8));
1397 reg1->reg &= ~(0xffff << (reg_idx * 16));
1398 reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
1399 reg1->reg |= msr << (reg_idx * 16);
1400 reg1->config = event->attr.config1;
1401 reg_idx++;
1402 }
1403 /*
1404 * The mbox only provides ability to perform address matching
1405 * for the PLD events.
1406 */
1407 if (reg_idx == 2) {
1408 reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
1409 if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
1410 reg2->config = event->attr.config2;
1411 else
1412 reg2->config = ~0ULL;
1413 if (box->pmu->pmu_idx == 0)
1414 reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
1415 else
1416 reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
1417 }
1418 return 0;
1419}
1420
1421static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
1422{
1423 struct intel_uncore_extra_reg *er;
1424 unsigned long flags;
1425 u64 config;
1426
1427 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
1428 return box->shared_regs[idx].config;
1429
1430 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
1431 raw_spin_lock_irqsave(&er->lock, flags);
1432 config = er->config;
1433 raw_spin_unlock_irqrestore(&er->lock, flags);
1434 return config;
1435}
1436
1437static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1438{
1439 struct hw_perf_event *hwc = &event->hw;
1440 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1441 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1442 int idx;
1443
1444 idx = __BITS_VALUE(reg1->idx, 0, 8);
1445 if (idx != 0xff)
1446 wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
1447 nhmex_mbox_shared_reg_config(box, idx));
1448 idx = __BITS_VALUE(reg1->idx, 1, 8);
1449 if (idx != 0xff)
1450 wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
1451 nhmex_mbox_shared_reg_config(box, idx));
1452
1453 if (reg2->idx != EXTRA_REG_NONE) {
1454 wrmsrl(reg2->reg, 0);
1455 if (reg2->config != ~0ULL) {
1456 wrmsrl(reg2->reg + 1,
1457 reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
1458 wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
1459 (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
1460 wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
1461 }
1462 }
1463
1464 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
1465}
1466
1467DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3");
1468DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5");
1469DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6");
1470DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7");
1471DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13");
1472DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21");
1473DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63");
1474DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33");
1475DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61");
1476DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31");
1477DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31");
1478DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31");
1479DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
1480DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31");
1481DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31");
1482DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63");
1483
1484static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
1485 &format_attr_count_mode.attr,
1486 &format_attr_storage_mode.attr,
1487 &format_attr_wrap_mode.attr,
1488 &format_attr_flag_mode.attr,
1489 &format_attr_inc_sel.attr,
1490 &format_attr_set_flag_sel.attr,
1491 &format_attr_filter_cfg_en.attr,
1492 &format_attr_filter_match.attr,
1493 &format_attr_filter_mask.attr,
1494 &format_attr_dsp.attr,
1495 &format_attr_thr.attr,
1496 &format_attr_fvc.attr,
1497 &format_attr_pgt.attr,
1498 &format_attr_map.attr,
1499 &format_attr_iss.attr,
1500 &format_attr_pld.attr,
1501 NULL,
1502};
1503
1504static struct attribute_group nhmex_uncore_mbox_format_group = {
1505 .name = "format",
1506 .attrs = nhmex_uncore_mbox_formats_attr,
1507};
1508
1509static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
1510 INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
1511 INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
1512 { /* end: all zeroes */ },
1513};
1514
1515static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
1516 INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
1517 INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
1518 { /* end: all zeroes */ },
1519};
1520
1521static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
1522 NHMEX_UNCORE_OPS_COMMON_INIT(),
1523 .enable_event = nhmex_mbox_msr_enable_event,
1524 .hw_config = nhmex_mbox_hw_config,
1525 .get_constraint = nhmex_mbox_get_constraint,
1526 .put_constraint = nhmex_mbox_put_constraint,
1527};
1528
1529static struct intel_uncore_type nhmex_uncore_mbox = {
1530 .name = "mbox",
1531 .num_counters = 6,
1532 .num_boxes = 2,
1533 .perf_ctr_bits = 48,
1534 .event_ctl = NHMEX_M0_MSR_PMU_CTL0,
1535 .perf_ctr = NHMEX_M0_MSR_PMU_CNT0,
1536 .event_mask = NHMEX_M_PMON_RAW_EVENT_MASK,
1537 .box_ctl = NHMEX_M0_MSR_GLOBAL_CTL,
1538 .msr_offset = NHMEX_M_MSR_OFFSET,
1539 .pair_ctr_ctl = 1,
1540 .num_shared_regs = 8,
1541 .event_descs = nhmex_uncore_mbox_events,
1542 .ops = &nhmex_uncore_mbox_ops,
1543 .format_group = &nhmex_uncore_mbox_format_group,
1544};
1545
1546void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
1547{
1548 struct hw_perf_event *hwc = &event->hw;
1549 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1550 int port;
1551
1552 /* adjust the main event selector and extra register index */
1553 if (reg1->idx % 2) {
1554 reg1->idx--;
1555 hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
1556 } else {
1557 reg1->idx++;
1558 hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
1559 }
1560
1561 /* adjust extra register config */
1562 port = reg1->idx / 6 + box->pmu->pmu_idx * 4;
1563 switch (reg1->idx % 6) {
1564 case 2:
1565 /* shift the 8~15 bits to the 0~7 bits */
1566 reg1->config >>= 8;
1567 break;
1568 case 3:
1569 /* shift the 0~7 bits to the 8~15 bits */
1570 reg1->config <<= 8;
1571 break;
1572 };
1573}
1574
1575/*
1576 * Each rbox has 4 event sets which monitor QPI port 0~3 or 4~7.
1577 * An event set consists of 6 events, the 3rd and 4th events in
1578 * an event set use the same extra register. So an event set uses
1579 * 5 extra registers.
1580 */
1581static struct event_constraint *
1582nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1583{
1584 struct hw_perf_event *hwc = &event->hw;
1585 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1586 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1587 struct intel_uncore_extra_reg *er;
1588 unsigned long flags;
1589 int idx, er_idx;
1590 u64 config1;
1591 bool ok = false;
1592
1593 if (!uncore_box_is_fake(box) && reg1->alloc)
1594 return NULL;
1595
1596 idx = reg1->idx % 6;
1597 config1 = reg1->config;
1598again:
1599 er_idx = idx;
1600 /* the 3rd and 4th events use the same extra register */
1601 if (er_idx > 2)
1602 er_idx--;
1603 er_idx += (reg1->idx / 6) * 5;
1604
1605 er = &box->shared_regs[er_idx];
1606 raw_spin_lock_irqsave(&er->lock, flags);
1607 if (idx < 2) {
1608 if (!atomic_read(&er->ref) || er->config == reg1->config) {
1609 atomic_inc(&er->ref);
1610 er->config = reg1->config;
1611 ok = true;
1612 }
1613 } else if (idx == 2 || idx == 3) {
1614 /*
1615 * these two events use different fields in an extra register,
1616 * the 0~7 bits and the 8~15 bits respectively.
1617 */
1618 u64 mask = 0xff << ((idx - 2) * 8);
1619 if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
1620 !((er->config ^ config1) & mask)) {
1621 atomic_add(1 << ((idx - 2) * 8), &er->ref);
1622 er->config &= ~mask;
1623 er->config |= config1 & mask;
1624 ok = true;
1625 }
1626 } else {
1627 if (!atomic_read(&er->ref) ||
1628 (er->config == (hwc->config >> 32) &&
1629 er->config1 == reg1->config &&
1630 er->config2 == reg2->config)) {
1631 atomic_inc(&er->ref);
1632 er->config = (hwc->config >> 32);
1633 er->config1 = reg1->config;
1634 er->config2 = reg2->config;
1635 ok = true;
1636 }
1637 }
1638 raw_spin_unlock_irqrestore(&er->lock, flags);
1639
1640 if (!ok) {
1641 /*
1642 * The Rbox events are always in pairs. The paired
1643 * events are functionally identical, but use different
1644 * extra registers. If we failed to take an extra
1645 * register, try the alternative.
1646 */
1647 if (idx % 2)
1648 idx--;
1649 else
1650 idx++;
1651 if (idx != reg1->idx % 6) {
1652 if (idx == 2)
1653 config1 >>= 8;
1654 else if (idx == 3)
1655 config1 <<= 8;
1656 goto again;
1657 }
1658 } else {
1659 if (!uncore_box_is_fake(box)) {
1660 if (idx != reg1->idx % 6)
1661 nhmex_rbox_alter_er(box, event);
1662 reg1->alloc = 1;
1663 }
1664 return NULL;
1665 }
1666 return &constraint_empty;
1667}
1668
1669static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
1670{
1671 struct intel_uncore_extra_reg *er;
1672 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1673 int idx, er_idx;
1674
1675 if (uncore_box_is_fake(box) || !reg1->alloc)
1676 return;
1677
1678 idx = reg1->idx % 6;
1679 er_idx = idx;
1680 if (er_idx > 2)
1681 er_idx--;
1682 er_idx += (reg1->idx / 6) * 5;
1683
1684 er = &box->shared_regs[er_idx];
1685 if (idx == 2 || idx == 3)
1686 atomic_sub(1 << ((idx - 2) * 8), &er->ref);
1687 else
1688 atomic_dec(&er->ref);
1689
1690 reg1->alloc = 0;
1691}
1692
1693static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1694{
1695 struct hw_perf_event *hwc = &event->hw;
1696 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1697 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
ebb6cc03 1698 int idx;
fcde10e9 1699
1700 idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
1701 NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
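	/*
	 * The event select encodes (port within this rbox) * 6 + index of
	 * the event within its set; 4 ports * 6 events = 0x18, so larger
	 * values cannot be programmed.
	 */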
1702 if (idx >= 0x18)
1703 return -EINVAL;
1704
1705 reg1->idx = idx;
1706 reg1->config = event->attr.config1;
1707
ebb6cc03 1708 switch (idx % 6) {
1709 case 4:
1710 case 5:
254298c7 1711 hwc->config |= event->attr.config & (~0ULL << 32);
ebb6cc03 1712 reg2->config = event->attr.config2;
1713 break;
 1714 }
1715 return 0;
1716}
1717
1718static u64 nhmex_rbox_shared_reg_config(struct intel_uncore_box *box, int idx)
1719{
1720 struct intel_uncore_extra_reg *er;
1721 unsigned long flags;
1722 u64 config;
1723
1724 er = &box->shared_regs[idx];
1725
1726 raw_spin_lock_irqsave(&er->lock, flags);
1727 config = er->config;
1728 raw_spin_unlock_irqrestore(&er->lock, flags);
1729
1730 return config;
1731}
1732
1733static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1734{
1735 struct hw_perf_event *hwc = &event->hw;
1736 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1737 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
ebb6cc03 1738 int idx, port;
254298c7 1739
1740 idx = reg1->idx;
1741 port = idx / 6 + box->pmu->pmu_idx * 4;
254298c7 1742
ebb6cc03 1743 switch (idx % 6) {
254298c7 1744 case 0:
1745 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
1746 break;
254298c7 1747 case 1:
ebb6cc03 1748 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
1749 break;
1750 case 2:
1751 case 3:
1752 wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
1753 nhmex_rbox_shared_reg_config(box, 2 + (idx / 6) * 5));
1754 break;
1755 case 4:
1756 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
1757 hwc->config >> 32);
1758 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
1759 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
1760 break;
254298c7 1761 case 5:
1762 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
1763 hwc->config >> 32);
1764 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
1765 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
1766 break;
 1767 }
1768
1769 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
1770 (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
1771}
1772
1773DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
1774DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
1775DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
1776DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
1777DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
1778
1779static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
1780 &format_attr_event5.attr,
1781 &format_attr_xbr_mm_cfg.attr,
1782 &format_attr_xbr_match.attr,
1783 &format_attr_xbr_mask.attr,
1784 &format_attr_qlx_cfg.attr,
1785 &format_attr_iperf_cfg.attr,
1786 NULL,
1787};
1788
254298c7 1789static struct attribute_group nhmex_uncore_rbox_format_group = {
fcde10e9 1790 .name = "format",
254298c7 1791 .attrs = nhmex_uncore_rbox_formats_attr,
1792};
1793
1794static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
1795 INTEL_UNCORE_EVENT_DESC(qpi0_flit_send, "event=0x0,iperf_cfg=0x80000000"),
1796 INTEL_UNCORE_EVENT_DESC(qpi1_filt_send, "event=0x6,iperf_cfg=0x80000000"),
1797 INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt, "event=0x0,iperf_cfg=0x40000000"),
1798 INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt, "event=0x6,iperf_cfg=0x40000000"),
1799 INTEL_UNCORE_EVENT_DESC(qpi0_date_response, "event=0x0,iperf_cfg=0xc4"),
1800 INTEL_UNCORE_EVENT_DESC(qpi1_date_response, "event=0x6,iperf_cfg=0xc4"),
1801 { /* end: all zeroes */ },
1802};
1803
1804static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
1805 NHMEX_UNCORE_OPS_COMMON_INIT(),
1806 .enable_event = nhmex_rbox_msr_enable_event,
1807 .hw_config = nhmex_rbox_hw_config,
1808 .get_constraint = nhmex_rbox_get_constraint,
1809 .put_constraint = nhmex_rbox_put_constraint,
1810};
1811
1812static struct intel_uncore_type nhmex_uncore_rbox = {
1813 .name = "rbox",
1814 .num_counters = 8,
1815 .num_boxes = 2,
1816 .perf_ctr_bits = 48,
1817 .event_ctl = NHMEX_R_MSR_PMON_CTL0,
1818 .perf_ctr = NHMEX_R_MSR_PMON_CNT0,
1819 .event_mask = NHMEX_R_PMON_RAW_EVENT_MASK,
1820 .box_ctl = NHMEX_R_MSR_GLOBAL_CTL,
1821 .msr_offset = NHMEX_R_MSR_OFFSET,
1822 .pair_ctr_ctl = 1,
1823 .num_shared_regs = 20,
1824 .event_descs = nhmex_uncore_rbox_events,
1825 .ops = &nhmex_uncore_rbox_ops,
1826 .format_group = &nhmex_uncore_rbox_format_group
1827};
1828
1829static struct intel_uncore_type *nhmex_msr_uncores[] = {
1830 &nhmex_uncore_ubox,
1831 &nhmex_uncore_cbox,
1832 &nhmex_uncore_bbox,
1833 &nhmex_uncore_sbox,
1834 &nhmex_uncore_mbox,
1835 &nhmex_uncore_rbox,
1836 &nhmex_uncore_wbox,
1837 NULL,
1838};
254298c7 1839/* end of Nehalem-EX uncore support */
fcde10e9 1840
254298c7 1841static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
1842{
1843 struct hw_perf_event *hwc = &event->hw;
1844
1845 hwc->idx = idx;
1846 hwc->last_tag = ++box->tags[idx];
1847
1848 if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
1849 hwc->event_base = uncore_fixed_ctr(box);
1850 hwc->config_base = uncore_fixed_ctl(box);
1851 return;
1852 }
1853
1854 hwc->config_base = uncore_event_ctl(box, hwc->idx);
1855 hwc->event_base = uncore_perf_ctr(box, hwc->idx);
1856}
1857
254298c7 1858static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
1859{
1860 u64 prev_count, new_count, delta;
1861 int shift;
1862
1863 if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
1864 shift = 64 - uncore_fixed_ctr_bits(box);
1865 else
1866 shift = 64 - uncore_perf_ctr_bits(box);
1867
1868 /* the hrtimer might modify the previous event value */
1869again:
1870 prev_count = local64_read(&event->hw.prev_count);
1871 new_count = uncore_read_counter(box, event);
1872 if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
1873 goto again;
1874
1875 delta = (new_count << shift) - (prev_count << shift);
1876 delta >>= shift;
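	/*
	 * The shifts make the subtraction wrap at the hardware counter
	 * width instead of at 64 bits, so a counter that rolled over
	 * between two reads still yields the correct delta.
	 */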
1877
1878 local64_add(delta, &event->count);
1879}
1880
1881/*
 1882 * The overflow interrupt is unavailable for SandyBridge-EP and is
 1883 * broken for SandyBridge, so we use an hrtimer to poll the counters
 1884 * periodically before they overflow.
1885 */
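/*
 * Rough sizing, assuming a counter increments at a few GHz at most:
 * a 48-bit counter wraps after about 2^48 / 4e9 seconds (~19 hours),
 * so any UNCORE_PMU_HRTIMER_INTERVAL far below that cannot miss a wrap.
 */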
1886static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
1887{
1888 struct intel_uncore_box *box;
1889 unsigned long flags;
1890 int bit;
1891
1892 box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
1893 if (!box->n_active || box->cpu != smp_processor_id())
1894 return HRTIMER_NORESTART;
1895 /*
 1896 * disable local interrupts to prevent uncore_pmu_event_start/stop
 1897 * from interrupting the update process
1898 */
1899 local_irq_save(flags);
1900
1901 for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
1902 uncore_perf_event_update(box, box->events[bit]);
1903
1904 local_irq_restore(flags);
1905
1906 hrtimer_forward_now(hrtimer, ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL));
1907 return HRTIMER_RESTART;
1908}
1909
1910static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
1911{
1912 __hrtimer_start_range_ns(&box->hrtimer,
1913 ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL), 0,
1914 HRTIMER_MODE_REL_PINNED, 0);
1915}
1916
1917static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
1918{
1919 hrtimer_cancel(&box->hrtimer);
1920}
1921
1922static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
1923{
1924 hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1925 box->hrtimer.function = uncore_pmu_hrtimer;
1926}
1927
254298c7 1928struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu)
1929{
1930 struct intel_uncore_box *box;
6a67943a 1931 int i, size;
087bfbb0 1932
254298c7 1933 size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
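	/* shared_regs[] is a variable-size array at the end of the box, hence the manual sizing */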
1934
1935 box = kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu));
1936 if (!box)
1937 return NULL;
1938
1939 for (i = 0; i < type->num_shared_regs; i++)
1940 raw_spin_lock_init(&box->shared_regs[i].lock);
1941
1942 uncore_pmu_init_hrtimer(box);
1943 atomic_set(&box->refcnt, 1);
1944 box->cpu = -1;
1945 box->phys_id = -1;
1946
1947 return box;
1948}
1949
1950static struct intel_uncore_box *
1951uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
1952{
 1953	struct intel_uncore_box *box;
1954
1955 box = *per_cpu_ptr(pmu->box, cpu);
1956 if (box)
1957 return box;
1958
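	/*
	 * Slow path, mainly for PCI uncores: the box lives on the pmu's
	 * box_list, so look it up by package id and cache it in the
	 * per-cpu pointer.
	 */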
1959 raw_spin_lock(&uncore_box_lock);
1960 list_for_each_entry(box, &pmu->box_list, list) {
1961 if (box->phys_id == topology_physical_package_id(cpu)) {
1962 atomic_inc(&box->refcnt);
1963 *per_cpu_ptr(pmu->box, cpu) = box;
1964 break;
1965 }
1966 }
1967 raw_spin_unlock(&uncore_box_lock);
1968
1969 return *per_cpu_ptr(pmu->box, cpu);
1970}
1971
1972static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
1973{
1974 return container_of(event->pmu, struct intel_uncore_pmu, pmu);
1975}
1976
1977static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
1978{
1979 /*
 1980 * the perf core schedules events per cpu, but uncore events are
 1981 * collected by one of the cpus inside a physical package.
1982 */
254298c7 1983 return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
1984}
1985
1986static int
1987uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
1988{
1989 struct perf_event *event;
1990 int n, max_count;
1991
1992 max_count = box->pmu->type->num_counters;
1993 if (box->pmu->type->fixed_ctl)
1994 max_count++;
1995
1996 if (box->n_events >= max_count)
1997 return -EINVAL;
1998
1999 n = box->n_events;
2000 box->event_list[n] = leader;
2001 n++;
2002 if (!dogrp)
2003 return n;
2004
2005 list_for_each_entry(event, &leader->sibling_list, group_entry) {
2006 if (event->state <= PERF_EVENT_STATE_OFF)
2007 continue;
2008
2009 if (n >= max_count)
2010 return -EINVAL;
2011
2012 box->event_list[n] = event;
2013 n++;
2014 }
2015 return n;
2016}
2017
2018static struct event_constraint *
254298c7 2019uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
087bfbb0 2020{
6a67943a 2021 struct intel_uncore_type *type = box->pmu->type;
2022 struct event_constraint *c;
2023
2024 if (type->ops->get_constraint) {
2025 c = type->ops->get_constraint(box, event);
2026 if (c)
2027 return c;
2028 }
2029
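	/* uncore_pmu_event_init() sets hwc->config to ~0ULL for the fixed counter */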
2030 if (event->hw.config == ~0ULL)
2031 return &constraint_fixed;
2032
2033 if (type->constraints) {
2034 for_each_event_constraint(c, type->constraints) {
2035 if ((event->hw.config & c->cmask) == c->code)
2036 return c;
2037 }
2038 }
2039
2040 return &type->unconstrainted;
2041}
2042
254298c7 2043static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
2044{
2045 if (box->pmu->type->ops->put_constraint)
2046 box->pmu->type->ops->put_constraint(box, event);
2047}
2048
254298c7 2049static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
2050{
2051 unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
2052 struct event_constraint *c, *constraints[UNCORE_PMC_IDX_MAX];
6a67943a 2053 int i, wmin, wmax, ret = 0;
2054 struct hw_perf_event *hwc;
2055
2056 bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
2057
2058 for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
6a67943a 2059 c = uncore_get_event_constraint(box, box->event_list[i]);
2060 constraints[i] = c;
2061 wmin = min(wmin, c->weight);
2062 wmax = max(wmax, c->weight);
2063 }
2064
2065 /* fastpath, try to reuse previous register */
2066 for (i = 0; i < n; i++) {
2067 hwc = &box->event_list[i]->hw;
2068 c = constraints[i];
2069
2070 /* never assigned */
2071 if (hwc->idx == -1)
2072 break;
2073
2074 /* constraint still honored */
2075 if (!test_bit(hwc->idx, c->idxmsk))
2076 break;
2077
2078 /* not already used */
2079 if (test_bit(hwc->idx, used_mask))
2080 break;
2081
2082 __set_bit(hwc->idx, used_mask);
2083 if (assign)
2084 assign[i] = hwc->idx;
087bfbb0 2085 }
087bfbb0 2086 /* slow path */
2087 if (i != n)
2088 ret = perf_assign_events(constraints, n, wmin, wmax, assign);
2089
2090 if (!assign || ret) {
2091 for (i = 0; i < n; i++)
2092 uncore_put_event_constraint(box, box->event_list[i]);
2093 }
2094 return ret ? -EINVAL : 0;
2095}
2096
2097static void uncore_pmu_event_start(struct perf_event *event, int flags)
2098{
2099 struct intel_uncore_box *box = uncore_event_to_box(event);
2100 int idx = event->hw.idx;
2101
2102 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
2103 return;
2104
2105 if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
2106 return;
2107
2108 event->hw.state = 0;
2109 box->events[idx] = event;
2110 box->n_active++;
2111 __set_bit(idx, box->active_mask);
2112
2113 local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
2114 uncore_enable_event(box, event);
2115
2116 if (box->n_active == 1) {
2117 uncore_enable_box(box);
2118 uncore_pmu_start_hrtimer(box);
2119 }
2120}
2121
2122static void uncore_pmu_event_stop(struct perf_event *event, int flags)
2123{
2124 struct intel_uncore_box *box = uncore_event_to_box(event);
2125 struct hw_perf_event *hwc = &event->hw;
2126
2127 if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
2128 uncore_disable_event(box, event);
2129 box->n_active--;
2130 box->events[hwc->idx] = NULL;
2131 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
2132 hwc->state |= PERF_HES_STOPPED;
2133
2134 if (box->n_active == 0) {
2135 uncore_disable_box(box);
2136 uncore_pmu_cancel_hrtimer(box);
2137 }
2138 }
2139
2140 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
2141 /*
 2142 * Drain the remaining delta count out of an event
2143 * that we are disabling:
2144 */
2145 uncore_perf_event_update(box, event);
2146 hwc->state |= PERF_HES_UPTODATE;
2147 }
2148}
2149
2150static int uncore_pmu_event_add(struct perf_event *event, int flags)
2151{
2152 struct intel_uncore_box *box = uncore_event_to_box(event);
2153 struct hw_perf_event *hwc = &event->hw;
2154 int assign[UNCORE_PMC_IDX_MAX];
2155 int i, n, ret;
2156
2157 if (!box)
2158 return -ENODEV;
2159
2160 ret = n = uncore_collect_events(box, event, false);
2161 if (ret < 0)
2162 return ret;
2163
2164 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
2165 if (!(flags & PERF_EF_START))
2166 hwc->state |= PERF_HES_ARCH;
2167
2168 ret = uncore_assign_events(box, assign, n);
2169 if (ret)
2170 return ret;
2171
2172 /* save events moving to new counters */
2173 for (i = 0; i < box->n_events; i++) {
2174 event = box->event_list[i];
2175 hwc = &event->hw;
2176
2177 if (hwc->idx == assign[i] &&
2178 hwc->last_tag == box->tags[assign[i]])
2179 continue;
2180 /*
2181 * Ensure we don't accidentally enable a stopped
2182 * counter simply because we rescheduled.
2183 */
2184 if (hwc->state & PERF_HES_STOPPED)
2185 hwc->state |= PERF_HES_ARCH;
2186
2187 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
2188 }
2189
2190 /* reprogram moved events into new counters */
2191 for (i = 0; i < n; i++) {
2192 event = box->event_list[i];
2193 hwc = &event->hw;
2194
2195 if (hwc->idx != assign[i] ||
2196 hwc->last_tag != box->tags[assign[i]])
2197 uncore_assign_hw_event(box, event, assign[i]);
2198 else if (i < box->n_events)
2199 continue;
2200
2201 if (hwc->state & PERF_HES_ARCH)
2202 continue;
2203
2204 uncore_pmu_event_start(event, 0);
2205 }
2206 box->n_events = n;
2207
2208 return 0;
2209}
2210
2211static void uncore_pmu_event_del(struct perf_event *event, int flags)
2212{
2213 struct intel_uncore_box *box = uncore_event_to_box(event);
2214 int i;
2215
2216 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
2217
2218 for (i = 0; i < box->n_events; i++) {
2219 if (event == box->event_list[i]) {
2220 uncore_put_event_constraint(box, event);
2221
2222 while (++i < box->n_events)
2223 box->event_list[i - 1] = box->event_list[i];
2224
2225 --box->n_events;
2226 break;
2227 }
2228 }
2229
2230 event->hw.idx = -1;
2231 event->hw.last_tag = ~0ULL;
2232}
2233
2234static void uncore_pmu_event_read(struct perf_event *event)
2235{
2236 struct intel_uncore_box *box = uncore_event_to_box(event);
2237 uncore_perf_event_update(box, event);
2238}
2239
2240/*
2241 * validation ensures the group can be loaded onto the
2242 * PMU if it was the only group available.
2243 */
2244static int uncore_validate_group(struct intel_uncore_pmu *pmu,
2245 struct perf_event *event)
2246{
2247 struct perf_event *leader = event->group_leader;
2248 struct intel_uncore_box *fake_box;
2249 int ret = -EINVAL, n;
2250
6a67943a 2251 fake_box = uncore_alloc_box(pmu->type, smp_processor_id());
2252 if (!fake_box)
2253 return -ENOMEM;
2254
2255 fake_box->pmu = pmu;
2256 /*
 2257 * the event is not yet connected with its
 2258 * siblings, therefore we must first collect
2259 * existing siblings, then add the new event
2260 * before we can simulate the scheduling
2261 */
2262 n = uncore_collect_events(fake_box, leader, true);
2263 if (n < 0)
2264 goto out;
2265
2266 fake_box->n_events = n;
2267 n = uncore_collect_events(fake_box, event, false);
2268 if (n < 0)
2269 goto out;
2270
2271 fake_box->n_events = n;
2272
6a67943a 2273 ret = uncore_assign_events(fake_box, NULL, n);
2274out:
2275 kfree(fake_box);
2276 return ret;
2277}
2278
2279int uncore_pmu_event_init(struct perf_event *event)
2280{
2281 struct intel_uncore_pmu *pmu;
2282 struct intel_uncore_box *box;
2283 struct hw_perf_event *hwc = &event->hw;
2284 int ret;
2285
2286 if (event->attr.type != event->pmu->type)
2287 return -ENOENT;
2288
2289 pmu = uncore_event_to_pmu(event);
2290 /* no device found for this pmu */
2291 if (pmu->func_id < 0)
2292 return -ENOENT;
2293
2294 /*
 2295 * the uncore PMU always measures at all privilege levels, so it
 2296 * doesn't make sense to specify any exclude bits.
2297 */
2298 if (event->attr.exclude_user || event->attr.exclude_kernel ||
2299 event->attr.exclude_hv || event->attr.exclude_idle)
2300 return -EINVAL;
2301
2302 /* Sampling not supported yet */
2303 if (hwc->sample_period)
2304 return -EINVAL;
2305
2306 /*
2307 * Place all uncore events for a particular physical package
2308 * onto a single cpu
2309 */
2310 if (event->cpu < 0)
2311 return -EINVAL;
2312 box = uncore_pmu_to_box(pmu, event->cpu);
2313 if (!box || box->cpu < 0)
2314 return -EINVAL;
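	/* redirect the event to the cpu that collects uncore events for this package */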
2315 event->cpu = box->cpu;
2316
2317 event->hw.idx = -1;
2318 event->hw.last_tag = ~0ULL;
2319 event->hw.extra_reg.idx = EXTRA_REG_NONE;
ebb6cc03 2320 event->hw.branch_reg.idx = EXTRA_REG_NONE;
6a67943a 2321
2322 if (event->attr.config == UNCORE_FIXED_EVENT) {
2323 /* no fixed counter */
2324 if (!pmu->type->fixed_ctl)
2325 return -EINVAL;
2326 /*
2327 * if there is only one fixed counter, only the first pmu
2328 * can access the fixed counter
2329 */
2330 if (pmu->type->single_fixed && pmu->pmu_idx > 0)
2331 return -EINVAL;
2332 hwc->config = ~0ULL;
2333 } else {
2334 hwc->config = event->attr.config & pmu->type->event_mask;
2335 if (pmu->type->ops->hw_config) {
2336 ret = pmu->type->ops->hw_config(box, event);
2337 if (ret)
2338 return ret;
2339 }
2340 }
2341
2342 if (event->group_leader != event)
2343 ret = uncore_validate_group(pmu, event);
2344 else
2345 ret = 0;
2346
2347 return ret;
2348}
2349
2350static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
2351{
2352 int ret;
2353
2354 pmu->pmu = (struct pmu) {
2355 .attr_groups = pmu->type->attr_groups,
2356 .task_ctx_nr = perf_invalid_context,
2357 .event_init = uncore_pmu_event_init,
2358 .add = uncore_pmu_event_add,
2359 .del = uncore_pmu_event_del,
2360 .start = uncore_pmu_event_start,
2361 .stop = uncore_pmu_event_stop,
2362 .read = uncore_pmu_event_read,
2363 };
2364
2365 if (pmu->type->num_boxes == 1) {
2366 if (strlen(pmu->type->name) > 0)
2367 sprintf(pmu->name, "uncore_%s", pmu->type->name);
2368 else
2369 sprintf(pmu->name, "uncore");
2370 } else {
2371 sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
2372 pmu->pmu_idx);
2373 }
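	/* e.g. a type with eight boxes named "cbox" registers uncore_cbox_0 .. uncore_cbox_7 */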
2374
2375 ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
2376 return ret;
2377}
2378
2379static void __init uncore_type_exit(struct intel_uncore_type *type)
2380{
2381 int i;
2382
2383 for (i = 0; i < type->num_boxes; i++)
2384 free_percpu(type->pmus[i].box);
2385 kfree(type->pmus);
2386 type->pmus = NULL;
2387 kfree(type->attr_groups[1]);
2388 type->attr_groups[1] = NULL;
2389}
2390
cffa59ba 2391static void __init uncore_types_exit(struct intel_uncore_type **types)
2392{
2393 int i;
2394 for (i = 0; types[i]; i++)
2395 uncore_type_exit(types[i]);
2396}
2397
2398static int __init uncore_type_init(struct intel_uncore_type *type)
2399{
2400 struct intel_uncore_pmu *pmus;
2401 struct attribute_group *events_group;
2402 struct attribute **attrs;
2403 int i, j;
2404
2405 pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
2406 if (!pmus)
2407 return -ENOMEM;
2408
2409 type->unconstrainted = (struct event_constraint)
2410 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
2411 0, type->num_counters, 0);
2412
2413 for (i = 0; i < type->num_boxes; i++) {
2414 pmus[i].func_id = -1;
2415 pmus[i].pmu_idx = i;
2416 pmus[i].type = type;
14371cce 2417 INIT_LIST_HEAD(&pmus[i].box_list);
2418 pmus[i].box = alloc_percpu(struct intel_uncore_box *);
2419 if (!pmus[i].box)
2420 goto fail;
2421 }
2422
2423 if (type->event_descs) {
2424 i = 0;
2425 while (type->event_descs[i].attr.attr.name)
2426 i++;
2427
2428 events_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
2429 sizeof(*events_group), GFP_KERNEL);
2430 if (!events_group)
2431 goto fail;
2432
2433 attrs = (struct attribute **)(events_group + 1);
2434 events_group->name = "events";
2435 events_group->attrs = attrs;
2436
2437 for (j = 0; j < i; j++)
2438 attrs[j] = &type->event_descs[j].attr.attr;
2439
2440 type->attr_groups[1] = events_group;
2441 }
2442
2443 type->pmus = pmus;
2444 return 0;
2445fail:
2446 uncore_type_exit(type);
2447 return -ENOMEM;
2448}
2449
2450static int __init uncore_types_init(struct intel_uncore_type **types)
2451{
2452 int i, ret;
2453
2454 for (i = 0; types[i]; i++) {
2455 ret = uncore_type_init(types[i]);
2456 if (ret)
2457 goto fail;
2458 }
2459 return 0;
2460fail:
2461 while (--i >= 0)
2462 uncore_type_exit(types[i]);
2463 return ret;
2464}
2465
2466static struct pci_driver *uncore_pci_driver;
2467static bool pcidrv_registered;
2468
2469/*
2470 * add a pci uncore device
2471 */
254298c7 2472static int __devinit uncore_pci_add(struct intel_uncore_type *type, struct pci_dev *pdev)
2473{
2474 struct intel_uncore_pmu *pmu;
2475 struct intel_uncore_box *box;
2476 int i, phys_id;
2477
2478 phys_id = pcibus_to_physid[pdev->bus->number];
2479 if (phys_id < 0)
2480 return -ENODEV;
2481
6a67943a 2482 box = uncore_alloc_box(type, 0);
2483 if (!box)
2484 return -ENOMEM;
2485
2486 /*
 2487 * for a performance monitoring unit with multiple boxes,
2488 * each box has a different function id.
2489 */
2490 for (i = 0; i < type->num_boxes; i++) {
2491 pmu = &type->pmus[i];
2492 if (pmu->func_id == pdev->devfn)
2493 break;
2494 if (pmu->func_id < 0) {
2495 pmu->func_id = pdev->devfn;
2496 break;
2497 }
2498 pmu = NULL;
2499 }
2500
2501 if (!pmu) {
2502 kfree(box);
2503 return -EINVAL;
2504 }
2505
2506 box->phys_id = phys_id;
2507 box->pci_dev = pdev;
2508 box->pmu = pmu;
2509 uncore_box_init(box);
2510 pci_set_drvdata(pdev, box);
2511
2512 raw_spin_lock(&uncore_box_lock);
2513 list_add_tail(&box->list, &pmu->box_list);
2514 raw_spin_unlock(&uncore_box_lock);
2515
2516 return 0;
2517}
2518
357398e9 2519static void uncore_pci_remove(struct pci_dev *pdev)
2520{
2521 struct intel_uncore_box *box = pci_get_drvdata(pdev);
2522 struct intel_uncore_pmu *pmu = box->pmu;
2523 int cpu, phys_id = pcibus_to_physid[pdev->bus->number];
2524
2525 if (WARN_ON_ONCE(phys_id != box->phys_id))
2526 return;
2527
2528 raw_spin_lock(&uncore_box_lock);
2529 list_del(&box->list);
2530 raw_spin_unlock(&uncore_box_lock);
2531
2532 for_each_possible_cpu(cpu) {
2533 if (*per_cpu_ptr(pmu->box, cpu) == box) {
2534 *per_cpu_ptr(pmu->box, cpu) = NULL;
2535 atomic_dec(&box->refcnt);
2536 }
2537 }
2538
2539 WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
2540 kfree(box);
2541}
2542
2543static int __devinit uncore_pci_probe(struct pci_dev *pdev,
2544 const struct pci_device_id *id)
2545{
2546 struct intel_uncore_type *type;
2547
2548 type = (struct intel_uncore_type *)id->driver_data;
254298c7 2549
2550 return uncore_pci_add(type, pdev);
2551}
2552
2553static int __init uncore_pci_init(void)
2554{
2555 int ret;
2556
2557 switch (boot_cpu_data.x86_model) {
2558 case 45: /* Sandy Bridge-EP */
2559 pci_uncores = snbep_pci_uncores;
2560 uncore_pci_driver = &snbep_uncore_pci_driver;
2561 snbep_pci2phy_map_init();
2562 break;
2563 default:
2564 return 0;
2565 }
2566
2567 ret = uncore_types_init(pci_uncores);
2568 if (ret)
2569 return ret;
2570
2571 uncore_pci_driver->probe = uncore_pci_probe;
2572 uncore_pci_driver->remove = uncore_pci_remove;
2573
2574 ret = pci_register_driver(uncore_pci_driver);
2575 if (ret == 0)
2576 pcidrv_registered = true;
2577 else
2578 uncore_types_exit(pci_uncores);
2579
2580 return ret;
2581}
2582
2583static void __init uncore_pci_exit(void)
2584{
2585 if (pcidrv_registered) {
2586 pcidrv_registered = false;
2587 pci_unregister_driver(uncore_pci_driver);
2588 uncore_types_exit(pci_uncores);
2589 }
2590}
2591
2592static void __cpuinit uncore_cpu_dying(int cpu)
2593{
2594 struct intel_uncore_type *type;
2595 struct intel_uncore_pmu *pmu;
2596 struct intel_uncore_box *box;
2597 int i, j;
2598
2599 for (i = 0; msr_uncores[i]; i++) {
2600 type = msr_uncores[i];
2601 for (j = 0; j < type->num_boxes; j++) {
2602 pmu = &type->pmus[j];
2603 box = *per_cpu_ptr(pmu->box, cpu);
2604 *per_cpu_ptr(pmu->box, cpu) = NULL;
2605 if (box && atomic_dec_and_test(&box->refcnt))
2606 kfree(box);
2607 }
2608 }
2609}
2610
2611static int __cpuinit uncore_cpu_starting(int cpu)
2612{
2613 struct intel_uncore_type *type;
2614 struct intel_uncore_pmu *pmu;
2615 struct intel_uncore_box *box, *exist;
2616 int i, j, k, phys_id;
2617
2618 phys_id = topology_physical_package_id(cpu);
2619
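	/*
	 * MSR uncore boxes are shared by all cpus in a physical package;
	 * if another online cpu in this package already owns the box,
	 * reuse it and free the one allocated by uncore_cpu_prepare().
	 */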
2620 for (i = 0; msr_uncores[i]; i++) {
2621 type = msr_uncores[i];
2622 for (j = 0; j < type->num_boxes; j++) {
2623 pmu = &type->pmus[j];
2624 box = *per_cpu_ptr(pmu->box, cpu);
2625 /* called by uncore_cpu_init? */
2626 if (box && box->phys_id >= 0) {
2627 uncore_box_init(box);
2628 continue;
2629 }
2630
2631 for_each_online_cpu(k) {
2632 exist = *per_cpu_ptr(pmu->box, k);
2633 if (exist && exist->phys_id == phys_id) {
2634 atomic_inc(&exist->refcnt);
2635 *per_cpu_ptr(pmu->box, cpu) = exist;
2636 kfree(box);
2637 box = NULL;
2638 break;
2639 }
2640 }
2641
2642 if (box) {
2643 box->phys_id = phys_id;
2644 uncore_box_init(box);
2645 }
2646 }
2647 }
2648 return 0;
2649}
2650
2651static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id)
2652{
2653 struct intel_uncore_type *type;
2654 struct intel_uncore_pmu *pmu;
2655 struct intel_uncore_box *box;
2656 int i, j;
2657
2658 for (i = 0; msr_uncores[i]; i++) {
2659 type = msr_uncores[i];
2660 for (j = 0; j < type->num_boxes; j++) {
2661 pmu = &type->pmus[j];
2662 if (pmu->func_id < 0)
2663 pmu->func_id = j;
2664
6a67943a 2665 box = uncore_alloc_box(type, cpu);
2666 if (!box)
2667 return -ENOMEM;
2668
2669 box->pmu = pmu;
2670 box->phys_id = phys_id;
2671 *per_cpu_ptr(pmu->box, cpu) = box;
2672 }
2673 }
2674 return 0;
2675}
2676
2677static void __cpuinit
2678uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
2679{
2680 struct intel_uncore_type *type;
2681 struct intel_uncore_pmu *pmu;
2682 struct intel_uncore_box *box;
2683 int i, j;
2684
2685 for (i = 0; uncores[i]; i++) {
2686 type = uncores[i];
2687 for (j = 0; j < type->num_boxes; j++) {
2688 pmu = &type->pmus[j];
2689 if (old_cpu < 0)
2690 box = uncore_pmu_to_box(pmu, new_cpu);
2691 else
2692 box = uncore_pmu_to_box(pmu, old_cpu);
2693 if (!box)
2694 continue;
2695
2696 if (old_cpu < 0) {
2697 WARN_ON_ONCE(box->cpu != -1);
2698 box->cpu = new_cpu;
2699 continue;
2700 }
2701
2702 WARN_ON_ONCE(box->cpu != old_cpu);
2703 if (new_cpu >= 0) {
2704 uncore_pmu_cancel_hrtimer(box);
2705 perf_pmu_migrate_context(&pmu->pmu,
2706 old_cpu, new_cpu);
2707 box->cpu = new_cpu;
2708 } else {
2709 box->cpu = -1;
2710 }
2711 }
2712 }
2713}
2714
2715static void __cpuinit uncore_event_exit_cpu(int cpu)
2716{
2717 int i, phys_id, target;
2718
 2719 /* nothing to do unless the exiting cpu was collecting uncore events */
2720 if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
2721 return;
2722
2723 /* find a new cpu to collect uncore events */
2724 phys_id = topology_physical_package_id(cpu);
2725 target = -1;
2726 for_each_online_cpu(i) {
2727 if (i == cpu)
2728 continue;
2729 if (phys_id == topology_physical_package_id(i)) {
2730 target = i;
2731 break;
2732 }
2733 }
2734
2735 /* migrate uncore events to the new cpu */
2736 if (target >= 0)
2737 cpumask_set_cpu(target, &uncore_cpu_mask);
2738
2739 uncore_change_context(msr_uncores, cpu, target);
14371cce 2740 uncore_change_context(pci_uncores, cpu, target);
2741}
2742
2743static void __cpuinit uncore_event_init_cpu(int cpu)
2744{
2745 int i, phys_id;
2746
2747 phys_id = topology_physical_package_id(cpu);
2748 for_each_cpu(i, &uncore_cpu_mask) {
2749 if (phys_id == topology_physical_package_id(i))
2750 return;
2751 }
2752
2753 cpumask_set_cpu(cpu, &uncore_cpu_mask);
2754
2755 uncore_change_context(msr_uncores, -1, cpu);
14371cce 2756 uncore_change_context(pci_uncores, -1, cpu);
2757}
2758
2759static int
2760 __cpuinit uncore_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
2761{
2762 unsigned int cpu = (long)hcpu;
2763
2764 /* allocate/free data structure for uncore box */
2765 switch (action & ~CPU_TASKS_FROZEN) {
2766 case CPU_UP_PREPARE:
2767 uncore_cpu_prepare(cpu, -1);
2768 break;
2769 case CPU_STARTING:
2770 uncore_cpu_starting(cpu);
2771 break;
2772 case CPU_UP_CANCELED:
2773 case CPU_DYING:
2774 uncore_cpu_dying(cpu);
2775 break;
2776 default:
2777 break;
2778 }
2779
2780 /* select the cpu that collects uncore events */
2781 switch (action & ~CPU_TASKS_FROZEN) {
2782 case CPU_DOWN_FAILED:
2783 case CPU_STARTING:
2784 uncore_event_init_cpu(cpu);
2785 break;
2786 case CPU_DOWN_PREPARE:
2787 uncore_event_exit_cpu(cpu);
2788 break;
2789 default:
2790 break;
2791 }
2792
2793 return NOTIFY_OK;
2794}
2795
2796static struct notifier_block uncore_cpu_nb __cpuinitdata = {
254298c7 2797 .notifier_call = uncore_cpu_notifier,
2798 /*
2799 * to migrate uncore events, our notifier should be executed
2800 * before perf core's notifier.
2801 */
254298c7 2802 .priority = CPU_PRI_PERF + 1,
2803};
2804
2805static void __init uncore_cpu_setup(void *dummy)
2806{
2807 uncore_cpu_starting(smp_processor_id());
2808}
2809
2810static int __init uncore_cpu_init(void)
2811{
42089697 2812 int ret, cpu, max_cores;
087bfbb0 2813
42089697 2814 max_cores = boot_cpu_data.x86_max_cores;
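	/* each C-box is per core, so the usable cbox count is clamped to max_cores below */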
087bfbb0 2815 switch (boot_cpu_data.x86_model) {
2816 case 26: /* Nehalem */
2817 case 30:
2818 case 37: /* Westmere */
2819 case 44:
2820 msr_uncores = nhm_msr_uncores;
2821 break;
2822 case 42: /* Sandy Bridge */
2823 if (snb_uncore_cbox.num_boxes > max_cores)
2824 snb_uncore_cbox.num_boxes = max_cores;
2825 msr_uncores = snb_msr_uncores;
2826 break;
7c94ee2e 2827	case 45: /* Sandy Bridge-EP */
2828 if (snbep_uncore_cbox.num_boxes > max_cores)
2829 snbep_uncore_cbox.num_boxes = max_cores;
2830 msr_uncores = snbep_msr_uncores;
2831 break;
2832 case 46: /* Nehalem-EX */
2833 uncore_nhmex = true;
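		/* fall through: Westmere-EX reuses the Nehalem-EX uncore types */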
2834 case 47: /* Westmere-EX aka. Xeon E7 */
2835 if (!uncore_nhmex)
2836 nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
2837 if (nhmex_uncore_cbox.num_boxes > max_cores)
2838 nhmex_uncore_cbox.num_boxes = max_cores;
2839 msr_uncores = nhmex_msr_uncores;
2840 break;
2841 default:
2842 return 0;
2843 }
2844
2845 ret = uncore_types_init(msr_uncores);
2846 if (ret)
2847 return ret;
2848
2849 get_online_cpus();
2850
2851 for_each_online_cpu(cpu) {
2852 int i, phys_id = topology_physical_package_id(cpu);
2853
2854 for_each_cpu(i, &uncore_cpu_mask) {
2855 if (phys_id == topology_physical_package_id(i)) {
2856 phys_id = -1;
2857 break;
2858 }
2859 }
2860 if (phys_id < 0)
2861 continue;
2862
2863 uncore_cpu_prepare(cpu, phys_id);
2864 uncore_event_init_cpu(cpu);
2865 }
2866 on_each_cpu(uncore_cpu_setup, NULL, 1);
2867
2868 register_cpu_notifier(&uncore_cpu_nb);
2869
2870 put_online_cpus();
2871
2872 return 0;
2873}
2874
2875static int __init uncore_pmus_register(void)
2876{
2877 struct intel_uncore_pmu *pmu;
2878 struct intel_uncore_type *type;
2879 int i, j;
2880
2881 for (i = 0; msr_uncores[i]; i++) {
2882 type = msr_uncores[i];
2883 for (j = 0; j < type->num_boxes; j++) {
2884 pmu = &type->pmus[j];
2885 uncore_pmu_register(pmu);
2886 }
2887 }
2888
2889 for (i = 0; pci_uncores[i]; i++) {
2890 type = pci_uncores[i];
2891 for (j = 0; j < type->num_boxes; j++) {
2892 pmu = &type->pmus[j];
2893 uncore_pmu_register(pmu);
2894 }
2895 }
2896
2897 return 0;
2898}
2899
2900static int __init intel_uncore_init(void)
2901{
2902 int ret;
2903
2904 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
2905 return -ENODEV;
2906
14371cce 2907 ret = uncore_pci_init();
2908 if (ret)
2909 goto fail;
2910 ret = uncore_cpu_init();
2911 if (ret) {
2912 uncore_pci_exit();
2913 goto fail;
2914 }
2915
2916 uncore_pmus_register();
2917 return 0;
2918fail:
2919 return ret;
2920}
2921device_initcall(intel_uncore_init);