perf/x86/intel: Fix SNB-EP CBO and PCU uncore PMU filter management
arch/x86/kernel/cpu/perf_event_intel_uncore.c
1#include "perf_event_intel_uncore.h"
2
3static struct intel_uncore_type *empty_uncore[] = { NULL, };
4static struct intel_uncore_type **msr_uncores = empty_uncore;
5static struct intel_uncore_type **pci_uncores = empty_uncore;
6/* pci bus to socket mapping */
7static int pcibus_to_physid[256] = { [0 ... 255] = -1, };
8
9static DEFINE_RAW_SPINLOCK(uncore_box_lock);
10
11/* mask of cpus that collect uncore events */
12static cpumask_t uncore_cpu_mask;
13
14/* constraint for the fixed counter */
15static struct event_constraint constraint_fixed =
16 EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
17static struct event_constraint constraint_empty =
18 EVENT_CONSTRAINT(0, 0, 0);
19
20#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
21 ((1ULL << (n)) - 1)))
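/*
 * __BITS_VALUE(x, i, n) extracts the i-th n-bit wide field of x.  For
 * example, __BITS_VALUE(atomic_read(&er->ref), 2, 6) returns bits 12-17
 * of the shared-reg reference count, i.e. the refcount slice kept for
 * the third filter field by the constraint code below.
 */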
22
23DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
24DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
25DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
26DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
27DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
28DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
29DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
30DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
31DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
32DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
33DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
34DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
35DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
36DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
37DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
38DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
39DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
40DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
41DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
42DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
43DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
44
45static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
46{
47 u64 count;
48
49 rdmsrl(event->hw.event_base, count);
50
51 return count;
52}
53
54/*
55 * generic get constraint function for shared match/mask registers.
56 */
57static struct event_constraint *
58uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
59{
60 struct intel_uncore_extra_reg *er;
61 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
62 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
63 unsigned long flags;
64 bool ok = false;
65
66 /*
67 * reg->alloc can be set due to existing state, so for fake box we
68 * need to ignore this, otherwise we might fail to allocate proper
69 * fake state for this extra reg constraint.
70 */
71 if (reg1->idx == EXTRA_REG_NONE ||
72 (!uncore_box_is_fake(box) && reg1->alloc))
73 return NULL;
74
75 er = &box->shared_regs[reg1->idx];
76 raw_spin_lock_irqsave(&er->lock, flags);
77 if (!atomic_read(&er->ref) ||
78 (er->config1 == reg1->config && er->config2 == reg2->config)) {
79 atomic_inc(&er->ref);
80 er->config1 = reg1->config;
81 er->config2 = reg2->config;
82 ok = true;
83 }
84 raw_spin_unlock_irqrestore(&er->lock, flags);
85
86 if (ok) {
87 if (!uncore_box_is_fake(box))
88 reg1->alloc = 1;
89 return NULL;
90 }
91
92 return &constraint_empty;
93}
94
95static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
96{
97 struct intel_uncore_extra_reg *er;
98 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
99
100 /*
101 * Only put constraint if extra reg was actually allocated. Also
102 * takes care of events which do not use an extra shared reg.
103 *
104 * Also, if this is a fake box we shouldn't touch any event state
105 * (reg->alloc) and we don't care about leaving inconsistent box
106 * state either since it will be thrown out.
107 */
108 if (uncore_box_is_fake(box) || !reg1->alloc)
109 return;
110
111 er = &box->shared_regs[reg1->idx];
112 atomic_dec(&er->ref);
113 reg1->alloc = 0;
114}
115
116static u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
117{
118 struct intel_uncore_extra_reg *er;
119 unsigned long flags;
120 u64 config;
121
122 er = &box->shared_regs[idx];
123
124 raw_spin_lock_irqsave(&er->lock, flags);
125 config = er->config;
126 raw_spin_unlock_irqrestore(&er->lock, flags);
127
128 return config;
129}
130
131/* Sandy Bridge-EP uncore support */
132static struct intel_uncore_type snbep_uncore_cbox;
133static struct intel_uncore_type snbep_uncore_pcu;
134
135static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
136{
137 struct pci_dev *pdev = box->pci_dev;
138 int box_ctl = uncore_pci_box_ctl(box);
139 u32 config = 0;
140
141 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
142 config |= SNBEP_PMON_BOX_CTL_FRZ;
143 pci_write_config_dword(pdev, box_ctl, config);
144 }
145}
146
147static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
148{
149 struct pci_dev *pdev = box->pci_dev;
150 int box_ctl = uncore_pci_box_ctl(box);
151 u32 config = 0;
152
153 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
154 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
155 pci_write_config_dword(pdev, box_ctl, config);
156 }
157}
158
159static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
160{
161 struct pci_dev *pdev = box->pci_dev;
162 struct hw_perf_event *hwc = &event->hw;
163
164 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
165}
166
167static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
168{
169 struct pci_dev *pdev = box->pci_dev;
170 struct hw_perf_event *hwc = &event->hw;
171
172 pci_write_config_dword(pdev, hwc->config_base, hwc->config);
173}
174
175static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
176{
177 struct pci_dev *pdev = box->pci_dev;
178 struct hw_perf_event *hwc = &event->hw;
179 u64 count = 0;
180
181 pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
182 pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
183
184 return count;
185}
186
187static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
188{
189 struct pci_dev *pdev = box->pci_dev;
190
191 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
192}
193
194static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
195{
196 u64 config;
197 unsigned msr;
198
199 msr = uncore_msr_box_ctl(box);
200 if (msr) {
201 rdmsrl(msr, config);
202 config |= SNBEP_PMON_BOX_CTL_FRZ;
203 wrmsrl(msr, config);
204 }
205}
206
207static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
208{
209 u64 config;
210 unsigned msr;
211
212 msr = uncore_msr_box_ctl(box);
213 if (msr) {
214 rdmsrl(msr, config);
215 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
216 wrmsrl(msr, config);
217 }
218}
219
220static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
221{
222 struct hw_perf_event *hwc = &event->hw;
223 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
224
225 if (reg1->idx != EXTRA_REG_NONE)
226 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
227
228 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
229}
230
231static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
232 struct perf_event *event)
233{
234 struct hw_perf_event *hwc = &event->hw;
235
236 wrmsrl(hwc->config_base, hwc->config);
237}
238
239static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
240{
241 unsigned msr = uncore_msr_box_ctl(box);
242
243 if (msr)
244 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
245}
246
247static struct attribute *snbep_uncore_formats_attr[] = {
248 &format_attr_event.attr,
249 &format_attr_umask.attr,
250 &format_attr_edge.attr,
251 &format_attr_inv.attr,
252 &format_attr_thresh8.attr,
253 NULL,
254};
255
256static struct attribute *snbep_uncore_ubox_formats_attr[] = {
257 &format_attr_event.attr,
258 &format_attr_umask.attr,
259 &format_attr_edge.attr,
260 &format_attr_inv.attr,
261 &format_attr_thresh5.attr,
262 NULL,
263};
264
265static struct attribute *snbep_uncore_cbox_formats_attr[] = {
266 &format_attr_event.attr,
267 &format_attr_umask.attr,
268 &format_attr_edge.attr,
269 &format_attr_tid_en.attr,
270 &format_attr_inv.attr,
271 &format_attr_thresh8.attr,
272 &format_attr_filter_tid.attr,
273 &format_attr_filter_nid.attr,
274 &format_attr_filter_state.attr,
275 &format_attr_filter_opc.attr,
276 NULL,
277};
278
279static struct attribute *snbep_uncore_pcu_formats_attr[] = {
280 &format_attr_event.attr,
281 &format_attr_occ_sel.attr,
282 &format_attr_edge.attr,
283 &format_attr_inv.attr,
284 &format_attr_thresh5.attr,
285 &format_attr_occ_invert.attr,
286 &format_attr_occ_edge.attr,
287 &format_attr_filter_band0.attr,
288 &format_attr_filter_band1.attr,
289 &format_attr_filter_band2.attr,
290 &format_attr_filter_band3.attr,
291 NULL,
292};
293
294static struct attribute *snbep_uncore_qpi_formats_attr[] = {
295 &format_attr_event_ext.attr,
296 &format_attr_umask.attr,
297 &format_attr_edge.attr,
298 &format_attr_inv.attr,
299 &format_attr_thresh8.attr,
300 NULL,
301};
302
303static struct uncore_event_desc snbep_uncore_imc_events[] = {
304 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
305 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
306 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
307 { /* end: all zeroes */ },
308};
309
310static struct uncore_event_desc snbep_uncore_qpi_events[] = {
311 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
312 INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
313 INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x02,umask=0x08"),
314 INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x03,umask=0x04"),
315 { /* end: all zeroes */ },
316};
317
318static struct attribute_group snbep_uncore_format_group = {
319 .name = "format",
320 .attrs = snbep_uncore_formats_attr,
321};
322
323static struct attribute_group snbep_uncore_ubox_format_group = {
324 .name = "format",
325 .attrs = snbep_uncore_ubox_formats_attr,
326};
327
328static struct attribute_group snbep_uncore_cbox_format_group = {
329 .name = "format",
330 .attrs = snbep_uncore_cbox_formats_attr,
331};
332
333static struct attribute_group snbep_uncore_pcu_format_group = {
334 .name = "format",
335 .attrs = snbep_uncore_pcu_formats_attr,
336};
337
338static struct attribute_group snbep_uncore_qpi_format_group = {
339 .name = "format",
340 .attrs = snbep_uncore_qpi_formats_attr,
341};
342
343#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
344 .init_box = snbep_uncore_msr_init_box, \
345 .disable_box = snbep_uncore_msr_disable_box, \
346 .enable_box = snbep_uncore_msr_enable_box, \
347 .disable_event = snbep_uncore_msr_disable_event, \
348 .enable_event = snbep_uncore_msr_enable_event, \
349 .read_counter = uncore_msr_read_counter
350
351static struct intel_uncore_ops snbep_uncore_msr_ops = {
352 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
353};
354
355static struct intel_uncore_ops snbep_uncore_pci_ops = {
356 .init_box = snbep_uncore_pci_init_box,
357 .disable_box = snbep_uncore_pci_disable_box,
358 .enable_box = snbep_uncore_pci_enable_box,
359 .disable_event = snbep_uncore_pci_disable_event,
360 .enable_event = snbep_uncore_pci_enable_event,
361 .read_counter = snbep_uncore_pci_read_counter,
362};
363
364static struct event_constraint snbep_uncore_cbox_constraints[] = {
365 UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
366 UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
367 UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
368 UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
369 UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
370 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
371 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
372 UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
373 UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
374 UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
375 UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
376 UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
377 EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
378 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
379 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
380 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
381 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
382 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
383 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
384 UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
385 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
386 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
387 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
388 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
389 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
390 EVENT_CONSTRAINT_END
391};
392
393static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
394 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
395 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
396 UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
397 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
398 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
399 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
400 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
401 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
402 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
403 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
404 EVENT_CONSTRAINT_END
405};
406
407static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
408 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
409 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
410 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
411 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
412 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
413 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
414 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
415 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
416 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
417 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
418 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
419 UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
420 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
421 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
422 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
423 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
424 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
425 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
426 EVENT_CONSTRAINT_END
427};
428
429static struct intel_uncore_type snbep_uncore_ubox = {
430 .name = "ubox",
431 .num_counters = 2,
432 .num_boxes = 1,
433 .perf_ctr_bits = 44,
434 .fixed_ctr_bits = 48,
435 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
436 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
437 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
438 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
439 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
440 .ops = &snbep_uncore_msr_ops,
441 .format_group = &snbep_uncore_ubox_format_group,
442};
443
444static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
445 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
446 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
447 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
448 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
449 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
450 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
451 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
452 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
453 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
454 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
455 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
456 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
457 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
458 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
459 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
460 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
461 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
462 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
463 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
464 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
465 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
466 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
467 EVENT_EXTRA_END
468};
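/*
 * Note on the Cbox filter management below: all counters in a Cbox share
 * one filter MSR.  Each bit of reg1->idx selects one filter field (0x1 tid,
 * 0x2 nid, 0x4 state, 0x8 opc, see snbep_cbox_filter_mask()), and the
 * number of users of each field is refcounted in a 6-bit slice of er->ref
 * via __BITS_VALUE().
 */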
469
470static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
471{
472 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
473 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
474 int i;
475
476 if (uncore_box_is_fake(box))
477 return;
478
479 for (i = 0; i < 5; i++) {
480 if (reg1->alloc & (0x1 << i))
481 atomic_sub(1 << (i * 6), &er->ref);
482 }
483 reg1->alloc = 0;
484}
485
486static struct event_constraint *
487__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
488 u64 (*cbox_filter_mask)(int fields))
489{
490 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
491 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
492 int i, alloc = 0;
493 unsigned long flags;
494 u64 mask;
495
496 if (reg1->idx == EXTRA_REG_NONE)
497 return NULL;
498
499 raw_spin_lock_irqsave(&er->lock, flags);
500 for (i = 0; i < 5; i++) {
501 if (!(reg1->idx & (0x1 << i)))
502 continue;
503 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
504 continue;
505
506 mask = cbox_filter_mask(0x1 << i);
507 if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
508 !((reg1->config ^ er->config) & mask)) {
509 atomic_add(1 << (i * 6), &er->ref);
510 er->config &= ~mask;
511 er->config |= reg1->config & mask;
512 alloc |= (0x1 << i);
513 } else {
514 break;
515 }
516 }
517 raw_spin_unlock_irqrestore(&er->lock, flags);
518 if (i < 5)
519 goto fail;
520
521 if (!uncore_box_is_fake(box))
522 reg1->alloc |= alloc;
523
524 return 0;
525fail:
526 for (; i >= 0; i--) {
527 if (alloc & (0x1 << i))
528 atomic_sub(1 << (i * 6), &er->ref);
529 }
530 return &constraint_empty;
531}
532
533static u64 snbep_cbox_filter_mask(int fields)
534{
535 u64 mask = 0;
536
537 if (fields & 0x1)
538 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
539 if (fields & 0x2)
540 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
541 if (fields & 0x4)
542 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
543 if (fields & 0x8)
544 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
545
546 return mask;
547}
548
549static struct event_constraint *
550snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
551{
552 return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
553}
554
555static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
556{
557 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
558 struct extra_reg *er;
559 int idx = 0;
560
561 for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
562 if (er->event != (event->hw.config & er->config_mask))
563 continue;
564 idx |= er->idx;
565 }
566
567 if (idx) {
568 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
569 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
570 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
571 reg1->idx = idx;
572 }
573 return 0;
574}
575
576static struct intel_uncore_ops snbep_uncore_cbox_ops = {
577 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
578 .hw_config = snbep_cbox_hw_config,
579 .get_constraint = snbep_cbox_get_constraint,
580 .put_constraint = snbep_cbox_put_constraint,
581};
582
583static struct intel_uncore_type snbep_uncore_cbox = {
584 .name = "cbox",
585 .num_counters = 4,
586 .num_boxes = 8,
587 .perf_ctr_bits = 44,
588 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
589 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
590 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
591 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
592 .msr_offset = SNBEP_CBO_MSR_OFFSET,
593 .num_shared_regs = 1,
594 .constraints = snbep_uncore_cbox_constraints,
595 .ops = &snbep_uncore_cbox_ops,
596 .format_group = &snbep_uncore_cbox_format_group,
597};
598
599static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
600{
601 struct hw_perf_event *hwc = &event->hw;
602 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
603 u64 config = reg1->config;
604
605 if (new_idx > reg1->idx)
606 config <<= 8 * (new_idx - reg1->idx);
607 else
608 config >>= 8 * (reg1->idx - new_idx);
609
610 if (modify) {
611 hwc->config += new_idx - reg1->idx;
612 reg1->config = config;
613 reg1->idx = new_idx;
614 }
615 return config;
616}
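/*
 * The PCU filter MSR holds four 8-bit frequency-band thresholds
 * (filter_band0-3, one byte lane each).  snbep_pcu_alter_er() moves an
 * event's band config between byte lanes; e.g. going from band 0 to band 2
 * shifts the config left by 16 bits and, if 'modify' is set, bumps the
 * event select by 2 so it tracks the matching band occupancy event.
 */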
617
618static struct event_constraint *
619snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
620{
621 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
622 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
623 unsigned long flags;
624 int idx = reg1->idx;
625 u64 mask, config1 = reg1->config;
626 bool ok = false;
627
628 if (reg1->idx == EXTRA_REG_NONE ||
629 (!uncore_box_is_fake(box) && reg1->alloc))
630 return NULL;
631again:
632 mask = 0xff << (idx * 8);
633 raw_spin_lock_irqsave(&er->lock, flags);
634 if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
635 !((config1 ^ er->config) & mask)) {
636 atomic_add(1 << (idx * 8), &er->ref);
637 er->config &= ~mask;
638 er->config |= config1 & mask;
639 ok = true;
640 }
641 raw_spin_unlock_irqrestore(&er->lock, flags);
642
643 if (!ok) {
644 idx = (idx + 1) % 4;
645 if (idx != reg1->idx) {
646 config1 = snbep_pcu_alter_er(event, idx, false);
647 goto again;
648 }
649 return &constraint_empty;
650 }
651
652 if (!uncore_box_is_fake(box)) {
653 if (idx != reg1->idx)
654 snbep_pcu_alter_er(event, idx, true);
655 reg1->alloc = 1;
656 }
657 return NULL;
658}
659
660static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
661{
662 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
663 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
664
665 if (uncore_box_is_fake(box) || !reg1->alloc)
666 return;
667
668 atomic_sub(1 << (reg1->idx * 8), &er->ref);
669 reg1->alloc = 0;
670}
671
672static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
673{
674 struct hw_perf_event *hwc = &event->hw;
675 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
676 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
677
678 if (ev_sel >= 0xb && ev_sel <= 0xe) {
679 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
680 reg1->idx = ev_sel - 0xb;
681 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
682 }
683 return 0;
684}
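/*
 * Event selects 0xb-0xe each use one frequency band filter: band
 * (ev_sel - 0xb) lives in the corresponding byte lane of config1, which
 * is why only that byte lane is kept in reg1->config above.
 */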
685
686static struct intel_uncore_ops snbep_uncore_pcu_ops = {
687 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
688 .hw_config = snbep_pcu_hw_config,
689 .get_constraint = snbep_pcu_get_constraint,
690 .put_constraint = snbep_pcu_put_constraint,
691};
692
693static struct intel_uncore_type snbep_uncore_pcu = {
694 .name = "pcu",
695 .num_counters = 4,
696 .num_boxes = 1,
697 .perf_ctr_bits = 48,
698 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
699 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
700 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
701 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
702 .num_shared_regs = 1,
703 .ops = &snbep_uncore_pcu_ops,
704 .format_group = &snbep_uncore_pcu_format_group,
705};
706
707static struct intel_uncore_type *snbep_msr_uncores[] = {
708 &snbep_uncore_ubox,
709 &snbep_uncore_cbox,
710 &snbep_uncore_pcu,
711 NULL,
712};
713
714#define SNBEP_UNCORE_PCI_COMMON_INIT() \
715 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
716 .event_ctl = SNBEP_PCI_PMON_CTL0, \
717 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \
718 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
719 .ops = &snbep_uncore_pci_ops, \
720 .format_group = &snbep_uncore_format_group
721
722static struct intel_uncore_type snbep_uncore_ha = {
723 .name = "ha",
724 .num_counters = 4,
725 .num_boxes = 1,
726 .perf_ctr_bits = 48,
727 SNBEP_UNCORE_PCI_COMMON_INIT(),
728};
729
730static struct intel_uncore_type snbep_uncore_imc = {
731 .name = "imc",
732 .num_counters = 4,
733 .num_boxes = 4,
734 .perf_ctr_bits = 48,
735 .fixed_ctr_bits = 48,
736 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
737 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
738 .event_descs = snbep_uncore_imc_events,
739 SNBEP_UNCORE_PCI_COMMON_INIT(),
740};
741
742static struct intel_uncore_type snbep_uncore_qpi = {
743 .name = "qpi",
744 .num_counters = 4,
745 .num_boxes = 2,
746 .perf_ctr_bits = 48,
747 .perf_ctr = SNBEP_PCI_PMON_CTR0,
748 .event_ctl = SNBEP_PCI_PMON_CTL0,
749 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
750 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
751 .ops = &snbep_uncore_pci_ops,
752 .event_descs = snbep_uncore_qpi_events,
753 .format_group = &snbep_uncore_qpi_format_group,
754};
755
756
757static struct intel_uncore_type snbep_uncore_r2pcie = {
758 .name = "r2pcie",
759 .num_counters = 4,
760 .num_boxes = 1,
761 .perf_ctr_bits = 44,
762 .constraints = snbep_uncore_r2pcie_constraints,
763 SNBEP_UNCORE_PCI_COMMON_INIT(),
764};
765
766static struct intel_uncore_type snbep_uncore_r3qpi = {
767 .name = "r3qpi",
768 .num_counters = 3,
769 .num_boxes = 2,
770 .perf_ctr_bits = 44,
771 .constraints = snbep_uncore_r3qpi_constraints,
772 SNBEP_UNCORE_PCI_COMMON_INIT(),
773};
774
775static struct intel_uncore_type *snbep_pci_uncores[] = {
776 &snbep_uncore_ha,
777 &snbep_uncore_imc,
778 &snbep_uncore_qpi,
779 &snbep_uncore_r2pcie,
780 &snbep_uncore_r3qpi,
781 NULL,
782};
783
784static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
785 { /* Home Agent */
786 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
787 .driver_data = (unsigned long)&snbep_uncore_ha,
788 },
789 { /* MC Channel 0 */
790 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
791 .driver_data = (unsigned long)&snbep_uncore_imc,
792 },
793 { /* MC Channel 1 */
794 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
795 .driver_data = (unsigned long)&snbep_uncore_imc,
796 },
797 { /* MC Channel 2 */
798 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
799 .driver_data = (unsigned long)&snbep_uncore_imc,
800 },
801 { /* MC Channel 3 */
802 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
803 .driver_data = (unsigned long)&snbep_uncore_imc,
804 },
805 { /* QPI Port 0 */
806 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
807 .driver_data = (unsigned long)&snbep_uncore_qpi,
808 },
809 { /* QPI Port 1 */
810 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
811 .driver_data = (unsigned long)&snbep_uncore_qpi,
812 },
813 { /* P2PCIe */
814 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
815 .driver_data = (unsigned long)&snbep_uncore_r2pcie,
816 },
817 { /* R3QPI Link 0 */
818 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
819 .driver_data = (unsigned long)&snbep_uncore_r3qpi,
820 },
821 { /* R3QPI Link 1 */
822 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
823 .driver_data = (unsigned long)&snbep_uncore_r3qpi,
824 },
825 { /* end: all zeroes */ }
826};
827
828static struct pci_driver snbep_uncore_pci_driver = {
829 .name = "snbep_uncore",
830 .id_table = snbep_uncore_pci_ids,
831};
832
833/*
834 * build pci bus to socket mapping
835 */
836static int snbep_pci2phy_map_init(void)
837{
838 struct pci_dev *ubox_dev = NULL;
839 int i, bus, nodeid;
840 int err = 0;
841 u32 config = 0;
842
843 while (1) {
844 /* find the UBOX device */
845 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
846 PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX,
847 ubox_dev);
848 if (!ubox_dev)
849 break;
850 bus = ubox_dev->bus->number;
851 /* get the Node ID of the local register */
852 err = pci_read_config_dword(ubox_dev, 0x40, &config);
853 if (err)
854 break;
855 nodeid = config;
856 /* get the Node ID mapping */
857 err = pci_read_config_dword(ubox_dev, 0x54, &config);
858 if (err)
859 break;
860 /*
861 * every three bits in the Node ID mapping register maps
862 * to a particular node.
863 */
864 for (i = 0; i < 8; i++) {
865 if (nodeid == ((config >> (3 * i)) & 0x7)) {
866 pcibus_to_physid[bus] = i;
867 break;
868 }
869 }
870 };
871
872 if (ubox_dev)
873 pci_dev_put(ubox_dev);
874
875 return err ? pcibios_err_to_errno(err) : 0;
876}
877/* end of Sandy Bridge-EP uncore support */
878
879/* Sandy Bridge uncore support */
880static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
881{
882 struct hw_perf_event *hwc = &event->hw;
883
884 if (hwc->idx < UNCORE_PMC_IDX_FIXED)
885 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
886 else
887 wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
888}
889
890static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
891{
892 wrmsrl(event->hw.config_base, 0);
893}
894
895static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
896{
897 if (box->pmu->pmu_idx == 0) {
898 wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
899 SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
900 }
901}
902
903static struct uncore_event_desc snb_uncore_events[] = {
904 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
905 { /* end: all zeroes */ },
906};
907
908static struct attribute *snb_uncore_formats_attr[] = {
909 &format_attr_event.attr,
910 &format_attr_umask.attr,
911 &format_attr_edge.attr,
912 &format_attr_inv.attr,
913 &format_attr_cmask5.attr,
914 NULL,
915};
916
917static struct attribute_group snb_uncore_format_group = {
918 .name = "format",
919 .attrs = snb_uncore_formats_attr,
920};
921
922static struct intel_uncore_ops snb_uncore_msr_ops = {
923 .init_box = snb_uncore_msr_init_box,
924 .disable_event = snb_uncore_msr_disable_event,
925 .enable_event = snb_uncore_msr_enable_event,
926 .read_counter = uncore_msr_read_counter,
927};
928
929static struct event_constraint snb_uncore_cbox_constraints[] = {
930 UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
931 UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
932 EVENT_CONSTRAINT_END
933};
934
935static struct intel_uncore_type snb_uncore_cbox = {
936 .name = "cbox",
937 .num_counters = 2,
938 .num_boxes = 4,
939 .perf_ctr_bits = 44,
940 .fixed_ctr_bits = 48,
941 .perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
942 .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
943 .fixed_ctr = SNB_UNC_FIXED_CTR,
944 .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
945 .single_fixed = 1,
946 .event_mask = SNB_UNC_RAW_EVENT_MASK,
947 .msr_offset = SNB_UNC_CBO_MSR_OFFSET,
948 .constraints = snb_uncore_cbox_constraints,
949 .ops = &snb_uncore_msr_ops,
950 .format_group = &snb_uncore_format_group,
951 .event_descs = snb_uncore_events,
952};
953
954static struct intel_uncore_type *snb_msr_uncores[] = {
955 &snb_uncore_cbox,
956 NULL,
957};
958/* end of Sandy Bridge uncore support */
959
960/* Nehalem uncore support */
961static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
962{
963 wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
964}
965
966static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
967{
968 wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
969}
970
971static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
972{
973 struct hw_perf_event *hwc = &event->hw;
974
975 if (hwc->idx < UNCORE_PMC_IDX_FIXED)
976 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
977 else
978 wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
979}
980
981static struct attribute *nhm_uncore_formats_attr[] = {
982 &format_attr_event.attr,
983 &format_attr_umask.attr,
984 &format_attr_edge.attr,
985 &format_attr_inv.attr,
986 &format_attr_cmask8.attr,
987 NULL,
988};
989
990static struct attribute_group nhm_uncore_format_group = {
991 .name = "format",
992 .attrs = nhm_uncore_formats_attr,
993};
994
995static struct uncore_event_desc nhm_uncore_events[] = {
996 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
997 INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"),
998 INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"),
999 INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, "event=0x20,umask=0x01"),
1000 INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"),
1001 INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"),
1002 INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
1003 INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"),
1004 INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"),
1005 { /* end: all zeroes */ },
1006};
1007
1008static struct intel_uncore_ops nhm_uncore_msr_ops = {
1009 .disable_box = nhm_uncore_msr_disable_box,
1010 .enable_box = nhm_uncore_msr_enable_box,
1011 .disable_event = snb_uncore_msr_disable_event,
1012 .enable_event = nhm_uncore_msr_enable_event,
1013 .read_counter = uncore_msr_read_counter,
1014};
1015
1016static struct intel_uncore_type nhm_uncore = {
1017 .name = "",
1018 .num_counters = 8,
1019 .num_boxes = 1,
1020 .perf_ctr_bits = 48,
1021 .fixed_ctr_bits = 48,
1022 .event_ctl = NHM_UNC_PERFEVTSEL0,
1023 .perf_ctr = NHM_UNC_UNCORE_PMC0,
1024 .fixed_ctr = NHM_UNC_FIXED_CTR,
1025 .fixed_ctl = NHM_UNC_FIXED_CTR_CTRL,
1026 .event_mask = NHM_UNC_RAW_EVENT_MASK,
1027 .event_descs = nhm_uncore_events,
1028 .ops = &nhm_uncore_msr_ops,
1029 .format_group = &nhm_uncore_format_group,
1030};
1031
1032static struct intel_uncore_type *nhm_msr_uncores[] = {
1033 &nhm_uncore,
1034 NULL,
1035};
1036/* end of Nehalem uncore support */
1037
1038/* Nehalem-EX uncore support */
1039DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
1040DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
1041DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
1042DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
1043
1044static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
1045{
1046 wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
1047}
1048
1049static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
1050{
1051 unsigned msr = uncore_msr_box_ctl(box);
1052 u64 config;
1053
1054 if (msr) {
1055 rdmsrl(msr, config);
1056 config &= ~((1ULL << uncore_num_counters(box)) - 1);
1057 /* WBox has a fixed counter */
1058 if (uncore_msr_fixed_ctl(box))
1059 config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
1060 wrmsrl(msr, config);
1061 }
1062}
1063
1064static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
1065{
1066 unsigned msr = uncore_msr_box_ctl(box);
1067 u64 config;
1068
1069 if (msr) {
1070 rdmsrl(msr, config);
1071 config |= (1ULL << uncore_num_counters(box)) - 1;
1072 /* WBox has a fixed counter */
1073 if (uncore_msr_fixed_ctl(box))
1074 config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
1075 wrmsrl(msr, config);
1076 }
1077}
1078
1079static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1080{
1081 wrmsrl(event->hw.config_base, 0);
1082}
1083
1084static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1085{
1086 struct hw_perf_event *hwc = &event->hw;
1087
1088 if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
1089 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
1090 else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
1091 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
1092 else
1093 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
1094}
1095
1096#define NHMEX_UNCORE_OPS_COMMON_INIT() \
1097 .init_box = nhmex_uncore_msr_init_box, \
1098 .disable_box = nhmex_uncore_msr_disable_box, \
1099 .enable_box = nhmex_uncore_msr_enable_box, \
1100 .disable_event = nhmex_uncore_msr_disable_event, \
1101 .read_counter = uncore_msr_read_counter
1102
1103static struct intel_uncore_ops nhmex_uncore_ops = {
1104 NHMEX_UNCORE_OPS_COMMON_INIT(),
1105 .enable_event = nhmex_uncore_msr_enable_event,
1106};
1107
1108static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
1109 &format_attr_event.attr,
1110 &format_attr_edge.attr,
1111 NULL,
1112};
1113
1114static struct attribute_group nhmex_uncore_ubox_format_group = {
1115 .name = "format",
1116 .attrs = nhmex_uncore_ubox_formats_attr,
1117};
1118
1119static struct intel_uncore_type nhmex_uncore_ubox = {
1120 .name = "ubox",
1121 .num_counters = 1,
1122 .num_boxes = 1,
1123 .perf_ctr_bits = 48,
1124 .event_ctl = NHMEX_U_MSR_PMON_EV_SEL,
1125 .perf_ctr = NHMEX_U_MSR_PMON_CTR,
1126 .event_mask = NHMEX_U_PMON_RAW_EVENT_MASK,
1127 .box_ctl = NHMEX_U_MSR_PMON_GLOBAL_CTL,
1128 .ops = &nhmex_uncore_ops,
1129 .format_group = &nhmex_uncore_ubox_format_group
1130};
1131
1132static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
1133 &format_attr_event.attr,
1134 &format_attr_umask.attr,
1135 &format_attr_edge.attr,
1136 &format_attr_inv.attr,
1137 &format_attr_thresh8.attr,
1138 NULL,
1139};
1140
1141static struct attribute_group nhmex_uncore_cbox_format_group = {
1142 .name = "format",
1143 .attrs = nhmex_uncore_cbox_formats_attr,
1144};
1145
1146/* msr offset for each instance of cbox */
1147static unsigned nhmex_cbox_msr_offsets[] = {
1148 0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
1149};
1150
1151static struct intel_uncore_type nhmex_uncore_cbox = {
1152 .name = "cbox",
1153 .num_counters = 6,
1154 .num_boxes = 10,
1155 .perf_ctr_bits = 48,
1156 .event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0,
1157 .perf_ctr = NHMEX_C0_MSR_PMON_CTR0,
1158 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
1159 .box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL,
1160 .msr_offsets = nhmex_cbox_msr_offsets,
1161 .pair_ctr_ctl = 1,
1162 .ops = &nhmex_uncore_ops,
1163 .format_group = &nhmex_uncore_cbox_format_group
1164};
1165
1166static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
1167 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
1168 { /* end: all zeroes */ },
1169};
1170
1171static struct intel_uncore_type nhmex_uncore_wbox = {
1172 .name = "wbox",
1173 .num_counters = 4,
1174 .num_boxes = 1,
1175 .perf_ctr_bits = 48,
1176 .event_ctl = NHMEX_W_MSR_PMON_CNT0,
1177 .perf_ctr = NHMEX_W_MSR_PMON_EVT_SEL0,
1178 .fixed_ctr = NHMEX_W_MSR_PMON_FIXED_CTR,
1179 .fixed_ctl = NHMEX_W_MSR_PMON_FIXED_CTL,
1180 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
1181 .box_ctl = NHMEX_W_MSR_GLOBAL_CTL,
1182 .pair_ctr_ctl = 1,
1183 .event_descs = nhmex_uncore_wbox_events,
1184 .ops = &nhmex_uncore_ops,
1185 .format_group = &nhmex_uncore_cbox_format_group
1186};
1187
1188static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1189{
1190 struct hw_perf_event *hwc = &event->hw;
1191 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1192 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1193 int ctr, ev_sel;
1194
1195 ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
1196 NHMEX_B_PMON_CTR_SHIFT;
1197 ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
1198 NHMEX_B_PMON_CTL_EV_SEL_SHIFT;
1199
1200 /* events that do not use the match/mask registers */
1201 if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
1202 (ctr == 2 && ev_sel != 0x4) || ctr == 3)
1203 return 0;
1204
1205 if (box->pmu->pmu_idx == 0)
1206 reg1->reg = NHMEX_B0_MSR_MATCH;
1207 else
1208 reg1->reg = NHMEX_B1_MSR_MATCH;
1209 reg1->idx = 0;
1210 reg1->config = event->attr.config1;
1211 reg2->config = event->attr.config2;
1212 return 0;
1213}
1214
1215static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1216{
1217 struct hw_perf_event *hwc = &event->hw;
1218 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1219 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1220
1221 if (reg1->idx != EXTRA_REG_NONE) {
1222 wrmsrl(reg1->reg, reg1->config);
1223 wrmsrl(reg1->reg + 1, reg2->config);
1224 }
1225 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
1226 (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
1227}
1228
1229/*
1230 * The Bbox has 4 counters, but each counter monitors different events.
1231 * Use bits 6-7 in the event config to select counter.
1232 */
1233static struct event_constraint nhmex_uncore_bbox_constraints[] = {
1234 EVENT_CONSTRAINT(0 , 1, 0xc0),
1235 EVENT_CONSTRAINT(0x40, 2, 0xc0),
1236 EVENT_CONSTRAINT(0x80, 4, 0xc0),
1237 EVENT_CONSTRAINT(0xc0, 8, 0xc0),
1238 EVENT_CONSTRAINT_END,
1239};
1240
1241static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
1242 &format_attr_event5.attr,
1243 &format_attr_counter.attr,
1244 &format_attr_match.attr,
1245 &format_attr_mask.attr,
1246 NULL,
1247};
1248
1249static struct attribute_group nhmex_uncore_bbox_format_group = {
1250 .name = "format",
1251 .attrs = nhmex_uncore_bbox_formats_attr,
1252};
1253
1254static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
1255 NHMEX_UNCORE_OPS_COMMON_INIT(),
1256 .enable_event = nhmex_bbox_msr_enable_event,
1257 .hw_config = nhmex_bbox_hw_config,
1258 .get_constraint = uncore_get_constraint,
1259 .put_constraint = uncore_put_constraint,
1260};
1261
1262static struct intel_uncore_type nhmex_uncore_bbox = {
1263 .name = "bbox",
1264 .num_counters = 4,
1265 .num_boxes = 2,
1266 .perf_ctr_bits = 48,
1267 .event_ctl = NHMEX_B0_MSR_PMON_CTL0,
1268 .perf_ctr = NHMEX_B0_MSR_PMON_CTR0,
1269 .event_mask = NHMEX_B_PMON_RAW_EVENT_MASK,
1270 .box_ctl = NHMEX_B0_MSR_PMON_GLOBAL_CTL,
1271 .msr_offset = NHMEX_B_MSR_OFFSET,
1272 .pair_ctr_ctl = 1,
1273 .num_shared_regs = 1,
1274 .constraints = nhmex_uncore_bbox_constraints,
1275 .ops = &nhmex_uncore_bbox_ops,
1276 .format_group = &nhmex_uncore_bbox_format_group
1277};
1278
1279static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1280{
1281 struct hw_perf_event *hwc = &event->hw;
1282 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1283 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1284
1285 /* only TO_R_PROG_EV event uses the match/mask register */
1286 if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
1287 NHMEX_S_EVENT_TO_R_PROG_EV)
1288 return 0;
254298c7
YZ
1289
1290 if (box->pmu->pmu_idx == 0)
1291 reg1->reg = NHMEX_S0_MSR_MM_CFG;
1292 else
1293 reg1->reg = NHMEX_S1_MSR_MM_CFG;
1294 reg1->idx = 0;
1295 reg1->config = event->attr.config1;
1296 reg2->config = event->attr.config2;
1297 return 0;
1298}
1299
1300static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1301{
1302 struct hw_perf_event *hwc = &event->hw;
1303 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1304 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1305
1306 if (reg1->idx != EXTRA_REG_NONE) {
1307 wrmsrl(reg1->reg, 0);
1308 wrmsrl(reg1->reg + 1, reg1->config);
1309 wrmsrl(reg1->reg + 2, reg2->config);
1310 wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
1311 }
1312 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
1313}
1314
1315static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
1316 &format_attr_event.attr,
1317 &format_attr_umask.attr,
1318 &format_attr_edge.attr,
1319 &format_attr_inv.attr,
1320 &format_attr_thresh8.attr,
1321 &format_attr_match.attr,
1322 &format_attr_mask.attr,
1323 NULL,
1324};
1325
1326static struct attribute_group nhmex_uncore_sbox_format_group = {
1327 .name = "format",
1328 .attrs = nhmex_uncore_sbox_formats_attr,
1329};
1330
1331static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
1332 NHMEX_UNCORE_OPS_COMMON_INIT(),
1333 .enable_event = nhmex_sbox_msr_enable_event,
1334 .hw_config = nhmex_sbox_hw_config,
1335 .get_constraint = uncore_get_constraint,
1336 .put_constraint = uncore_put_constraint,
1337};
1338
1339static struct intel_uncore_type nhmex_uncore_sbox = {
1340 .name = "sbox",
1341 .num_counters = 4,
1342 .num_boxes = 2,
1343 .perf_ctr_bits = 48,
1344 .event_ctl = NHMEX_S0_MSR_PMON_CTL0,
1345 .perf_ctr = NHMEX_S0_MSR_PMON_CTR0,
1346 .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
1347 .box_ctl = NHMEX_S0_MSR_PMON_GLOBAL_CTL,
1348 .msr_offset = NHMEX_S_MSR_OFFSET,
1349 .pair_ctr_ctl = 1,
1350 .num_shared_regs = 1,
1351 .ops = &nhmex_uncore_sbox_ops,
1352 .format_group = &nhmex_uncore_sbox_format_group
1353};
1354
1355enum {
1356 EXTRA_REG_NHMEX_M_FILTER,
1357 EXTRA_REG_NHMEX_M_DSP,
1358 EXTRA_REG_NHMEX_M_ISS,
1359 EXTRA_REG_NHMEX_M_MAP,
1360 EXTRA_REG_NHMEX_M_MSC_THR,
1361 EXTRA_REG_NHMEX_M_PGT,
1362 EXTRA_REG_NHMEX_M_PLD,
1363 EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
1364};
1365
1366static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
1367 MBOX_INC_SEL_EXTAR_REG(0x0, DSP),
1368 MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR),
1369 MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR),
1370 MBOX_INC_SEL_EXTAR_REG(0x9, ISS),
1371 /* event 0xa uses two extra registers */
1372 MBOX_INC_SEL_EXTAR_REG(0xa, ISS),
1373 MBOX_INC_SEL_EXTAR_REG(0xa, PLD),
1374 MBOX_INC_SEL_EXTAR_REG(0xb, PLD),
1375 /* events 0xd ~ 0x10 use the same extra register */
1376 MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC),
1377 MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC),
1378 MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC),
1379 MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC),
1380 MBOX_INC_SEL_EXTAR_REG(0x16, PGT),
1381 MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
1382 MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
1383 MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
1384 MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
1385 EVENT_EXTRA_END
1386};
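/*
 * An mbox event may need up to two of the extra registers above.  Their
 * indices and MSR addresses are packed into reg1->idx (two 8-bit fields)
 * and reg1->reg (two 16-bit fields) by nhmex_mbox_hw_config(), and the two
 * 32-bit halves of config1 carry the contents for the two registers.
 */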
1387
1388/* Nehalem-EX or Westmere-EX ? */
1389static bool uncore_nhmex;
1390
1391static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
1392{
1393 struct intel_uncore_extra_reg *er;
1394 unsigned long flags;
1395 bool ret = false;
1396 u64 mask;
1397
1398 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
1399 er = &box->shared_regs[idx];
1400 raw_spin_lock_irqsave(&er->lock, flags);
1401 if (!atomic_read(&er->ref) || er->config == config) {
1402 atomic_inc(&er->ref);
1403 er->config = config;
1404 ret = true;
1405 }
1406 raw_spin_unlock_irqrestore(&er->lock, flags);
1407
1408 return ret;
1409 }
1410 /*
1411 * The ZDP_CTL_FVC MSR has 4 fields which are used to control
1412 * events 0xd ~ 0x10. Besides these 4 fields, there are additional
1413 * fields which are shared.
1414 */
1415 idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1416 if (WARN_ON_ONCE(idx >= 4))
1417 return false;
1418
1419 /* mask of the shared fields */
1420 if (uncore_nhmex)
1421 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
1422 else
1423 mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
1424 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
1425
1426 raw_spin_lock_irqsave(&er->lock, flags);
1427 /* add mask of the non-shared field if it's in use */
1428 if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
1429 if (uncore_nhmex)
1430 mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1431 else
1432 mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1433 }
1434
1435 if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
1436 atomic_add(1 << (idx * 8), &er->ref);
1437 if (uncore_nhmex)
1438 mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
1439 NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1440 else
1441 mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
1442 WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1443 er->config &= ~mask;
1444 er->config |= (config & mask);
1445 ret = true;
1446 }
1447 raw_spin_unlock_irqrestore(&er->lock, flags);
1448
1449 return ret;
1450}
1451
1452static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
1453{
1454 struct intel_uncore_extra_reg *er;
1455
1456 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
1457 er = &box->shared_regs[idx];
1458 atomic_dec(&er->ref);
1459 return;
1460 }
1461
1462 idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1463 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
1464 atomic_sub(1 << (idx * 8), &er->ref);
1465}
1466
1467static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
1468{
1469 struct hw_perf_event *hwc = &event->hw;
1470 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1471 int idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
1472 u64 config = reg1->config;
1473
1474 /* get the non-shared control bits and shift them */
1475 idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1476 if (uncore_nhmex)
1477 config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1478 else
1479 config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
1480 if (new_idx > orig_idx) {
1481 idx = new_idx - orig_idx;
1482 config <<= 3 * idx;
1483 } else {
1484 idx = orig_idx - new_idx;
1485 config >>= 3 * idx;
1486 }
1487
1488 /* add the shared control bits back */
1489 if (uncore_nhmex)
1490 config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
1491 else
1492 config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
1493 config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
1494 if (modify) {
1495 /* adjust the main event selector */
1496 if (new_idx > orig_idx)
1497 hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
1498 else
1499 hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
1500 reg1->config = config;
1501 reg1->idx = ~0xff | new_idx;
1502 }
1503 return config;
1504}
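/*
 * Events 0xd-0x10 all program ZDP_CTL_FVC: each event owns a 3-bit
 * sub-field, so nhmex_mbox_alter_er() shifts the event's private bits by
 * multiples of 3 when moving it to another slot and carries the shared
 * FVC bits over unchanged.
 */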
1505
1506static struct event_constraint *
1507nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1508{
1509 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1510 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
1511 int i, idx[2], alloc = 0;
1512 u64 config1 = reg1->config;
1513
1514 idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
1515 idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
1516again:
1517 for (i = 0; i < 2; i++) {
1518 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
1519 idx[i] = 0xff;
1520
1521 if (idx[i] == 0xff)
1522 continue;
1523
1524 if (!nhmex_mbox_get_shared_reg(box, idx[i],
1525 __BITS_VALUE(config1, i, 32)))
1526 goto fail;
1527 alloc |= (0x1 << i);
1528 }
1529
1530 /* for the match/mask registers */
1531 if (reg2->idx != EXTRA_REG_NONE &&
1532 (uncore_box_is_fake(box) || !reg2->alloc) &&
1533 !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
1534 goto fail;
1535
1536 /*
1537 * If it's a fake box -- as per validate_{group,event}() we
1538 * shouldn't touch event state and we can avoid doing so
1539 * since both will only call get_event_constraints() once
1540 * on each event, this avoids the need for reg->alloc.
1541 */
1542 if (!uncore_box_is_fake(box)) {
1543 if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
1544 nhmex_mbox_alter_er(event, idx[0], true);
1545 reg1->alloc |= alloc;
1546 if (reg2->idx != EXTRA_REG_NONE)
1547 reg2->alloc = 1;
1548 }
1549 return NULL;
1550fail:
1551 if (idx[0] != 0xff && !(alloc & 0x1) &&
1552 idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
1553 /*
1554 * events 0xd ~ 0x10 are functionally identical, but are
1555 * controlled by different fields in the ZDP_CTL_FVC
1556 * register. If we failed to take one field, try the
1557 * remaining 3 choices.
1558 */
1559 BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
1560 idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1561 idx[0] = (idx[0] + 1) % 4;
1562 idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
1563 if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
1564 config1 = nhmex_mbox_alter_er(event, idx[0], false);
1565 goto again;
1566 }
1567 }
1568
1569 if (alloc & 0x1)
1570 nhmex_mbox_put_shared_reg(box, idx[0]);
1571 if (alloc & 0x2)
1572 nhmex_mbox_put_shared_reg(box, idx[1]);
1573 return &constraint_empty;
1574}
1575
1576static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
1577{
1578 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1579 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
1580
1581 if (uncore_box_is_fake(box))
1582 return;
1583
1584 if (reg1->alloc & 0x1)
1585 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
1586 if (reg1->alloc & 0x2)
1587 nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
1588 reg1->alloc = 0;
1589
1590 if (reg2->alloc) {
1591 nhmex_mbox_put_shared_reg(box, reg2->idx);
1592 reg2->alloc = 0;
1593 }
1594}
1595
1596static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
1597{
1598 if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
1599 return er->idx;
1600 return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
1601}
1602
1603static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1604{
1605 struct intel_uncore_type *type = box->pmu->type;
1606 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1607 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
1608 struct extra_reg *er;
1609 unsigned msr;
1610 int reg_idx = 0;
1611 /*
1612 * The mbox events may require at most 2 extra MSRs. But only
1613 * the lower 32 bits in these MSRs are significant, so we can use
1614 * config1 to pass two MSRs' config.
1615 */
1616 for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
1617 if (er->event != (event->hw.config & er->config_mask))
1618 continue;
1619 if (event->attr.config1 & ~er->valid_mask)
1620 return -EINVAL;
1621
1622 msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
1623 if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
1624 return -EINVAL;
1625
1626 /* always use the 32~63 bits to pass the PLD config */
1627 if (er->idx == EXTRA_REG_NHMEX_M_PLD)
1628 reg_idx = 1;
1629 else if (WARN_ON_ONCE(reg_idx > 0))
1630 return -EINVAL;
1631
1632 reg1->idx &= ~(0xff << (reg_idx * 8));
1633 reg1->reg &= ~(0xffff << (reg_idx * 16));
1634 reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
1635 reg1->reg |= msr << (reg_idx * 16);
1636 reg1->config = event->attr.config1;
1637 reg_idx++;
1638 }
1639 /*
1640 * The mbox only provides the ability to perform address matching
1641 * for the PLD events.
1642 */
1643 if (reg_idx == 2) {
1644 reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
1645 if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
1646 reg2->config = event->attr.config2;
1647 else
1648 reg2->config = ~0ULL;
1649 if (box->pmu->pmu_idx == 0)
1650 reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
1651 else
1652 reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
1653 }
1654 return 0;
1655}
1656
1657static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
1658{
1659 struct intel_uncore_extra_reg *er;
1660 unsigned long flags;
1661 u64 config;
1662
1663 if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
1664 return box->shared_regs[idx].config;
1665
1666 er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
1667 raw_spin_lock_irqsave(&er->lock, flags);
1668 config = er->config;
1669 raw_spin_unlock_irqrestore(&er->lock, flags);
1670 return config;
1671}
1672
1673static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1674{
1675 struct hw_perf_event *hwc = &event->hw;
1676 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1677 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1678 int idx;
1679
1680 idx = __BITS_VALUE(reg1->idx, 0, 8);
1681 if (idx != 0xff)
1682 wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
1683 nhmex_mbox_shared_reg_config(box, idx));
1684 idx = __BITS_VALUE(reg1->idx, 1, 8);
1685 if (idx != 0xff)
1686 wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
1687 nhmex_mbox_shared_reg_config(box, idx));
1688
1689 if (reg2->idx != EXTRA_REG_NONE) {
1690 wrmsrl(reg2->reg, 0);
1691 if (reg2->config != ~0ULL) {
1692 wrmsrl(reg2->reg + 1,
1693 reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
1694 wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
1695 (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
1696 wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
1697 }
1698 }
1699
1700 wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
1701}
1702
1703DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3");
1704DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5");
1705DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6");
1706DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7");
1707DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13");
1708DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21");
1709DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63");
1710DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33");
1711DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61");
1712DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31");
1713DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31");
1714DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31");
1715DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
1716DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31");
1717DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31");
1718DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63");
254298c7
YZ
1719
1720static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
1721 &format_attr_count_mode.attr,
1722 &format_attr_storage_mode.attr,
1723 &format_attr_wrap_mode.attr,
1724 &format_attr_flag_mode.attr,
1725 &format_attr_inc_sel.attr,
1726 &format_attr_set_flag_sel.attr,
ebb6cc03 1727 &format_attr_filter_cfg_en.attr,
254298c7
YZ
1728 &format_attr_filter_match.attr,
1729 &format_attr_filter_mask.attr,
1730 &format_attr_dsp.attr,
1731 &format_attr_thr.attr,
1732 &format_attr_fvc.attr,
1733 &format_attr_pgt.attr,
1734 &format_attr_map.attr,
1735 &format_attr_iss.attr,
1736 &format_attr_pld.attr,
fcde10e9
YZ
1737 NULL,
1738};
1739
254298c7
YZ
1740static struct attribute_group nhmex_uncore_mbox_format_group = {
1741 .name = "format",
1742 .attrs = nhmex_uncore_mbox_formats_attr,
fcde10e9
YZ
1743};
1744
254298c7
YZ
1745static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
1746 INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
1747 INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
1748 { /* end: all zeroes */ },
fcde10e9
YZ
1749};
1750
cb37af77
YZ
1751static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
1752 INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
1753 INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
1754 { /* end: all zeroes */ },
1755};
1756
254298c7
YZ
1757static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
1758 NHMEX_UNCORE_OPS_COMMON_INIT(),
1759 .enable_event = nhmex_mbox_msr_enable_event,
1760 .hw_config = nhmex_mbox_hw_config,
1761 .get_constraint = nhmex_mbox_get_constraint,
1762 .put_constraint = nhmex_mbox_put_constraint,
fcde10e9
YZ
1763};
1764
254298c7
YZ
1765static struct intel_uncore_type nhmex_uncore_mbox = {
1766 .name = "mbox",
1767 .num_counters = 6,
1768 .num_boxes = 2,
1769 .perf_ctr_bits = 48,
1770 .event_ctl = NHMEX_M0_MSR_PMU_CTL0,
1771 .perf_ctr = NHMEX_M0_MSR_PMU_CNT0,
1772 .event_mask = NHMEX_M_PMON_RAW_EVENT_MASK,
1773 .box_ctl = NHMEX_M0_MSR_GLOBAL_CTL,
1774 .msr_offset = NHMEX_M_MSR_OFFSET,
1775 .pair_ctr_ctl = 1,
1776 .num_shared_regs = 8,
1777 .event_descs = nhmex_uncore_mbox_events,
1778 .ops = &nhmex_uncore_mbox_ops,
1779 .format_group = &nhmex_uncore_mbox_format_group,
fcde10e9
YZ
1780};
1781
46bdd905 1782static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
254298c7
YZ
1783{
1784 struct hw_perf_event *hwc = &event->hw;
1785 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
fcde10e9 1786
ebb6cc03 1787 /* adjust the main event selector and extra register index */
254298c7
YZ
1788 if (reg1->idx % 2) {
1789 reg1->idx--;
1790 hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
1791 } else {
1792 reg1->idx++;
1793 hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
1794 }
1795
ebb6cc03 1796 /* adjust extra register config */
254298c7 1797 switch (reg1->idx % 6) {
254298c7 1798 case 2:
ebb6cc03 1799 /* shift bits 8~15 down to bits 0~7 */
254298c7
YZ
1800 reg1->config >>= 8;
1801 break;
1802 case 3:
ebb6cc03 1803 /* shift bits 0~7 up to bits 8~15 */
254298c7
YZ
1804 reg1->config <<= 8;
1805 break;
254298c7
YZ
1806 }
1807}
1808
1809/*
1810 * Each rbox has 4 event sets, which monitor PQI ports 0~3 or 4~7.
1811 * An event set consists of 6 events; the 3rd and 4th events in
1812 * an event set use the same extra register, so an event set uses
1813 * 5 extra registers.
1814 */
1815static struct event_constraint *
1816nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
fcde10e9 1817{
254298c7
YZ
1818 struct hw_perf_event *hwc = &event->hw;
1819 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1820 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1821 struct intel_uncore_extra_reg *er;
1822 unsigned long flags;
1823 int idx, er_idx;
1824 u64 config1;
1825 bool ok = false;
1826
1827 if (!uncore_box_is_fake(box) && reg1->alloc)
1828 return NULL;
1829
1830 idx = reg1->idx % 6;
1831 config1 = reg1->config;
1832again:
1833 er_idx = idx;
1834 /* the 3rd and 4th events use the same extra register */
1835 if (er_idx > 2)
1836 er_idx--;
1837 er_idx += (reg1->idx / 6) * 5;
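	/*
	 * Worked example: reg1->idx == 9 is the 4th event (idx 3) of the
	 * second event set; it shares extra register 2 of that set, so
	 * er_idx == 2 + 1 * 5 == 7.
	 */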
1838
1839 er = &box->shared_regs[er_idx];
1840 raw_spin_lock_irqsave(&er->lock, flags);
1841 if (idx < 2) {
1842 if (!atomic_read(&er->ref) || er->config == reg1->config) {
1843 atomic_inc(&er->ref);
1844 er->config = reg1->config;
1845 ok = true;
1846 }
1847 } else if (idx == 2 || idx == 3) {
1848 /*
1849 * these two events use different fields in an extra register:
1850 * bits 0~7 and bits 8~15 respectively.
1851 */
1852 u64 mask = 0xff << ((idx - 2) * 8);
1853 if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
1854 !((er->config ^ config1) & mask)) {
1855 atomic_add(1 << ((idx - 2) * 8), &er->ref);
1856 er->config &= ~mask;
1857 er->config |= config1 & mask;
1858 ok = true;
1859 }
1860 } else {
1861 if (!atomic_read(&er->ref) ||
1862 (er->config == (hwc->config >> 32) &&
1863 er->config1 == reg1->config &&
1864 er->config2 == reg2->config)) {
1865 atomic_inc(&er->ref);
1866 er->config = (hwc->config >> 32);
1867 er->config1 = reg1->config;
1868 er->config2 = reg2->config;
1869 ok = true;
1870 }
1871 }
1872 raw_spin_unlock_irqrestore(&er->lock, flags);
1873
1874 if (!ok) {
1875 /*
1876 * The Rbox events always come in pairs. The paired
1877 * events are functionally identical, but use different
1878 * extra registers. If we fail to take an extra
1879 * register, try the alternative.
1880 */
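		/*
		 * For instance, if the 3rd event (idx 2) cannot get its
		 * extra register, retry as the 4th event (idx 3) and move
		 * the QLX config from bits 0~7 to bits 8~15 below.
		 */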
1881 if (idx % 2)
1882 idx--;
1883 else
1884 idx++;
1885 if (idx != reg1->idx % 6) {
1886 if (idx == 2)
1887 config1 >>= 8;
1888 else if (idx == 3)
1889 config1 <<= 8;
1890 goto again;
1891 }
1892 } else {
1893 if (!uncore_box_is_fake(box)) {
1894 if (idx != reg1->idx % 6)
1895 nhmex_rbox_alter_er(box, event);
1896 reg1->alloc = 1;
1897 }
1898 return NULL;
1899 }
1900 return &constraint_empty;
fcde10e9
YZ
1901}
1902
254298c7 1903static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
fcde10e9 1904{
254298c7
YZ
1905 struct intel_uncore_extra_reg *er;
1906 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1907 int idx, er_idx;
1908
1909 if (uncore_box_is_fake(box) || !reg1->alloc)
1910 return;
1911
1912 idx = reg1->idx % 6;
1913 er_idx = idx;
1914 if (er_idx > 2)
1915 er_idx--;
1916 er_idx += (reg1->idx / 6) * 5;
1917
1918 er = &box->shared_regs[er_idx];
1919 if (idx == 2 || idx == 3)
1920 atomic_sub(1 << ((idx - 2) * 8), &er->ref);
1921 else
1922 atomic_dec(&er->ref);
1923
1924 reg1->alloc = 0;
fcde10e9
YZ
1925}
1926
254298c7 1927static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
fcde10e9
YZ
1928{
1929 struct hw_perf_event *hwc = &event->hw;
254298c7
YZ
1930 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1931 struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
ebb6cc03 1932 int idx;
fcde10e9 1933
254298c7
YZ
1934 idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
1935 NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
1936 if (idx >= 0x18)
1937 return -EINVAL;
1938
1939 reg1->idx = idx;
1940 reg1->config = event->attr.config1;
1941
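	/*
	 * Events 4 and 5 of a set drive the XBR match/mask logic: the upper
	 * 32 bits of attr.config carry the MM_CFG value, attr.config1 the
	 * match value and attr.config2 the mask (see the xbr_* format
	 * attributes below).
	 */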
ebb6cc03 1942 switch (idx % 6) {
254298c7
YZ
1943 case 4:
1944 case 5:
254298c7 1945 hwc->config |= event->attr.config & (~0ULL << 32);
ebb6cc03 1946 reg2->config = event->attr.config2;
254298c7
YZ
1947 break;
1948 }
1949 return 0;
fcde10e9
YZ
1950}
1951
254298c7
YZ
1952static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1953{
1954 struct hw_perf_event *hwc = &event->hw;
1955 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1956 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
ebb6cc03 1957 int idx, port;
254298c7 1958
ebb6cc03
YZ
1959 idx = reg1->idx;
1960 port = idx / 6 + box->pmu->pmu_idx * 4;
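	/* e.g. reg1->idx 7 on the second rbox (pmu_idx 1) maps to port 5 */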
254298c7 1961
ebb6cc03 1962 switch (idx % 6) {
254298c7 1963 case 0:
ebb6cc03
YZ
1964 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
1965 break;
254298c7 1966 case 1:
ebb6cc03 1967 wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
254298c7
YZ
1968 break;
1969 case 2:
1970 case 3:
ebb6cc03 1971 wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
46bdd905 1972 uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
254298c7
YZ
1973 break;
1974 case 4:
ebb6cc03
YZ
1975 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
1976 hwc->config >> 32);
1977 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
1978 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
1979 break;
254298c7 1980 case 5:
ebb6cc03
YZ
1981 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
1982 hwc->config >> 32);
1983 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
1984 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
254298c7
YZ
1985 break;
1986 }
1987
1988 wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
1989 (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
1990}
1991
ebb6cc03
YZ
1992DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
1993DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
254298c7
YZ
1994DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
1995DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
1996DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
1997
1998static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
1999 &format_attr_event5.attr,
2000 &format_attr_xbr_mm_cfg.attr,
2001 &format_attr_xbr_match.attr,
2002 &format_attr_xbr_mask.attr,
2003 &format_attr_qlx_cfg.attr,
2004 &format_attr_iperf_cfg.attr,
fcde10e9
YZ
2005 NULL,
2006};
2007
254298c7 2008static struct attribute_group nhmex_uncore_rbox_format_group = {
fcde10e9 2009 .name = "format",
254298c7 2010 .attrs = nhmex_uncore_rbox_formats_attr,
fcde10e9
YZ
2011};
2012
254298c7
YZ
2013static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
2014 INTEL_UNCORE_EVENT_DESC(qpi0_flit_send, "event=0x0,iperf_cfg=0x80000000"),
2015 INTEL_UNCORE_EVENT_DESC(qpi1_filt_send, "event=0x6,iperf_cfg=0x80000000"),
2016 INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt, "event=0x0,iperf_cfg=0x40000000"),
2017 INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt, "event=0x6,iperf_cfg=0x40000000"),
2018 INTEL_UNCORE_EVENT_DESC(qpi0_date_response, "event=0x0,iperf_cfg=0xc4"),
2019 INTEL_UNCORE_EVENT_DESC(qpi1_date_response, "event=0x6,iperf_cfg=0xc4"),
fcde10e9
YZ
2020 { /* end: all zeroes */ },
2021};
2022
254298c7
YZ
2023static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
2024 NHMEX_UNCORE_OPS_COMMON_INIT(),
2025 .enable_event = nhmex_rbox_msr_enable_event,
2026 .hw_config = nhmex_rbox_hw_config,
2027 .get_constraint = nhmex_rbox_get_constraint,
2028 .put_constraint = nhmex_rbox_put_constraint,
fcde10e9
YZ
2029};
2030
254298c7
YZ
2031static struct intel_uncore_type nhmex_uncore_rbox = {
2032 .name = "rbox",
2033 .num_counters = 8,
2034 .num_boxes = 2,
2035 .perf_ctr_bits = 48,
2036 .event_ctl = NHMEX_R_MSR_PMON_CTL0,
2037 .perf_ctr = NHMEX_R_MSR_PMON_CNT0,
2038 .event_mask = NHMEX_R_PMON_RAW_EVENT_MASK,
2039 .box_ctl = NHMEX_R_MSR_GLOBAL_CTL,
2040 .msr_offset = NHMEX_R_MSR_OFFSET,
2041 .pair_ctr_ctl = 1,
2042 .num_shared_regs = 20,
2043 .event_descs = nhmex_uncore_rbox_events,
2044 .ops = &nhmex_uncore_rbox_ops,
2045 .format_group = &nhmex_uncore_rbox_format_group
fcde10e9
YZ
2046};
2047
254298c7
YZ
2048static struct intel_uncore_type *nhmex_msr_uncores[] = {
2049 &nhmex_uncore_ubox,
2050 &nhmex_uncore_cbox,
2051 &nhmex_uncore_bbox,
2052 &nhmex_uncore_sbox,
2053 &nhmex_uncore_mbox,
2054 &nhmex_uncore_rbox,
2055 &nhmex_uncore_wbox,
fcde10e9
YZ
2056 NULL,
2057};
254298c7 2058/* end of Nehalem-EX uncore support */
fcde10e9 2059
254298c7 2060static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
087bfbb0
YZ
2061{
2062 struct hw_perf_event *hwc = &event->hw;
2063
2064 hwc->idx = idx;
2065 hwc->last_tag = ++box->tags[idx];
2066
2067 if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
14371cce
YZ
2068 hwc->event_base = uncore_fixed_ctr(box);
2069 hwc->config_base = uncore_fixed_ctl(box);
087bfbb0
YZ
2070 return;
2071 }
2072
14371cce
YZ
2073 hwc->config_base = uncore_event_ctl(box, hwc->idx);
2074 hwc->event_base = uncore_perf_ctr(box, hwc->idx);
087bfbb0
YZ
2075}
2076
254298c7 2077static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
087bfbb0
YZ
2078{
2079 u64 prev_count, new_count, delta;
2080 int shift;
2081
2082 if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
2083 shift = 64 - uncore_fixed_ctr_bits(box);
2084 else
2085 shift = 64 - uncore_perf_ctr_bits(box);
2086
2087 /* the hrtimer might modify the previous event value */
2088again:
2089 prev_count = local64_read(&event->hw.prev_count);
2090 new_count = uncore_read_counter(box, event);
2091 if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
2092 goto again;
2093
2094 delta = (new_count << shift) - (prev_count << shift);
2095 delta >>= shift;
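	/*
	 * Example with 48-bit counters (shift == 16): a wrap from
	 * prev_count 0xffffffffffff to new_count 0x10 yields
	 * ((0x10 << 16) - (0xffffffffffff << 16)) >> 16 == 0x11,
	 * i.e. the shifts discard the stale upper bits so the
	 * subtraction wraps within the counter width.
	 */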
2096
2097 local64_add(delta, &event->count);
2098}
2099
2100/*
2101 * The overflow interrupt is unavailable for SandyBridge-EP and is broken
2102 * for SandyBridge, so we use an hrtimer to poll the counters periodically
2103 * and avoid losing counts to overflow.
2104 */
2105static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
2106{
2107 struct intel_uncore_box *box;
2108 unsigned long flags;
2109 int bit;
2110
2111 box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
2112 if (!box->n_active || box->cpu != smp_processor_id())
2113 return HRTIMER_NORESTART;
2114 /*
2115 * disable local interrupts to prevent uncore_pmu_event_start/stop
2116 * from interrupting the update process
2117 */
2118 local_irq_save(flags);
2119
2120 for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
2121 uncore_perf_event_update(box, box->events[bit]);
2122
2123 local_irq_restore(flags);
2124
2125 hrtimer_forward_now(hrtimer, ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL));
2126 return HRTIMER_RESTART;
2127}
2128
2129static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
2130{
2131 __hrtimer_start_range_ns(&box->hrtimer,
2132 ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL), 0,
2133 HRTIMER_MODE_REL_PINNED, 0);
2134}
2135
2136static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
2137{
2138 hrtimer_cancel(&box->hrtimer);
2139}
2140
2141static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
2142{
2143 hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2144 box->hrtimer.function = uncore_pmu_hrtimer;
2145}
2146
254298c7 2147struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu)
087bfbb0
YZ
2148{
2149 struct intel_uncore_box *box;
6a67943a 2150 int i, size;
087bfbb0 2151
254298c7 2152 size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
6a67943a
YZ
2153
2154 box = kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu));
087bfbb0
YZ
2155 if (!box)
2156 return NULL;
2157
6a67943a
YZ
2158 for (i = 0; i < type->num_shared_regs; i++)
2159 raw_spin_lock_init(&box->shared_regs[i].lock);
2160
087bfbb0
YZ
2161 uncore_pmu_init_hrtimer(box);
2162 atomic_set(&box->refcnt, 1);
2163 box->cpu = -1;
2164 box->phys_id = -1;
2165
2166 return box;
2167}
2168
2169static struct intel_uncore_box *
2170uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
2171{
402537fd 2172 struct intel_uncore_box *box;
14371cce
YZ
2173
2174 box = *per_cpu_ptr(pmu->box, cpu);
2175 if (box)
2176 return box;
2177
2178 raw_spin_lock(&uncore_box_lock);
2179 list_for_each_entry(box, &pmu->box_list, list) {
2180 if (box->phys_id == topology_physical_package_id(cpu)) {
2181 atomic_inc(&box->refcnt);
2182 *per_cpu_ptr(pmu->box, cpu) = box;
2183 break;
2184 }
2185 }
2186 raw_spin_unlock(&uncore_box_lock);
2187
087bfbb0
YZ
2188 return *per_cpu_ptr(pmu->box, cpu);
2189}
2190
2191static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
2192{
2193 return container_of(event->pmu, struct intel_uncore_pmu, pmu);
2194}
2195
2196static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
2197{
2198 /*
2199 * perf core schedules events on the basis of cpu; uncore events are
2200 * collected by one of the cpus inside a physical package.
2201 */
254298c7 2202 return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
087bfbb0
YZ
2203}
2204
254298c7
YZ
2205static int
2206uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
087bfbb0
YZ
2207{
2208 struct perf_event *event;
2209 int n, max_count;
2210
2211 max_count = box->pmu->type->num_counters;
2212 if (box->pmu->type->fixed_ctl)
2213 max_count++;
2214
2215 if (box->n_events >= max_count)
2216 return -EINVAL;
2217
2218 n = box->n_events;
2219 box->event_list[n] = leader;
2220 n++;
2221 if (!dogrp)
2222 return n;
2223
2224 list_for_each_entry(event, &leader->sibling_list, group_entry) {
2225 if (event->state <= PERF_EVENT_STATE_OFF)
2226 continue;
2227
2228 if (n >= max_count)
2229 return -EINVAL;
2230
2231 box->event_list[n] = event;
2232 n++;
2233 }
2234 return n;
2235}
2236
2237static struct event_constraint *
254298c7 2238uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
087bfbb0 2239{
6a67943a 2240 struct intel_uncore_type *type = box->pmu->type;
087bfbb0
YZ
2241 struct event_constraint *c;
2242
6a67943a
YZ
2243 if (type->ops->get_constraint) {
2244 c = type->ops->get_constraint(box, event);
2245 if (c)
2246 return c;
2247 }
2248
087bfbb0
YZ
2249 if (event->hw.config == ~0ULL)
2250 return &constraint_fixed;
2251
2252 if (type->constraints) {
2253 for_each_event_constraint(c, type->constraints) {
2254 if ((event->hw.config & c->cmask) == c->code)
2255 return c;
2256 }
2257 }
2258
2259 return &type->unconstrainted;
2260}
2261
254298c7 2262static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
6a67943a
YZ
2263{
2264 if (box->pmu->type->ops->put_constraint)
2265 box->pmu->type->ops->put_constraint(box, event);
2266}
2267
254298c7 2268static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
087bfbb0
YZ
2269{
2270 unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
2271 struct event_constraint *c, *constraints[UNCORE_PMC_IDX_MAX];
6a67943a 2272 int i, wmin, wmax, ret = 0;
087bfbb0
YZ
2273 struct hw_perf_event *hwc;
2274
2275 bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
2276
2277 for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
6a67943a 2278 c = uncore_get_event_constraint(box, box->event_list[i]);
087bfbb0
YZ
2279 constraints[i] = c;
2280 wmin = min(wmin, c->weight);
2281 wmax = max(wmax, c->weight);
2282 }
2283
2284 /* fastpath, try to reuse previous register */
2285 for (i = 0; i < n; i++) {
2286 hwc = &box->event_list[i]->hw;
2287 c = constraints[i];
2288
2289 /* never assigned */
2290 if (hwc->idx == -1)
2291 break;
2292
2293 /* constraint still honored */
2294 if (!test_bit(hwc->idx, c->idxmsk))
2295 break;
2296
2297 /* not already used */
2298 if (test_bit(hwc->idx, used_mask))
2299 break;
2300
2301 __set_bit(hwc->idx, used_mask);
6a67943a
YZ
2302 if (assign)
2303 assign[i] = hwc->idx;
087bfbb0 2304 }
087bfbb0 2305 /* slow path */
6a67943a
YZ
2306 if (i != n)
2307 ret = perf_assign_events(constraints, n, wmin, wmax, assign);
2308
2309 if (!assign || ret) {
2310 for (i = 0; i < n; i++)
2311 uncore_put_event_constraint(box, box->event_list[i]);
2312 }
087bfbb0
YZ
2313 return ret ? -EINVAL : 0;
2314}
2315
2316static void uncore_pmu_event_start(struct perf_event *event, int flags)
2317{
2318 struct intel_uncore_box *box = uncore_event_to_box(event);
2319 int idx = event->hw.idx;
2320
2321 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
2322 return;
2323
2324 if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
2325 return;
2326
2327 event->hw.state = 0;
2328 box->events[idx] = event;
2329 box->n_active++;
2330 __set_bit(idx, box->active_mask);
2331
2332 local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
2333 uncore_enable_event(box, event);
2334
2335 if (box->n_active == 1) {
2336 uncore_enable_box(box);
2337 uncore_pmu_start_hrtimer(box);
2338 }
2339}
2340
2341static void uncore_pmu_event_stop(struct perf_event *event, int flags)
2342{
2343 struct intel_uncore_box *box = uncore_event_to_box(event);
2344 struct hw_perf_event *hwc = &event->hw;
2345
2346 if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
2347 uncore_disable_event(box, event);
2348 box->n_active--;
2349 box->events[hwc->idx] = NULL;
2350 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
2351 hwc->state |= PERF_HES_STOPPED;
2352
2353 if (box->n_active == 0) {
2354 uncore_disable_box(box);
2355 uncore_pmu_cancel_hrtimer(box);
2356 }
2357 }
2358
2359 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
2360 /*
2361 * Drain the remaining delta count out of an event
2362 * that we are disabling:
2363 */
2364 uncore_perf_event_update(box, event);
2365 hwc->state |= PERF_HES_UPTODATE;
2366 }
2367}
2368
2369static int uncore_pmu_event_add(struct perf_event *event, int flags)
2370{
2371 struct intel_uncore_box *box = uncore_event_to_box(event);
2372 struct hw_perf_event *hwc = &event->hw;
2373 int assign[UNCORE_PMC_IDX_MAX];
2374 int i, n, ret;
2375
2376 if (!box)
2377 return -ENODEV;
2378
2379 ret = n = uncore_collect_events(box, event, false);
2380 if (ret < 0)
2381 return ret;
2382
2383 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
2384 if (!(flags & PERF_EF_START))
2385 hwc->state |= PERF_HES_ARCH;
2386
2387 ret = uncore_assign_events(box, assign, n);
2388 if (ret)
2389 return ret;
2390
2391 /* save events moving to new counters */
2392 for (i = 0; i < box->n_events; i++) {
2393 event = box->event_list[i];
2394 hwc = &event->hw;
2395
2396 if (hwc->idx == assign[i] &&
2397 hwc->last_tag == box->tags[assign[i]])
2398 continue;
2399 /*
2400 * Ensure we don't accidentally enable a stopped
2401 * counter simply because we rescheduled.
2402 */
2403 if (hwc->state & PERF_HES_STOPPED)
2404 hwc->state |= PERF_HES_ARCH;
2405
2406 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
2407 }
2408
2409 /* reprogram moved events into new counters */
2410 for (i = 0; i < n; i++) {
2411 event = box->event_list[i];
2412 hwc = &event->hw;
2413
2414 if (hwc->idx != assign[i] ||
2415 hwc->last_tag != box->tags[assign[i]])
2416 uncore_assign_hw_event(box, event, assign[i]);
2417 else if (i < box->n_events)
2418 continue;
2419
2420 if (hwc->state & PERF_HES_ARCH)
2421 continue;
2422
2423 uncore_pmu_event_start(event, 0);
2424 }
2425 box->n_events = n;
2426
2427 return 0;
2428}
2429
2430static void uncore_pmu_event_del(struct perf_event *event, int flags)
2431{
2432 struct intel_uncore_box *box = uncore_event_to_box(event);
2433 int i;
2434
2435 uncore_pmu_event_stop(event, PERF_EF_UPDATE);
2436
2437 for (i = 0; i < box->n_events; i++) {
2438 if (event == box->event_list[i]) {
6a67943a
YZ
2439 uncore_put_event_constraint(box, event);
2440
087bfbb0
YZ
2441 while (++i < box->n_events)
2442 box->event_list[i - 1] = box->event_list[i];
2443
2444 --box->n_events;
2445 break;
2446 }
2447 }
2448
2449 event->hw.idx = -1;
2450 event->hw.last_tag = ~0ULL;
2451}
2452
2453static void uncore_pmu_event_read(struct perf_event *event)
2454{
2455 struct intel_uncore_box *box = uncore_event_to_box(event);
2456 uncore_perf_event_update(box, event);
2457}
2458
2459/*
2460 * validation ensures the group can be loaded onto the
2461 * PMU if it were the only group available.
2462 */
2463static int uncore_validate_group(struct intel_uncore_pmu *pmu,
2464 struct perf_event *event)
2465{
2466 struct perf_event *leader = event->group_leader;
2467 struct intel_uncore_box *fake_box;
087bfbb0
YZ
2468 int ret = -EINVAL, n;
2469
6a67943a 2470 fake_box = uncore_alloc_box(pmu->type, smp_processor_id());
087bfbb0
YZ
2471 if (!fake_box)
2472 return -ENOMEM;
2473
2474 fake_box->pmu = pmu;
2475 /*
2476 * the event is not yet connected with its
2477 * siblings, therefore we must first collect
2478 * existing siblings, then add the new event
2479 * before we can simulate the scheduling
2480 */
2481 n = uncore_collect_events(fake_box, leader, true);
2482 if (n < 0)
2483 goto out;
2484
2485 fake_box->n_events = n;
2486 n = uncore_collect_events(fake_box, event, false);
2487 if (n < 0)
2488 goto out;
2489
2490 fake_box->n_events = n;
2491
6a67943a 2492 ret = uncore_assign_events(fake_box, NULL, n);
087bfbb0
YZ
2493out:
2494 kfree(fake_box);
2495 return ret;
2496}
2497
46bdd905 2498static int uncore_pmu_event_init(struct perf_event *event)
087bfbb0
YZ
2499{
2500 struct intel_uncore_pmu *pmu;
2501 struct intel_uncore_box *box;
2502 struct hw_perf_event *hwc = &event->hw;
2503 int ret;
2504
2505 if (event->attr.type != event->pmu->type)
2506 return -ENOENT;
2507
2508 pmu = uncore_event_to_pmu(event);
2509 /* no device found for this pmu */
2510 if (pmu->func_id < 0)
2511 return -ENOENT;
2512
2513 /*
2514 * The uncore PMU counts at all privilege levels all the time,
2515 * so it doesn't make sense to specify any exclude bits.
2516 */
2517 if (event->attr.exclude_user || event->attr.exclude_kernel ||
2518 event->attr.exclude_hv || event->attr.exclude_idle)
2519 return -EINVAL;
2520
2521 /* Sampling not supported yet */
2522 if (hwc->sample_period)
2523 return -EINVAL;
2524
2525 /*
2526 * Place all uncore events for a particular physical package
2527 * onto a single cpu
2528 */
2529 if (event->cpu < 0)
2530 return -EINVAL;
2531 box = uncore_pmu_to_box(pmu, event->cpu);
2532 if (!box || box->cpu < 0)
2533 return -EINVAL;
2534 event->cpu = box->cpu;
2535
6a67943a
YZ
2536 event->hw.idx = -1;
2537 event->hw.last_tag = ~0ULL;
2538 event->hw.extra_reg.idx = EXTRA_REG_NONE;
ebb6cc03 2539 event->hw.branch_reg.idx = EXTRA_REG_NONE;
6a67943a 2540
087bfbb0
YZ
2541 if (event->attr.config == UNCORE_FIXED_EVENT) {
2542 /* no fixed counter */
2543 if (!pmu->type->fixed_ctl)
2544 return -EINVAL;
2545 /*
2546 * if there is only one fixed counter, only the first pmu
2547 * can access the fixed counter
2548 */
2549 if (pmu->type->single_fixed && pmu->pmu_idx > 0)
2550 return -EINVAL;
2551 hwc->config = ~0ULL;
2552 } else {
2553 hwc->config = event->attr.config & pmu->type->event_mask;
6a67943a
YZ
2554 if (pmu->type->ops->hw_config) {
2555 ret = pmu->type->ops->hw_config(box, event);
2556 if (ret)
2557 return ret;
2558 }
087bfbb0
YZ
2559 }
2560
087bfbb0
YZ
2561 if (event->group_leader != event)
2562 ret = uncore_validate_group(pmu, event);
2563 else
2564 ret = 0;
2565
2566 return ret;
2567}
2568
314d9f63
YZ
2569static ssize_t uncore_get_attr_cpumask(struct device *dev,
2570 struct device_attribute *attr, char *buf)
2571{
2572 int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &uncore_cpu_mask);
2573
2574 buf[n++] = '\n';
2575 buf[n] = '\0';
2576 return n;
2577}
2578
2579static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);
2580
2581static struct attribute *uncore_pmu_attrs[] = {
2582 &dev_attr_cpumask.attr,
2583 NULL,
2584};
2585
2586static struct attribute_group uncore_pmu_attr_group = {
2587 .attrs = uncore_pmu_attrs,
2588};
2589
087bfbb0
YZ
2590static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
2591{
2592 int ret;
2593
2594 pmu->pmu = (struct pmu) {
2595 .attr_groups = pmu->type->attr_groups,
2596 .task_ctx_nr = perf_invalid_context,
2597 .event_init = uncore_pmu_event_init,
2598 .add = uncore_pmu_event_add,
2599 .del = uncore_pmu_event_del,
2600 .start = uncore_pmu_event_start,
2601 .stop = uncore_pmu_event_stop,
2602 .read = uncore_pmu_event_read,
2603 };
2604
2605 if (pmu->type->num_boxes == 1) {
2606 if (strlen(pmu->type->name) > 0)
2607 sprintf(pmu->name, "uncore_%s", pmu->type->name);
2608 else
2609 sprintf(pmu->name, "uncore");
2610 } else {
2611 sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
2612 pmu->pmu_idx);
2613 }
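	/* e.g. the nhmex mbox type (two boxes) becomes uncore_mbox_0 and uncore_mbox_1 */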
2614
2615 ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
2616 return ret;
2617}
2618
2619static void __init uncore_type_exit(struct intel_uncore_type *type)
2620{
2621 int i;
2622
2623 for (i = 0; i < type->num_boxes; i++)
2624 free_percpu(type->pmus[i].box);
2625 kfree(type->pmus);
2626 type->pmus = NULL;
314d9f63
YZ
2627 kfree(type->events_group);
2628 type->events_group = NULL;
087bfbb0
YZ
2629}
2630
cffa59ba 2631static void __init uncore_types_exit(struct intel_uncore_type **types)
14371cce
YZ
2632{
2633 int i;
2634 for (i = 0; types[i]; i++)
2635 uncore_type_exit(types[i]);
2636}
2637
087bfbb0
YZ
2638static int __init uncore_type_init(struct intel_uncore_type *type)
2639{
2640 struct intel_uncore_pmu *pmus;
2641 struct attribute_group *events_group;
2642 struct attribute **attrs;
2643 int i, j;
2644
2645 pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
2646 if (!pmus)
2647 return -ENOMEM;
2648
2649 type->unconstrainted = (struct event_constraint)
2650 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
9fac2cf3 2651 0, type->num_counters, 0, 0);
087bfbb0
YZ
2652
2653 for (i = 0; i < type->num_boxes; i++) {
2654 pmus[i].func_id = -1;
2655 pmus[i].pmu_idx = i;
2656 pmus[i].type = type;
14371cce 2657 INIT_LIST_HEAD(&pmus[i].box_list);
087bfbb0
YZ
2658 pmus[i].box = alloc_percpu(struct intel_uncore_box *);
2659 if (!pmus[i].box)
2660 goto fail;
2661 }
2662
2663 if (type->event_descs) {
2664 i = 0;
2665 while (type->event_descs[i].attr.attr.name)
2666 i++;
2667
2668 events_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
2669 sizeof(*events_group), GFP_KERNEL);
2670 if (!events_group)
2671 goto fail;
2672
2673 attrs = (struct attribute **)(events_group + 1);
2674 events_group->name = "events";
2675 events_group->attrs = attrs;
2676
2677 for (j = 0; j < i; j++)
2678 attrs[j] = &type->event_descs[j].attr.attr;
2679
314d9f63 2680 type->events_group = events_group;
087bfbb0
YZ
2681 }
2682
314d9f63 2683 type->pmu_group = &uncore_pmu_attr_group;
087bfbb0
YZ
2684 type->pmus = pmus;
2685 return 0;
2686fail:
2687 uncore_type_exit(type);
2688 return -ENOMEM;
2689}
2690
2691static int __init uncore_types_init(struct intel_uncore_type **types)
2692{
2693 int i, ret;
2694
2695 for (i = 0; types[i]; i++) {
2696 ret = uncore_type_init(types[i]);
2697 if (ret)
2698 goto fail;
2699 }
2700 return 0;
2701fail:
2702 while (--i >= 0)
2703 uncore_type_exit(types[i]);
2704 return ret;
2705}
2706
14371cce
YZ
2707static struct pci_driver *uncore_pci_driver;
2708static bool pcidrv_registered;
2709
2710/*
2711 * add a pci uncore device
2712 */
a18e3690 2713static int uncore_pci_add(struct intel_uncore_type *type, struct pci_dev *pdev)
14371cce
YZ
2714{
2715 struct intel_uncore_pmu *pmu;
2716 struct intel_uncore_box *box;
2717 int i, phys_id;
2718
2719 phys_id = pcibus_to_physid[pdev->bus->number];
2720 if (phys_id < 0)
2721 return -ENODEV;
2722
6a67943a 2723 box = uncore_alloc_box(type, 0);
14371cce
YZ
2724 if (!box)
2725 return -ENOMEM;
2726
2727 /*
2728 * for a performance monitoring unit with multiple boxes,
2729 * each box has a different function id.
2730 */
2731 for (i = 0; i < type->num_boxes; i++) {
2732 pmu = &type->pmus[i];
2733 if (pmu->func_id == pdev->devfn)
2734 break;
2735 if (pmu->func_id < 0) {
2736 pmu->func_id = pdev->devfn;
2737 break;
2738 }
2739 pmu = NULL;
2740 }
2741
2742 if (!pmu) {
2743 kfree(box);
2744 return -EINVAL;
2745 }
2746
2747 box->phys_id = phys_id;
2748 box->pci_dev = pdev;
2749 box->pmu = pmu;
2750 uncore_box_init(box);
2751 pci_set_drvdata(pdev, box);
2752
2753 raw_spin_lock(&uncore_box_lock);
2754 list_add_tail(&box->list, &pmu->box_list);
2755 raw_spin_unlock(&uncore_box_lock);
2756
2757 return 0;
2758}
2759
357398e9 2760static void uncore_pci_remove(struct pci_dev *pdev)
14371cce
YZ
2761{
2762 struct intel_uncore_box *box = pci_get_drvdata(pdev);
2763 struct intel_uncore_pmu *pmu = box->pmu;
2764 int cpu, phys_id = pcibus_to_physid[pdev->bus->number];
2765
2766 if (WARN_ON_ONCE(phys_id != box->phys_id))
2767 return;
2768
2769 raw_spin_lock(&uncore_box_lock);
2770 list_del(&box->list);
2771 raw_spin_unlock(&uncore_box_lock);
2772
2773 for_each_possible_cpu(cpu) {
2774 if (*per_cpu_ptr(pmu->box, cpu) == box) {
2775 *per_cpu_ptr(pmu->box, cpu) = NULL;
2776 atomic_dec(&box->refcnt);
2777 }
2778 }
2779
2780 WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
2781 kfree(box);
2782}
2783
a18e3690
GKH
2784static int uncore_pci_probe(struct pci_dev *pdev,
2785 const struct pci_device_id *id)
14371cce
YZ
2786{
2787 struct intel_uncore_type *type;
2788
2789 type = (struct intel_uncore_type *)id->driver_data;
254298c7 2790
14371cce
YZ
2791 return uncore_pci_add(type, pdev);
2792}
2793
2794static int __init uncore_pci_init(void)
2795{
2796 int ret;
2797
2798 switch (boot_cpu_data.x86_model) {
7c94ee2e 2799 case 45: /* Sandy Bridge-EP */
032c3851
YZ
2800 ret = snbep_pci2phy_map_init();
2801 if (ret)
2802 return ret;
7c94ee2e
YZ
2803 pci_uncores = snbep_pci_uncores;
2804 uncore_pci_driver = &snbep_uncore_pci_driver;
7c94ee2e 2805 break;
14371cce
YZ
2806 default:
2807 return 0;
2808 }
2809
2810 ret = uncore_types_init(pci_uncores);
2811 if (ret)
2812 return ret;
2813
2814 uncore_pci_driver->probe = uncore_pci_probe;
2815 uncore_pci_driver->remove = uncore_pci_remove;
2816
2817 ret = pci_register_driver(uncore_pci_driver);
2818 if (ret == 0)
2819 pcidrv_registered = true;
2820 else
2821 uncore_types_exit(pci_uncores);
2822
2823 return ret;
2824}
2825
2826static void __init uncore_pci_exit(void)
2827{
2828 if (pcidrv_registered) {
2829 pcidrv_registered = false;
2830 pci_unregister_driver(uncore_pci_driver);
2831 uncore_types_exit(pci_uncores);
2832 }
2833}
2834
22cc4ccf
YZ
2835 /* CPU hotplug/unplug is serialized by the cpu_add_remove_lock mutex */
2836static LIST_HEAD(boxes_to_free);
2837
2838static void __cpuinit uncore_kfree_boxes(void)
2839{
2840 struct intel_uncore_box *box;
2841
2842 while (!list_empty(&boxes_to_free)) {
2843 box = list_entry(boxes_to_free.next,
2844 struct intel_uncore_box, list);
2845 list_del(&box->list);
2846 kfree(box);
2847 }
2848}
2849
087bfbb0
YZ
2850static void __cpuinit uncore_cpu_dying(int cpu)
2851{
2852 struct intel_uncore_type *type;
2853 struct intel_uncore_pmu *pmu;
2854 struct intel_uncore_box *box;
2855 int i, j;
2856
2857 for (i = 0; msr_uncores[i]; i++) {
2858 type = msr_uncores[i];
2859 for (j = 0; j < type->num_boxes; j++) {
2860 pmu = &type->pmus[j];
2861 box = *per_cpu_ptr(pmu->box, cpu);
2862 *per_cpu_ptr(pmu->box, cpu) = NULL;
2863 if (box && atomic_dec_and_test(&box->refcnt))
22cc4ccf 2864 list_add(&box->list, &boxes_to_free);
087bfbb0
YZ
2865 }
2866 }
2867}
2868
2869static int __cpuinit uncore_cpu_starting(int cpu)
2870{
2871 struct intel_uncore_type *type;
2872 struct intel_uncore_pmu *pmu;
2873 struct intel_uncore_box *box, *exist;
2874 int i, j, k, phys_id;
2875
2876 phys_id = topology_physical_package_id(cpu);
2877
2878 for (i = 0; msr_uncores[i]; i++) {
2879 type = msr_uncores[i];
2880 for (j = 0; j < type->num_boxes; j++) {
2881 pmu = &type->pmus[j];
2882 box = *per_cpu_ptr(pmu->box, cpu);
2883 /* called by uncore_cpu_init? */
2884 if (box && box->phys_id >= 0) {
2885 uncore_box_init(box);
2886 continue;
2887 }
2888
2889 for_each_online_cpu(k) {
2890 exist = *per_cpu_ptr(pmu->box, k);
2891 if (exist && exist->phys_id == phys_id) {
2892 atomic_inc(&exist->refcnt);
2893 *per_cpu_ptr(pmu->box, cpu) = exist;
22cc4ccf
YZ
2894 if (box) {
2895 list_add(&box->list,
2896 &boxes_to_free);
2897 box = NULL;
2898 }
087bfbb0
YZ
2899 break;
2900 }
2901 }
2902
2903 if (box) {
2904 box->phys_id = phys_id;
2905 uncore_box_init(box);
2906 }
2907 }
2908 }
2909 return 0;
2910}
2911
2912static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id)
2913{
2914 struct intel_uncore_type *type;
2915 struct intel_uncore_pmu *pmu;
2916 struct intel_uncore_box *box;
2917 int i, j;
2918
2919 for (i = 0; msr_uncores[i]; i++) {
2920 type = msr_uncores[i];
2921 for (j = 0; j < type->num_boxes; j++) {
2922 pmu = &type->pmus[j];
2923 if (pmu->func_id < 0)
2924 pmu->func_id = j;
2925
6a67943a 2926 box = uncore_alloc_box(type, cpu);
087bfbb0
YZ
2927 if (!box)
2928 return -ENOMEM;
2929
2930 box->pmu = pmu;
2931 box->phys_id = phys_id;
2932 *per_cpu_ptr(pmu->box, cpu) = box;
2933 }
2934 }
2935 return 0;
2936}
2937
254298c7
YZ
2938static void __cpuinit
2939uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
087bfbb0
YZ
2940{
2941 struct intel_uncore_type *type;
2942 struct intel_uncore_pmu *pmu;
2943 struct intel_uncore_box *box;
2944 int i, j;
2945
2946 for (i = 0; uncores[i]; i++) {
2947 type = uncores[i];
2948 for (j = 0; j < type->num_boxes; j++) {
2949 pmu = &type->pmus[j];
2950 if (old_cpu < 0)
2951 box = uncore_pmu_to_box(pmu, new_cpu);
2952 else
2953 box = uncore_pmu_to_box(pmu, old_cpu);
2954 if (!box)
2955 continue;
2956
2957 if (old_cpu < 0) {
2958 WARN_ON_ONCE(box->cpu != -1);
2959 box->cpu = new_cpu;
2960 continue;
2961 }
2962
2963 WARN_ON_ONCE(box->cpu != old_cpu);
2964 if (new_cpu >= 0) {
2965 uncore_pmu_cancel_hrtimer(box);
2966 perf_pmu_migrate_context(&pmu->pmu,
2967 old_cpu, new_cpu);
2968 box->cpu = new_cpu;
2969 } else {
2970 box->cpu = -1;
2971 }
2972 }
2973 }
2974}
2975
2976static void __cpuinit uncore_event_exit_cpu(int cpu)
2977{
2978 int i, phys_id, target;
2979
2980 /* if exiting cpu is used for collecting uncore events */
2981 if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
2982 return;
2983
2984 /* find a new cpu to collect uncore events */
2985 phys_id = topology_physical_package_id(cpu);
2986 target = -1;
2987 for_each_online_cpu(i) {
2988 if (i == cpu)
2989 continue;
2990 if (phys_id == topology_physical_package_id(i)) {
2991 target = i;
2992 break;
2993 }
2994 }
2995
2996 /* migrate uncore events to the new cpu */
2997 if (target >= 0)
2998 cpumask_set_cpu(target, &uncore_cpu_mask);
2999
3000 uncore_change_context(msr_uncores, cpu, target);
14371cce 3001 uncore_change_context(pci_uncores, cpu, target);
087bfbb0
YZ
3002}
3003
3004static void __cpuinit uncore_event_init_cpu(int cpu)
3005{
3006 int i, phys_id;
3007
3008 phys_id = topology_physical_package_id(cpu);
3009 for_each_cpu(i, &uncore_cpu_mask) {
3010 if (phys_id == topology_physical_package_id(i))
3011 return;
3012 }
3013
3014 cpumask_set_cpu(cpu, &uncore_cpu_mask);
3015
3016 uncore_change_context(msr_uncores, -1, cpu);
14371cce 3017 uncore_change_context(pci_uncores, -1, cpu);
087bfbb0
YZ
3018}
3019
254298c7
YZ
3020static int
3021 __cpuinit uncore_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
087bfbb0
YZ
3022{
3023 unsigned int cpu = (long)hcpu;
3024
3025 /* allocate/free data structure for uncore box */
3026 switch (action & ~CPU_TASKS_FROZEN) {
3027 case CPU_UP_PREPARE:
3028 uncore_cpu_prepare(cpu, -1);
3029 break;
3030 case CPU_STARTING:
3031 uncore_cpu_starting(cpu);
3032 break;
3033 case CPU_UP_CANCELED:
3034 case CPU_DYING:
3035 uncore_cpu_dying(cpu);
3036 break;
22cc4ccf
YZ
3037 case CPU_ONLINE:
3038 case CPU_DEAD:
3039 uncore_kfree_boxes();
3040 break;
087bfbb0
YZ
3041 default:
3042 break;
3043 }
3044
3045 /* select the cpu that collects uncore events */
3046 switch (action & ~CPU_TASKS_FROZEN) {
3047 case CPU_DOWN_FAILED:
3048 case CPU_STARTING:
3049 uncore_event_init_cpu(cpu);
3050 break;
3051 case CPU_DOWN_PREPARE:
3052 uncore_event_exit_cpu(cpu);
3053 break;
3054 default:
3055 break;
3056 }
3057
3058 return NOTIFY_OK;
3059}
3060
3061static struct notifier_block uncore_cpu_nb __cpuinitdata = {
254298c7 3062 .notifier_call = uncore_cpu_notifier,
087bfbb0
YZ
3063 /*
3064 * to migrate uncore events, our notifier should be executed
3065 * before perf core's notifier.
3066 */
254298c7 3067 .priority = CPU_PRI_PERF + 1,
087bfbb0
YZ
3068};
3069
3070static void __init uncore_cpu_setup(void *dummy)
3071{
3072 uncore_cpu_starting(smp_processor_id());
3073}
3074
3075static int __init uncore_cpu_init(void)
3076{
42089697 3077 int ret, cpu, max_cores;
087bfbb0 3078
42089697 3079 max_cores = boot_cpu_data.x86_max_cores;
087bfbb0 3080 switch (boot_cpu_data.x86_model) {
fcde10e9
YZ
3081 case 26: /* Nehalem */
3082 case 30:
3083 case 37: /* Westmere */
3084 case 44:
3085 msr_uncores = nhm_msr_uncores;
3086 break;
3087 case 42: /* Sandy Bridge */
42089697
YZ
3088 if (snb_uncore_cbox.num_boxes > max_cores)
3089 snb_uncore_cbox.num_boxes = max_cores;
fcde10e9
YZ
3090 msr_uncores = snb_msr_uncores;
3091 break;
7c94ee2e 3092 case 45: /* Sandy Bridge-EP */
42089697
YZ
3093 if (snbep_uncore_cbox.num_boxes > max_cores)
3094 snbep_uncore_cbox.num_boxes = max_cores;
7c94ee2e
YZ
3095 msr_uncores = snbep_msr_uncores;
3096 break;
cb37af77
YZ
3097 case 46: /* Nehalem-EX */
3098 uncore_nhmex = true;
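		/* fall through: Westmere-EX reuses the Nehalem-EX uncore, only the mbox event list differs */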
3099 case 47: /* Westmere-EX aka. Xeon E7 */
3100 if (!uncore_nhmex)
3101 nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
3102 if (nhmex_uncore_cbox.num_boxes > max_cores)
3103 nhmex_uncore_cbox.num_boxes = max_cores;
254298c7
YZ
3104 msr_uncores = nhmex_msr_uncores;
3105 break;
087bfbb0
YZ
3106 default:
3107 return 0;
3108 }
3109
3110 ret = uncore_types_init(msr_uncores);
3111 if (ret)
3112 return ret;
3113
3114 get_online_cpus();
3115
3116 for_each_online_cpu(cpu) {
3117 int i, phys_id = topology_physical_package_id(cpu);
3118
3119 for_each_cpu(i, &uncore_cpu_mask) {
3120 if (phys_id == topology_physical_package_id(i)) {
3121 phys_id = -1;
3122 break;
3123 }
3124 }
3125 if (phys_id < 0)
3126 continue;
3127
3128 uncore_cpu_prepare(cpu, phys_id);
3129 uncore_event_init_cpu(cpu);
3130 }
3131 on_each_cpu(uncore_cpu_setup, NULL, 1);
3132
3133 register_cpu_notifier(&uncore_cpu_nb);
3134
3135 put_online_cpus();
3136
3137 return 0;
3138}
3139
3140static int __init uncore_pmus_register(void)
3141{
3142 struct intel_uncore_pmu *pmu;
3143 struct intel_uncore_type *type;
3144 int i, j;
3145
3146 for (i = 0; msr_uncores[i]; i++) {
3147 type = msr_uncores[i];
3148 for (j = 0; j < type->num_boxes; j++) {
3149 pmu = &type->pmus[j];
3150 uncore_pmu_register(pmu);
3151 }
3152 }
3153
14371cce
YZ
3154 for (i = 0; pci_uncores[i]; i++) {
3155 type = pci_uncores[i];
3156 for (j = 0; j < type->num_boxes; j++) {
3157 pmu = &type->pmus[j];
3158 uncore_pmu_register(pmu);
3159 }
3160 }
3161
087bfbb0
YZ
3162 return 0;
3163}
3164
3165static int __init intel_uncore_init(void)
3166{
3167 int ret;
3168
3169 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
3170 return -ENODEV;
3171
a05123bd
YZ
3172 if (cpu_has_hypervisor)
3173 return -ENODEV;
3174
14371cce 3175 ret = uncore_pci_init();
087bfbb0
YZ
3176 if (ret)
3177 goto fail;
14371cce
YZ
3178 ret = uncore_cpu_init();
3179 if (ret) {
3180 uncore_pci_exit();
3181 goto fail;
3182 }
087bfbb0
YZ
3183
3184 uncore_pmus_register();
3185 return 0;
3186fail:
3187 return ret;
3188}
3189device_initcall(intel_uncore_init);