/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/slab.h>
#include <linux/pci.h>
#include <asm/apicdef.h>
#include <asm/intel-family.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include <linux/perf_event.h>
#include "../perf_event.h"

#define UNCORE_PMU_NAME_LEN		32
#define UNCORE_PMU_HRTIMER_INTERVAL	(60LL * NSEC_PER_SEC)
#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)

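/*
 * Most uncore PMUs cannot deliver a usable overflow interrupt, so the
 * driver polls the counters from an hrtimer to pick up overflows. One
 * minute is comfortable for the wide uncore counters; the SNB IMC free
 * running counters are much narrower and would wrap within a minute,
 * hence the shorter 5 second period.
 */
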
#define UNCORE_FIXED_EVENT		0xff
#define UNCORE_PMC_IDX_MAX_GENERIC	8
#define UNCORE_PMC_IDX_MAX_FIXED	1
#define UNCORE_PMC_IDX_MAX_FREERUNNING	1
#define UNCORE_PMC_IDX_FIXED		UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_FREERUNNING	(UNCORE_PMC_IDX_FIXED + \
					UNCORE_PMC_IDX_MAX_FIXED)
#define UNCORE_PMC_IDX_MAX		(UNCORE_PMC_IDX_FREERUNNING + \
					UNCORE_PMC_IDX_MAX_FREERUNNING)

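/*
 * With the values above, the per-box counter index space works out to:
 * generic counters at indices 0..7, the fixed counter at index 8 and the
 * free running pseudo-counter at index 9, so UNCORE_PMC_IDX_MAX is 10.
 */
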
#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx)	\
		((dev << 24) | (func << 16) | (type << 8) | idx)
#define UNCORE_PCI_DEV_DATA(type, idx)	((type << 8) | idx)
#define UNCORE_PCI_DEV_DEV(data)	((data >> 24) & 0xff)
#define UNCORE_PCI_DEV_FUNC(data)	((data >> 16) & 0xff)
#define UNCORE_PCI_DEV_TYPE(data)	((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data)	(data & 0xff)
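/*
 * Worked example with made-up values: UNCORE_PCI_DEV_FULL_DATA(10, 1, 2, 3)
 * packs to 0x0a010203, from which UNCORE_PCI_DEV_DEV(), _FUNC(), _TYPE()
 * and _IDX() recover 10, 1, 2 and 3 respectively.
 */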
#define UNCORE_EXTRA_PCI_DEV		0xff
#define UNCORE_EXTRA_PCI_DEV_MAX	4

#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)

struct pci_extra_dev {
	struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX];
};

struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;
struct freerunning_counters;
struct intel_uncore_topology;

struct intel_uncore_type {
	const char *name;
	int num_counters;
	int num_boxes;
	int perf_ctr_bits;
	int fixed_ctr_bits;
	int num_freerunning_types;
	int type_id;
	unsigned perf_ctr;
	unsigned event_ctl;
	unsigned event_mask;
	unsigned event_mask_ext;
	unsigned fixed_ctr;
	unsigned fixed_ctl;
	unsigned box_ctl;
	u64 *box_ctls;	/* Unit ctrl addr of the first box of each die */
	union {
		unsigned msr_offset;
		unsigned mmio_offset;
	};
	unsigned mmio_map_size;
	unsigned num_shared_regs:8;
	unsigned single_fixed:1;
	unsigned pair_ctr_ctl:1;
	union {
		unsigned *msr_offsets;
		unsigned *pci_offsets;
		unsigned *mmio_offsets;
	};
	unsigned *box_ids;
	struct event_constraint unconstrainted;
	struct event_constraint *constraints;
	struct intel_uncore_pmu *pmus;
	struct intel_uncore_ops *ops;
	struct uncore_event_desc *event_descs;
	struct freerunning_counters *freerunning;
	const struct attribute_group *attr_groups[4];
	const struct attribute_group **attr_update;
	struct pmu *pmu; /* for custom pmu ops */
	/*
	 * The uncore PMU stores the relevant platform topology configuration
	 * here to identify which platform component each PMON block of this
	 * type is supposed to monitor.
	 */
	struct intel_uncore_topology **topology;
	/*
	 * Optional callbacks for managing the mapping of uncore units to PMONs
	 */
	int (*get_topology)(struct intel_uncore_type *type);
	void (*set_mapping)(struct intel_uncore_type *type);
	void (*cleanup_mapping)(struct intel_uncore_type *type);
};

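/*
 * A minimal sketch of how a platform file fills one of these in (all names
 * and values below are hypothetical, not taken from a real platform):
 *
 *	static struct intel_uncore_type foo_uncore_cbox = {
 *		.name		= "cbox",
 *		.num_counters	= 2,
 *		.num_boxes	= 4,
 *		.perf_ctr_bits	= 44,
 *		.perf_ctr	= FOO_UNC_CBO_0_PER_CTR0,
 *		.event_ctl	= FOO_UNC_CBO_0_PERFEVTSEL0,
 *		.event_mask	= FOO_UNC_RAW_EVENT_MASK,
 *		.msr_offset	= FOO_UNC_CBO_MSR_OFFSET,
 *		.ops		= &foo_uncore_msr_ops,
 *		.format_group	= &foo_uncore_format_group,
 *	};
 */
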
#define pmu_group attr_groups[0]
#define format_group attr_groups[1]
#define events_group attr_groups[2]

struct intel_uncore_ops {
	void (*init_box)(struct intel_uncore_box *);
	void (*exit_box)(struct intel_uncore_box *);
	void (*disable_box)(struct intel_uncore_box *);
	void (*enable_box)(struct intel_uncore_box *);
	void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
	void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
	u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
	int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
	struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
						   struct perf_event *);
	void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};

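/*
 * Each access method (MSR, PCI config space, MMIO) supplies its own ops
 * table; the wrappers further down (uncore_enable_event() and friends)
 * simply indirect through box->pmu->type->ops.
 */
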
struct intel_uncore_pmu {
	struct pmu			pmu;
	char				name[UNCORE_PMU_NAME_LEN];
	int				pmu_idx;
	int				func_id;
	bool				registered;
	atomic_t			activeboxes;
	struct intel_uncore_type	*type;
	struct intel_uncore_box		**boxes;
};

struct intel_uncore_extra_reg {
	raw_spinlock_t lock;
	u64 config, config1, config2;
	atomic_t ref;
};

struct intel_uncore_box {
	int dieid;	/* Logical die ID */
	int n_active;	/* number of active events */
	int n_events;
	int cpu;	/* cpu to collect events */
	unsigned long flags;
	atomic_t refcnt;
	struct perf_event *events[UNCORE_PMC_IDX_MAX];
	struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
	struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
	unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	u64 tags[UNCORE_PMC_IDX_MAX];
	struct pci_dev *pci_dev;
	struct intel_uncore_pmu *pmu;
	u64 hrtimer_duration; /* hrtimer timeout for this box */
	struct hrtimer hrtimer;
	struct list_head list;
	struct list_head active_list;
	void __iomem *io_addr;
	struct intel_uncore_extra_reg shared_regs[];
};

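/*
 * shared_regs is a flexible array: the allocation side sizes a box as
 * sizeof(*box) plus type->num_shared_regs trailing extra_reg entries.
 */
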
/* CFL uncore 8th cbox MSRs */
#define CFL_UNC_CBO_7_PERFEVTSEL0		0xf70
#define CFL_UNC_CBO_7_PER_CTR0			0xf76

#define UNCORE_BOX_FLAG_INITIATED		0
/* event config registers are 8 bytes apart */
#define UNCORE_BOX_FLAG_CTL_OFFS8		1
/* CFL 8th CBOX has a different MSR space */
#define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS	2

struct uncore_event_desc {
	struct device_attribute attr;
	const char *config;
};

struct freerunning_counters {
	unsigned int counter_base;
	unsigned int counter_offset;
	unsigned int box_offset;
	unsigned int num_counters;
	unsigned int bits;
	unsigned *box_offsets;
};

struct uncore_iio_topology {
	int pci_bus_no;
	int segment;
};

struct uncore_upi_topology {
	int die_to;
	int pmu_idx_to;
	int enabled;
};

struct intel_uncore_topology {
	int pmu_idx;
	union {
		void *untyped;
		struct uncore_iio_topology *iio;
		struct uncore_upi_topology *upi;
	};
};

struct pci2phy_map {
	struct list_head list;
	int segment;
	int pbus_to_dieid[256];
};

struct pci2phy_map *__find_pci2phy_map(int segment);
int uncore_pcibus_to_dieid(struct pci_bus *bus);
int uncore_die_to_segment(int die);
int uncore_device_to_die(struct pci_dev *dev);

ssize_t uncore_event_show(struct device *dev,
			  struct device_attribute *attr, char *buf);

static inline struct intel_uncore_pmu *dev_to_uncore_pmu(struct device *dev)
{
	return container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu);
}

#define to_device_attribute(n)	container_of(n, struct device_attribute, attr)
#define to_dev_ext_attribute(n)	container_of(n, struct dev_ext_attribute, attr)
#define attr_to_ext_attr(n)	to_dev_ext_attribute(to_device_attribute(n))

extern int __uncore_max_dies;
#define uncore_max_dies()	(__uncore_max_dies)

#define INTEL_UNCORE_EVENT_DESC(_name, _config)			\
{								\
	.attr	= __ATTR(_name, 0444, uncore_event_show, NULL),	\
	.config	= _config,					\
}
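
/*
 * Typical usage in a platform file (the event string is illustrative):
 *
 *	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x01"),
 */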

#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
static ssize_t __uncore_##_var##_show(struct device *dev,		\
				struct device_attribute *attr,		\
				char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct device_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
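
/*
 * For example, the common "event" format attribute can be defined as:
 *
 *	DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 *
 * which expands to a show routine that prints "config:0-7\n" and to the
 * matching format_attr_event device attribute.
 */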

static inline bool uncore_pmc_fixed(int idx)
{
	return idx == UNCORE_PMC_IDX_FIXED;
}

static inline bool uncore_pmc_freerunning(int idx)
{
	return idx == UNCORE_PMC_IDX_FREERUNNING;
}

static inline bool uncore_mmio_is_valid_offset(struct intel_uncore_box *box,
					       unsigned long offset)
{
	if (offset < box->pmu->type->mmio_map_size)
		return true;

	pr_warn_once("perf uncore: Invalid offset 0x%lx exceeds mapped area of %s.\n",
		     offset, box->pmu->type->name);

	return false;
}

static inline
unsigned int uncore_mmio_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl +
	       box->pmu->type->mmio_offset * box->pmu->pmu_idx;
}

static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl;
}

static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctl;
}

static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr;
}

static inline
unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags))
		return idx * 8 + box->pmu->type->event_ctl;

	return idx * 4 + box->pmu->type->event_ctl;
}

static inline
unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return idx * 8 + box->pmu->type->perf_ctr;
}

static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
{
	struct intel_uncore_pmu *pmu = box->pmu;
	return pmu->type->msr_offsets ?
		pmu->type->msr_offsets[pmu->pmu_idx] :
		pmu->type->msr_offset * pmu->pmu_idx;
}

static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->box_ctl)
		return 0;
	return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->fixed_ctl)
		return 0;
	return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
}

/*
 * In the uncore document, there is no event-code assigned to free running
 * counters. Some events need to be defined to indicate the free running
 * counters. The events are encoded as event-code + umask-code.
 *
 * The event-code for all free running counters is 0xff, which is the same as
 * the fixed counters.
 *
 * The umask-code is used to distinguish a fixed counter and a free running
 * counter, and different types of free running counters.
 * - For fixed counters, the umask-code is 0x0X.
 *   X indicates the index of the fixed counter, which starts from 0.
 * - For free running counters, the umask-code uses the rest of the space.
 *   It bears the format 0xXY.
 *   X stands for the type of free running counters, which starts from 1.
 *   Y stands for the index of free running counters of the same type, which
 *   starts from 0.
 *
 * For example, there are three types of IIO free running counters on Skylake
 * server: IO CLOCKS counters, BANDWIDTH counters and UTILIZATION counters.
 * The event-code for all the free running counters is 0xff.
 * 'ioclk' is the first counter of IO CLOCKS. IO CLOCKS is the first type,
 * whose umask-code starts from 0x10.
 * So 'ioclk' is encoded as event=0xff,umask=0x10.
 * 'bw_in_port2' is the third counter of the BANDWIDTH counters. BANDWIDTH is
 * the second type, whose umask-code starts from 0x20.
 * So 'bw_in_port2' is encoded as event=0xff,umask=0x22.
 */
static inline unsigned int uncore_freerunning_idx(u64 config)
{
	return ((config >> 8) & 0xf);
}

#define UNCORE_FREERUNNING_UMASK_START		0x10

static inline unsigned int uncore_freerunning_type(u64 config)
{
	return ((((config >> 8) - UNCORE_FREERUNNING_UMASK_START) >> 4) & 0xf);
}

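/*
 * Decode example following the scheme above: 'bw_in_port2' has
 * config 0x22ff (event=0xff, umask=0x22), so uncore_freerunning_type()
 * yields ((0x22 - 0x10) >> 4) & 0xf = 1 (the second type, BANDWIDTH) and
 * uncore_freerunning_idx() yields 0x22 & 0xf = 2 (the third counter).
 */
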
static inline
unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
					struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);
	unsigned int idx = uncore_freerunning_idx(event->hw.config);
	struct intel_uncore_pmu *pmu = box->pmu;

	return pmu->type->freerunning[type].counter_base +
	       pmu->type->freerunning[type].counter_offset * idx +
	       (pmu->type->freerunning[type].box_offsets ?
	        pmu->type->freerunning[type].box_offsets[pmu->pmu_idx] :
	        pmu->type->freerunning[type].box_offset * pmu->pmu_idx);
}

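/*
 * I.e. the counter address is counter_base plus idx steps of
 * counter_offset within the box, plus a per-box displacement taken from
 * the box_offsets[] array when the boxes are laid out irregularly, or
 * computed as box_offset * pmu_idx otherwise.
 */
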
static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
		return CFL_UNC_CBO_7_PERFEVTSEL0 +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
	} else {
		return box->pmu->type->event_ctl +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		       uncore_msr_box_offset(box);
	}
}

static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
		return CFL_UNC_CBO_7_PER_CTR0 +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
	} else {
		return box->pmu->type->perf_ctr +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		       uncore_msr_box_offset(box);
	}
}

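/*
 * The uncore_*_ctl()/uncore_*_ctr() helpers below pick the right flavour
 * for a box: PCI and MMIO boxes (pci_dev or io_addr set) share the
 * PCI-style register layout; everything else is reached via MSRs.
 */
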
static inline
unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_fixed_ctl(box);
	else
		return uncore_msr_fixed_ctl(box);
}

static inline
unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_fixed_ctr(box);
	else
		return uncore_msr_fixed_ctr(box);
}

static inline
unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_event_ctl(box, idx);
	else
		return uncore_msr_event_ctl(box, idx);
}

static inline
unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_perf_ctr(box, idx);
	else
		return uncore_msr_perf_ctr(box, idx);
}

static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->perf_ctr_bits;
}

static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr_bits;
}

static inline
unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
				     struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);

	return box->pmu->type->freerunning[type].bits;
}

static inline int uncore_num_freerunning(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);

	return box->pmu->type->freerunning[type].num_counters;
}

static inline int uncore_num_freerunning_types(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	return box->pmu->type->num_freerunning_types;
}

static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
						 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);
	unsigned int idx = uncore_freerunning_idx(event->hw.config);

	return (type < uncore_num_freerunning_types(box, event)) &&
	       (idx < uncore_num_freerunning(box, event));
}

static inline int uncore_num_counters(struct intel_uncore_box *box)
{
	return box->pmu->type->num_counters;
}

static inline bool is_freerunning_event(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	return ((cfg & UNCORE_FIXED_EVENT) == UNCORE_FIXED_EVENT) &&
	       (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START);
}

/* Check and reject invalid config */
static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	if (is_freerunning_event(event))
		return 0;

	return -EINVAL;
}

static inline void uncore_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	box->pmu->type->ops->disable_event(box, event);
}

static inline void uncore_enable_event(struct intel_uncore_box *box,
				       struct perf_event *event)
{
	box->pmu->type->ops->enable_event(box, event);
}

static inline u64 uncore_read_counter(struct intel_uncore_box *box,
				      struct perf_event *event)
{
	return box->pmu->type->ops->read_counter(box, event);
}

static inline void uncore_box_init(struct intel_uncore_box *box)
{
	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->init_box)
			box->pmu->type->ops->init_box(box);
	}
}

static inline void uncore_box_exit(struct intel_uncore_box *box)
{
	if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->exit_box)
			box->pmu->type->ops->exit_box(box);
	}
}

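/*
 * A fake box (dieid < 0) is a temporary box used while validating an
 * event group, before the events are tied to a real box.
 */
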
static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
	return (box->dieid < 0);
}

static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}

static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
	return event->pmu_private;
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_mmio_exit_box(struct intel_uncore_box *box);
u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
			     struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_event_start(struct perf_event *event, int flags);
void uncore_pmu_event_stop(struct perf_event *event, int flags);
int uncore_pmu_event_add(struct perf_event *event, int flags);
void uncore_pmu_event_del(struct perf_event *event, int flags);
void uncore_pmu_event_read(struct perf_event *event);
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);
void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu);

extern struct intel_uncore_type *empty_uncore[];
extern struct intel_uncore_type **uncore_msr_uncores;
extern struct intel_uncore_type **uncore_pci_uncores;
extern struct intel_uncore_type **uncore_mmio_uncores;
extern struct pci_driver *uncore_pci_driver;
extern struct pci_driver *uncore_pci_sub_driver;
extern raw_spinlock_t pci2phy_map_lock;
extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;

/* uncore_snb.c */
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
void icl_uncore_cpu_init(void);
void tgl_uncore_cpu_init(void);
void adl_uncore_cpu_init(void);
void tgl_uncore_mmio_init(void);
void tgl_l_uncore_mmio_init(void);
void adl_uncore_mmio_init(void);
int snb_pci2phy_map_init(int devid);

/* uncore_snbep.c */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);
int skx_uncore_pci_init(void);
void skx_uncore_cpu_init(void);
int snr_uncore_pci_init(void);
void snr_uncore_cpu_init(void);
void snr_uncore_mmio_init(void);
int icx_uncore_pci_init(void);
void icx_uncore_cpu_init(void);
void icx_uncore_mmio_init(void);
int spr_uncore_pci_init(void);
void spr_uncore_cpu_init(void);
void spr_uncore_mmio_init(void);

/* uncore_nhmex.c */
void nhmex_uncore_cpu_init(void);