/*
 * perf_event_intel_cstate.c: support cstate residency counters
 *
 * Copyright (C) 2015, Intel Corp.
 * Author: Kan Liang (kan.liang@intel.com)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 */
/*
 * This file exports cstate related free running (read-only) counters
 * for perf. These counters may be used simultaneously by other tools,
 * such as turbostat. However, it still makes sense to implement them
 * in perf, because we can conveniently collect them together with
 * other events, and allow tools to use them without special MSR
 * access.
 *
 * The events only support system-wide mode counting. There is no
 * sampling support because it is not supported by the hardware.
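 *
 * For example, C-state residencies can be counted system-wide with
 * perf (illustrative usage; the event names match the attributes this
 * driver registers below):
 *
 *	perf stat -a -e cstate_core/c6-residency/ \
 *		     -e cstate_pkg/c2-residency/ -- sleep 1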
 *
 * According to counters' scope and category, two PMUs are registered
 * with the perf_event core subsystem.
 *  - 'cstate_core': The counter is available for each physical core.
 *    The counters include CORE_C*_RESIDENCY.
 *  - 'cstate_pkg': The counter is available for each physical package.
 *    The counters include PKG_C*_RESIDENCY.
 *
 * All of these counters are specified in the Intel® 64 and IA-32
 * Architectures Software Developer's Manual Vol3b.
 *
 * Model specific counters:
 *	MSR_CORE_C1_RES: CORE C1 Residency Counter
 *			 perf code: 0x00
 *			 Available model: SLM,AMT
 *			 Scope: Core (each processor core has a MSR)
 *	MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
 *			       perf code: 0x01
 *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *			       Scope: Core
 *	MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
 *			       perf code: 0x02
 *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *			       Scope: Core
 *	MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
 *			       perf code: 0x03
 *			       Available model: SNB,IVB,HSW,BDW,SKL
 *			       Scope: Core
 *	MSR_PKG_C2_RESIDENCY:  Package C2 Residency Counter.
 *			       perf code: 0x00
 *			       Available model: SNB,IVB,HSW,BDW,SKL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C3_RESIDENCY:  Package C3 Residency Counter.
 *			       perf code: 0x01
 *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C6_RESIDENCY:  Package C6 Residency Counter.
 *			       perf code: 0x02
 *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C7_RESIDENCY:  Package C7 Residency Counter.
 *			       perf code: 0x03
 *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C8_RESIDENCY:  Package C8 Residency Counter.
 *			       perf code: 0x04
 *			       Available model: HSW ULT only
 *			       Scope: Package (physical package)
 *	MSR_PKG_C9_RESIDENCY:  Package C9 Residency Counter.
 *			       perf code: 0x05
 *			       Available model: HSW ULT only
 *			       Scope: Package (physical package)
 *	MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
 *			       perf code: 0x06
 *			       Available model: HSW ULT only
 *			       Scope: Package (physical package)
 *
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <asm/cpu_device_id.h>
#include "../perf_event.h"

MODULE_LICENSE("GPL");

#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)		\
static ssize_t __cstate_##_var##_show(struct kobject *kobj,	\
				struct kobj_attribute *attr,	\
				char *page)			\
{								\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);		\
	return sprintf(page, _format "\n");			\
}								\
static struct kobj_attribute format_attr_##_var =		\
	__ATTR(_name, 0444, __cstate_##_var##_show, NULL)

static ssize_t cstate_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr,
				       char *buf);

/* Model -> events mapping */
struct cstate_model {
	unsigned long	core_events;
	unsigned long	pkg_events;
	unsigned long	quirks;
};

/* Quirk flags */
#define SLM_PKG_C6_USE_C7_MSR		(1UL << 0)

struct perf_cstate_msr {
	u64	msr;
	struct	perf_pmu_events_attr *attr;
};

/* cstate_core PMU */
static struct pmu cstate_core_pmu;
static bool has_cstate_core;

enum perf_cstate_core_events {
	PERF_CSTATE_CORE_C1_RES = 0,
	PERF_CSTATE_CORE_C3_RES,
	PERF_CSTATE_CORE_C6_RES,
	PERF_CSTATE_CORE_C7_RES,

	PERF_CSTATE_CORE_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c1-residency, evattr_cstate_core_c1, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_core_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_core_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_core_c7, "event=0x03");

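/*
 * The array index is the perf event code (event->attr.config). Slots
 * whose MSR is not present on the current model have their attr
 * pointer NULLed at probe time, and cstate_pmu_event_init() rejects
 * events for them.
 */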
static struct perf_cstate_msr core_msr[] = {
	[PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES,		&evattr_cstate_core_c1 },
	[PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY,	&evattr_cstate_core_c3 },
	[PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY,	&evattr_cstate_core_c6 },
	[PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY,	&evattr_cstate_core_c7 },
};

static struct attribute *core_events_attrs[PERF_CSTATE_CORE_EVENT_MAX + 1] = {
	NULL,
};

static struct attribute_group core_events_attr_group = {
	.name = "events",
	.attrs = core_events_attrs,
};

DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63");
static struct attribute *core_format_attrs[] = {
	&format_attr_core_event.attr,
	NULL,
};

static struct attribute_group core_format_attr_group = {
	.name = "format",
	.attrs = core_format_attrs,
};

static cpumask_t cstate_core_cpu_mask;
static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL);

static struct attribute *cstate_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group cpumask_attr_group = {
	.attrs = cstate_cpumask_attrs,
};

static const struct attribute_group *core_attr_groups[] = {
	&core_events_attr_group,
	&core_format_attr_group,
	&cpumask_attr_group,
	NULL,
};

/* cstate_pkg PMU */
static struct pmu cstate_pkg_pmu;
static bool has_cstate_pkg;

enum perf_cstate_pkg_events {
	PERF_CSTATE_PKG_C2_RES = 0,
	PERF_CSTATE_PKG_C3_RES,
	PERF_CSTATE_PKG_C6_RES,
	PERF_CSTATE_PKG_C7_RES,
	PERF_CSTATE_PKG_C8_RES,
	PERF_CSTATE_PKG_C9_RES,
	PERF_CSTATE_PKG_C10_RES,

	PERF_CSTATE_PKG_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c2-residency, evattr_cstate_pkg_c2, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_pkg_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_pkg_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_pkg_c7, "event=0x03");
PMU_EVENT_ATTR_STRING(c8-residency, evattr_cstate_pkg_c8, "event=0x04");
PMU_EVENT_ATTR_STRING(c9-residency, evattr_cstate_pkg_c9, "event=0x05");
PMU_EVENT_ATTR_STRING(c10-residency, evattr_cstate_pkg_c10, "event=0x06");

static struct perf_cstate_msr pkg_msr[] = {
	[PERF_CSTATE_PKG_C2_RES] = { MSR_PKG_C2_RESIDENCY,	&evattr_cstate_pkg_c2 },
	[PERF_CSTATE_PKG_C3_RES] = { MSR_PKG_C3_RESIDENCY,	&evattr_cstate_pkg_c3 },
	[PERF_CSTATE_PKG_C6_RES] = { MSR_PKG_C6_RESIDENCY,	&evattr_cstate_pkg_c6 },
	[PERF_CSTATE_PKG_C7_RES] = { MSR_PKG_C7_RESIDENCY,	&evattr_cstate_pkg_c7 },
	[PERF_CSTATE_PKG_C8_RES] = { MSR_PKG_C8_RESIDENCY,	&evattr_cstate_pkg_c8 },
	[PERF_CSTATE_PKG_C9_RES] = { MSR_PKG_C9_RESIDENCY,	&evattr_cstate_pkg_c9 },
	[PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY,	&evattr_cstate_pkg_c10 },
};

static struct attribute *pkg_events_attrs[PERF_CSTATE_PKG_EVENT_MAX + 1] = {
	NULL,
};

static struct attribute_group pkg_events_attr_group = {
	.name = "events",
	.attrs = pkg_events_attrs,
};

DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63");
static struct attribute *pkg_format_attrs[] = {
	&format_attr_pkg_event.attr,
	NULL,
};

static struct attribute_group pkg_format_attr_group = {
	.name = "format",
	.attrs = pkg_format_attrs,
};

static cpumask_t cstate_pkg_cpu_mask;

static const struct attribute_group *pkg_attr_groups[] = {
	&pkg_events_attr_group,
	&pkg_format_attr_group,
	&cpumask_attr_group,
	NULL,
};

static ssize_t cstate_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu == &cstate_core_pmu)
		return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
	else if (pmu == &cstate_pkg_pmu)
		return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
	else
		return 0;
}

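/*
 * These counters are per core/package, not per CPU, so event_init()
 * below redirects event->cpu to the designated reader CPU of the core
 * or package that the requested CPU belongs to; only that CPU touches
 * the MSR.
 */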
static int cstate_pmu_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;
	int cpu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	if (event->pmu == &cstate_core_pmu) {
		if (cfg >= PERF_CSTATE_CORE_EVENT_MAX)
			return -EINVAL;
		if (!core_msr[cfg].attr)
			return -EINVAL;
		event->hw.event_base = core_msr[cfg].msr;
		cpu = cpumask_any_and(&cstate_core_cpu_mask,
				      topology_sibling_cpumask(event->cpu));
	} else if (event->pmu == &cstate_pkg_pmu) {
		if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
			return -EINVAL;
		if (!pkg_msr[cfg].attr)
			return -EINVAL;
		event->hw.event_base = pkg_msr[cfg].msr;
		cpu = cpumask_any_and(&cstate_pkg_cpu_mask,
				      topology_core_cpumask(event->cpu));
	} else {
		return -ENOENT;
	}

	if (cpu >= nr_cpu_ids)
		return -ENODEV;

	event->cpu = cpu;
	event->hw.config = cfg;
	event->hw.idx = -1;
	return 0;
}

static inline u64 cstate_pmu_read_counter(struct perf_event *event)
{
	u64 val;

	rdmsrl(event->hw.event_base, val);
	return val;
}

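/*
 * The counter is free running and cannot be reset, so the delta since
 * the last read is accumulated into event->count. The cmpxchg loop
 * retries when a concurrent reader updated prev_count first.
 */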
static void cstate_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = cstate_pmu_read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	local64_add(new_raw_count - prev_raw_count, &event->count);
}

static void cstate_pmu_event_start(struct perf_event *event, int mode)
{
	local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event));
}

static void cstate_pmu_event_stop(struct perf_event *event, int mode)
{
	cstate_pmu_event_update(event);
}

static void cstate_pmu_event_del(struct perf_event *event, int mode)
{
	cstate_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int cstate_pmu_event_add(struct perf_event *event, int mode)
{
	if (mode & PERF_EF_START)
		cstate_pmu_event_start(event, mode);

	return 0;
}

/*
 * Check if exiting cpu is the designated reader. If so migrate the
 * events when there is a valid target available
 */
static void cstate_cpu_exit(int cpu)
{
	unsigned int target;

	if (has_cstate_core &&
	    cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask)) {

		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &cstate_core_cpu_mask);
			perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
		}
	}

	if (has_cstate_pkg &&
	    cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask)) {

		target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
			perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
		}
	}
}

static void cstate_cpu_init(int cpu)
{
	unsigned int target;

	/*
	 * If this is the first online thread of that core, set it in
	 * the core cpu mask as the designated reader.
	 */
	target = cpumask_any_and(&cstate_core_cpu_mask,
				 topology_sibling_cpumask(cpu));

	if (has_cstate_core && target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &cstate_core_cpu_mask);

	/*
	 * If this is the first online thread of that package, set it
	 * in the package cpu mask as the designated reader.
	 */
	target = cpumask_any_and(&cstate_pkg_cpu_mask,
				 topology_core_cpumask(cpu));
	if (has_cstate_pkg && target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);
}

static int cstate_cpu_notifier(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		cstate_cpu_init(cpu);
		break;
	case CPU_DOWN_PREPARE:
		cstate_cpu_exit(cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

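/*
 * Note: CPU_PRI_PERF + 1 gives this notifier a higher priority than
 * the perf core hotplug callback, presumably so that the reader masks
 * are updated and events migrated before perf core processes the same
 * notification.
 */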
static struct notifier_block cstate_cpu_nb = {
	.notifier_call	= cstate_cpu_notifier,
	.priority	= CPU_PRI_PERF + 1,
};

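/*
 * Both PMUs are counting-only: PERF_PMU_CAP_NO_INTERRUPT advertises
 * that no overflow interrupts are available, and event_init() above
 * rejects any sampling request (attr.sample_period != 0).
 */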
static struct pmu cstate_core_pmu = {
	.attr_groups	= core_attr_groups,
	.name		= "cstate_core",
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add,
	.del		= cstate_pmu_event_del,
	.start		= cstate_pmu_event_start,
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
};

static struct pmu cstate_pkg_pmu = {
	.attr_groups	= pkg_attr_groups,
	.name		= "cstate_pkg",
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add,
	.del		= cstate_pmu_event_del,
	.start		= cstate_pmu_event_start,
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
};

static const struct cstate_model nhm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model snb_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model hswult_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model slm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C6_RES),
	.quirks			= SLM_PKG_C6_USE_C7_MSR,
};

#define X86_CSTATES_MODEL(model, states)				\
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }

static const struct x86_cpu_id intel_cstates_match[] __initconst = {
	X86_CSTATES_MODEL(30, nhm_cstates),    /* 45nm Nehalem              */
	X86_CSTATES_MODEL(26, nhm_cstates),    /* 45nm Nehalem-EP           */
	X86_CSTATES_MODEL(46, nhm_cstates),    /* 45nm Nehalem-EX           */

	X86_CSTATES_MODEL(37, nhm_cstates),    /* 32nm Westmere             */
	X86_CSTATES_MODEL(44, nhm_cstates),    /* 32nm Westmere-EP          */
	X86_CSTATES_MODEL(47, nhm_cstates),    /* 32nm Westmere-EX          */

	X86_CSTATES_MODEL(42, snb_cstates),    /* 32nm SandyBridge          */
	X86_CSTATES_MODEL(45, snb_cstates),    /* 32nm SandyBridge-E/EN/EP  */

	X86_CSTATES_MODEL(58, snb_cstates),    /* 22nm IvyBridge            */
	X86_CSTATES_MODEL(62, snb_cstates),    /* 22nm IvyBridge-EP/EX      */

	X86_CSTATES_MODEL(60, snb_cstates),    /* 22nm Haswell Core         */
	X86_CSTATES_MODEL(63, snb_cstates),    /* 22nm Haswell Server       */
	X86_CSTATES_MODEL(70, snb_cstates),    /* 22nm Haswell + GT3e       */

	X86_CSTATES_MODEL(69, hswult_cstates), /* 22nm Haswell ULT          */

	X86_CSTATES_MODEL(55, slm_cstates),    /* 22nm Atom Silvermont      */
	X86_CSTATES_MODEL(77, slm_cstates),    /* 22nm Atom Avoton/Rangely  */
	X86_CSTATES_MODEL(76, slm_cstates),    /* 22nm Atom Airmont         */

	X86_CSTATES_MODEL(61, snb_cstates),    /* 14nm Broadwell Core-M     */
	X86_CSTATES_MODEL(86, snb_cstates),    /* 14nm Broadwell Xeon D     */
	X86_CSTATES_MODEL(71, snb_cstates),    /* 14nm Broadwell + GT3e     */
	X86_CSTATES_MODEL(79, snb_cstates),    /* 14nm Broadwell Server     */

	X86_CSTATES_MODEL(78, snb_cstates),    /* 14nm Skylake Mobile       */
	X86_CSTATES_MODEL(94, snb_cstates),    /* 14nm Skylake Desktop      */
	{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);

/*
 * Probe the cstate events and insert the available ones into sysfs attrs
 * Return false if there are no available events.
 */
static bool __init cstate_probe_msr(const unsigned long evmsk, int max,
				    struct perf_cstate_msr *msr,
				    struct attribute **attrs)
{
	bool found = false;
	unsigned int bit;
	u64 val;

	for (bit = 0; bit < max; bit++) {
		if (test_bit(bit, &evmsk) && !rdmsrl_safe(msr[bit].msr, &val)) {
			*attrs++ = &msr[bit].attr->attr.attr;
			found = true;
		} else {
			msr[bit].attr = NULL;
		}
	}
	*attrs = NULL;

	return found;
}

static int __init cstate_probe(const struct cstate_model *cm)
{
	/* SLM has different MSR for PKG C6 */
	if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
		pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;

	has_cstate_core = cstate_probe_msr(cm->core_events,
					   PERF_CSTATE_CORE_EVENT_MAX,
					   core_msr, core_events_attrs);

	has_cstate_pkg = cstate_probe_msr(cm->pkg_events,
					  PERF_CSTATE_PKG_EVENT_MAX,
					  pkg_msr, pkg_events_attrs);

	return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
}

static inline void cstate_cleanup(void)
{
	if (has_cstate_core)
		perf_pmu_unregister(&cstate_core_pmu);

	if (has_cstate_pkg)
		perf_pmu_unregister(&cstate_pkg_pmu);
}

static int __init cstate_init(void)
{
	int cpu, err = 0;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		cstate_cpu_init(cpu);

	if (has_cstate_core) {
		err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
		if (err) {
			has_cstate_core = false;
			pr_info("Failed to register cstate core pmu\n");
		}
	}

	if (has_cstate_pkg) {
		err = perf_pmu_register(&cstate_pkg_pmu, cstate_pkg_pmu.name, -1);
		if (err) {
			has_cstate_pkg = false;
			pr_info("Failed to register cstate pkg pmu\n");
			cstate_cleanup();
			goto out;
		}
	}

	__register_cpu_notifier(&cstate_cpu_nb);
out:
	cpu_notifier_register_done();
	return err;
}

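/*
 * These residency MSRs are typically not available (or not emulated)
 * when running as a guest, hence the driver refuses to load under a
 * hypervisor.
 */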
static int __init cstate_pmu_init(void)
{
	const struct x86_cpu_id *id;
	int err;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	id = x86_match_cpu(intel_cstates_match);
	if (!id)
		return -ENODEV;

	err = cstate_probe((const struct cstate_model *) id->driver_data);
	if (err)
		return err;

	return cstate_init();
}
module_init(cstate_pmu_init);

static void __exit cstate_pmu_exit(void)
{
	cpu_notifier_register_begin();
	__unregister_cpu_notifier(&cstate_cpu_nb);
	cstate_cleanup();
	cpu_notifier_register_done();
}
module_exit(cstate_pmu_exit);