/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017-2018 Intel Corporation
 */

#include <linux/irq.h>
#include <linux/pm_runtime.h>

#include "i915_pmu.h"
#include "intel_ringbuffer.h"
#include "i915_drv.h"
/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)
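/*
 * Worked example of the period arithmetic above: with FREQUENCY at
 * 200Hz, NSEC_PER_SEC / FREQUENCY = 5,000,000ns, so the timer fires
 * every 5ms; the 10,000ns floor in max_t() would only kick in for
 * sampling frequencies above 100kHz.
 */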
#define ENGINE_SAMPLE_MASK \
	(BIT(I915_SAMPLE_BUSY) | \
	 BIT(I915_SAMPLE_WAIT) | \
	 BIT(I915_SAMPLE_SEMA))

#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)

static cpumask_t i915_pmu_cpumask;
static u8 engine_config_sample(u64 config)
{
	return config & I915_PMU_SAMPLE_MASK;
}

static u8 engine_event_sample(struct perf_event *event)
{
	return engine_config_sample(event->attr.config);
}

static u8 engine_event_class(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
}

static u8 engine_event_instance(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
}

static bool is_engine_config(u64 config)
{
	return config < __I915_PMU_OTHER(0);
}

static unsigned int config_enabled_bit(u64 config)
{
	if (is_engine_config(config))
		return engine_config_sample(config);
	else
		return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
}

static u64 config_enabled_mask(u64 config)
{
	return BIT_ULL(config_enabled_bit(config));
}

static bool is_engine_event(struct perf_event *event)
{
	return is_engine_config(event->attr.config);
}

static unsigned int event_enabled_bit(struct perf_event *event)
{
	return config_enabled_bit(event->attr.config);
}
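/*
 * For reference, a sketch of the config bit layout the helpers above
 * decode, following the __I915_PMU_ENGINE() definition in
 * include/uapi/drm/i915_drm.h:
 *
 *	config = (class << I915_PMU_CLASS_SHIFT) |	/* bits 12+  *‍/
 *		 (instance << I915_PMU_SAMPLE_BITS) |	/* bits 4-11 *‍/
 *		 (sample);				/* bits 0-3  *‍/
 *
 * Engine events occupy this packed space, while everything from
 * __I915_PMU_OTHER(0) upwards (actual/requested frequency, interrupts,
 * RC6 residency) is a global event.
 */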
static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
{
	u64 enable;

	/*
	 * Only some counters need the sampling timer.
	 *
	 * We start with a bitmask of all currently enabled events.
	 */
	enable = i915->pmu.enable;

	/*
	 * Mask out all the ones which do not need the timer, or in
	 * other words keep all the ones that could need the timer.
	 */
	enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
		  config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
		  ENGINE_SAMPLE_MASK;

	/*
	 * When the GPU is idle per-engine counters do not need to be
	 * running so clear those bits out.
	 */
	if (!gpu_active)
		enable &= ~ENGINE_SAMPLE_MASK;
	/*
	 * Also, if software busyness tracking is available we do not need
	 * the timer for the I915_SAMPLE_BUSY counter.
	 *
	 * Use RCS as a proxy for all engines.
	 */
	else if (intel_engine_supports_stats(i915->engine[RCS0]))
		enable &= ~BIT(I915_SAMPLE_BUSY);

	/*
	 * If some bits remain it means we need the sampling timer running.
	 */
	return enable;
}
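/*
 * Illustration of the gating above: with only <engine>-busy events
 * enabled, an idle GPU clears the engine sample bits and an active GPU
 * with software busyness tracking clears I915_SAMPLE_BUSY, so in both
 * cases no bits remain and the sampling timer stays off.
 */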
void i915_pmu_gt_parked(struct drm_i915_private *i915)
{
	if (!i915->pmu.base.event_init)
		return;

	spin_lock_irq(&i915->pmu.lock);
	/*
	 * Signal sampling timer to stop if only engine events are enabled and
	 * GPU went idle.
	 */
	i915->pmu.timer_enabled = pmu_needs_timer(i915, false);
	spin_unlock_irq(&i915->pmu.lock);
}
static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
{
	if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
		i915->pmu.timer_enabled = true;
		i915->pmu.timer_last = ktime_get();
		hrtimer_start_range_ns(&i915->pmu.timer,
				       ns_to_ktime(PERIOD), 0,
				       HRTIMER_MODE_REL_PINNED);
	}
}
void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{
	if (!i915->pmu.base.event_init)
		return;

	spin_lock_irq(&i915->pmu.lock);
	/*
	 * Re-enable sampling timer when GPU goes active.
	 */
	__i915_pmu_maybe_start_timer(i915);
	spin_unlock_irq(&i915->pmu.lock);
}
static void
add_sample(struct i915_pmu_sample *sample, u32 val)
{
	sample->cur += val;
}
static void
engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	unsigned long flags;

	if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
		return;

	wakeref = 0;
	if (READ_ONCE(dev_priv->gt.awake))
		wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
	if (!wakeref)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
	for_each_engine(engine, dev_priv, id) {
		struct intel_engine_pmu *pmu = &engine->pmu;
		bool busy;
		u32 val;

		val = I915_READ_FW(RING_CTL(engine->mmio_base));
		if (val == 0) /* powerwell off => engine idle */
			continue;

		if (val & RING_WAIT)
			add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
		if (val & RING_WAIT_SEMAPHORE)
			add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);

		/*
		 * While waiting on a semaphore or event, MI_MODE reports the
		 * ring as idle. However, previously using the seqno, and with
		 * execlists sampling, we account for the ring waiting as the
		 * engine being busy. Therefore, we record the sample as being
		 * busy if either waiting or !idle.
		 */
		busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
		if (!busy) {
			val = I915_READ_FW(RING_MI_MODE(engine->mmio_base));
			busy = !(val & MODE_IDLE);
		}
		if (busy)
			add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
	}
	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);

	intel_runtime_pm_put(dev_priv, wakeref);
}
static void
add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
{
	sample->cur += mul_u32_u32(val, mul);
}
static void
frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
{
	if (dev_priv->pmu.enable &
	    config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
		u32 val;

		val = dev_priv->gt_pm.rps.cur_freq;
		if (dev_priv->gt.awake) {
			intel_wakeref_t wakeref;

			with_intel_runtime_pm_if_in_use(dev_priv, wakeref)
				val = intel_get_cagf(dev_priv,
						     I915_READ_NOTRACE(GEN6_RPSTAT1));
		}

		add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
				intel_gpu_freq(dev_priv, val),
				period_ns / 1000);
	}

	if (dev_priv->pmu.enable &
	    config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
		add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ],
				intel_gpu_freq(dev_priv,
					       dev_priv->gt_pm.rps.cur_freq),
				period_ns / 1000);
	}
}
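/*
 * A note on units for the sampling above: each tick accumulates
 * freq[MHz] * (period_ns / 1000), i.e. MHz * usec. For example, one
 * 5ms tick at a steady 1000MHz adds 1000 * 5000 = 5,000,000. The read
 * side divides the running total by USEC_PER_SEC, so the event reports
 * MHz * seconds, which perf averages over elapsed time back into MHz.
 */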
static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
	struct drm_i915_private *i915 =
		container_of(hrtimer, struct drm_i915_private, pmu.timer);
	unsigned int period_ns;
	ktime_t now;

	if (!READ_ONCE(i915->pmu.timer_enabled))
		return HRTIMER_NORESTART;

	now = ktime_get();
	period_ns = ktime_to_ns(ktime_sub(now, i915->pmu.timer_last));
	i915->pmu.timer_last = now;

	/*
	 * Strictly speaking the passed in period may not be 100% accurate for
	 * all internal calculations, since some amount of time can be spent on
	 * grabbing the forcewake. However the potential error from timer
	 * callback delay greatly dominates this so we keep it simple.
	 */
	engines_sample(i915, period_ns);
	frequency_sample(i915, period_ns);

	hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));

	return HRTIMER_RESTART;
}
static u64 count_interrupts(struct drm_i915_private *i915)
{
	/* open-coded kstat_irqs() */
	struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
	u64 sum = 0;
	int cpu;

	if (!desc || !desc->kstat_irqs)
		return 0;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);

	return sum;
}
static void engine_event_destroy(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct intel_engine_cs *engine;

	engine = intel_engine_lookup_user(i915,
					  engine_event_class(event),
					  engine_event_instance(event));
	if (WARN_ON_ONCE(!engine))
		return;

	if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
	    intel_engine_supports_stats(engine))
		intel_disable_engine_stats(engine);
}
static void i915_pmu_event_destroy(struct perf_event *event)
{
	WARN_ON(event->parent);

	if (is_engine_event(event))
		engine_event_destroy(event);
}
static int
engine_event_status(struct intel_engine_cs *engine,
		    enum drm_i915_pmu_engine_sample sample)
{
	switch (sample) {
	case I915_SAMPLE_BUSY:
	case I915_SAMPLE_WAIT:
		break;
	case I915_SAMPLE_SEMA:
		if (INTEL_GEN(engine->i915) < 6)
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
static int
config_status(struct drm_i915_private *i915, u64 config)
{
	switch (config) {
	case I915_PMU_ACTUAL_FREQUENCY:
		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
			/* Requires a mutex for sampling! */
			return -ENODEV;
		/* Fall-through. */
	case I915_PMU_REQUESTED_FREQUENCY:
		if (INTEL_GEN(i915) < 6)
			return -ENODEV;
		break;
	case I915_PMU_INTERRUPTS:
		break;
	case I915_PMU_RC6_RESIDENCY:
		if (!HAS_RC6(i915))
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
static int engine_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct intel_engine_cs *engine;
	u8 sample;
	int ret;

	engine = intel_engine_lookup_user(i915, engine_event_class(event),
					  engine_event_instance(event));
	if (!engine)
		return -ENODEV;

	sample = engine_event_sample(event);
	ret = engine_event_status(engine, sample);
	if (ret)
		return ret;

	if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine))
		ret = intel_enable_engine_stats(engine);

	return ret;
}
static int i915_pmu_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (event->cpu < 0)
		return -EINVAL;

	/* only allow running on one cpu at a time */
	if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
		return -EINVAL;

	if (is_engine_event(event))
		ret = engine_event_init(event);
	else
		ret = config_status(i915, event->attr.config);
	if (ret)
		return ret;

	if (!event->parent)
		event->destroy = i915_pmu_event_destroy;

	return 0;
}
static u64 __get_rc6(struct drm_i915_private *i915)
{
	u64 val;

	val = intel_rc6_residency_ns(i915,
				     IS_VALLEYVIEW(i915) ?
				     VLV_GT_RENDER_RC6 :
				     GEN6_GT_GFX_RC6);

	if (HAS_RC6p(i915))
		val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);

	if (HAS_RC6pp(i915))
		val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);

	return val;
}
static u64 get_rc6(struct drm_i915_private *i915)
{
#if IS_ENABLED(CONFIG_PM)
	intel_wakeref_t wakeref;
	unsigned long flags;
	u64 val;

	wakeref = intel_runtime_pm_get_if_in_use(i915);
	if (wakeref) {
		val = __get_rc6(i915);
		intel_runtime_pm_put(i915, wakeref);

		/*
		 * If we are coming back from being runtime suspended we must
		 * be careful not to report a larger value than returned
		 * previously.
		 */
		spin_lock_irqsave(&i915->pmu.lock, flags);

		if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
			i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
			i915->pmu.sample[__I915_SAMPLE_RC6].cur = val;
		} else {
			val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
		}

		spin_unlock_irqrestore(&i915->pmu.lock, flags);
	} else {
		struct pci_dev *pdev = i915->drm.pdev;
		struct device *kdev = &pdev->dev;

		/*
		 * We are runtime suspended.
		 *
		 * Report the delta from when the device was suspended to now,
		 * on top of the last known real value, as the approximated RC6
		 * counter value.
		 */
		spin_lock_irqsave(&i915->pmu.lock, flags);

		/*
		 * Since intel_runtime_pm_get_if_in_use above failed to get the
		 * runtime PM reference we cannot assume we are in runtime
		 * suspend, since we can either: a) race with coming out of it
		 * before we took the power.lock, or b) there are other states
		 * than suspended which can bring us here.
		 *
		 * We need to double-check that we are indeed currently runtime
		 * suspended and if not we cannot do better than report the last
		 * known RC6 value.
		 */
		if (pm_runtime_status_suspended(kdev)) {
			val = pm_runtime_suspended_time(kdev);

			if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
				i915->pmu.suspended_time_last = val;

			val -= i915->pmu.suspended_time_last;
			val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;

			i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
		} else if (i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
			val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
		} else {
			val = i915->pmu.sample[__I915_SAMPLE_RC6].cur;
		}

		spin_unlock_irqrestore(&i915->pmu.lock, flags);
	}

	return val;
#else
	return __get_rc6(i915);
#endif
}
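/*
 * Worked example of the estimation above, with illustrative numbers:
 * if the last real reading left __I915_SAMPLE_RC6 at 10s and the
 * device has since been runtime suspended for 2s, the reported value
 * is (suspended_time_now - suspended_time_last) + 10s = 12s. The
 * estimate only ever grows, and a later real reading replaces it only
 * once it has caught up with the estimate.
 */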
static u64 __i915_pmu_event_read(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	u64 val = 0;

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		if (WARN_ON_ONCE(!engine)) {
			/* Do nothing */
		} else if (sample == I915_SAMPLE_BUSY &&
			   intel_engine_supports_stats(engine)) {
			val = ktime_to_ns(intel_engine_get_busy_time(engine));
		} else {
			val = engine->pmu.sample[sample].cur;
		}
	} else {
		switch (event->attr.config) {
		case I915_PMU_ACTUAL_FREQUENCY:
			val =
			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_REQUESTED_FREQUENCY:
			val =
			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_INTERRUPTS:
			val = count_interrupts(i915);
			break;
		case I915_PMU_RC6_RESIDENCY:
			val = get_rc6(i915);
			break;
		}
	}

	return val;
}
static void i915_pmu_event_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;

again:
	prev = local64_read(&hwc->prev_count);
	new = __i915_pmu_event_read(event);

	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
		goto again;

	local64_add(new - prev, &event->count);
}
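/*
 * For illustration, a minimal userspace sketch (hypothetical, error
 * handling elided) of how one of these counters is read through
 * perf_event_open(2); the PMU type is discovered at runtime from
 * /sys/bus/event_source/devices/i915/type rather than hardcoded:
 *
 *	struct perf_event_attr attr = {
 *		.type = i915_pmu_type,	// from the sysfs "type" file
 *		.size = sizeof(attr),
 *		.config = I915_PMU_RC6_RESIDENCY,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *	uint64_t count;
 *	read(fd, &count, sizeof(count));	// cumulative, in ns
 *
 * i915 events are system-wide, so pid is -1 and the cpu must come from
 * the exported cpumask.
 */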
static void i915_pmu_enable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	unsigned long flags;

	spin_lock_irqsave(&i915->pmu.lock, flags);

	/*
	 * Update the bitmask of enabled events and increment
	 * the event reference counter.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(i915->pmu.enable_count) != I915_PMU_MASK_BITS);
	GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
	GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
	i915->pmu.enable |= BIT_ULL(bit);
	i915->pmu.enable_count[bit]++;

	/*
	 * Start the sampling timer if needed and not already enabled.
	 */
	__i915_pmu_maybe_start_timer(i915);

	/*
	 * For per-engine events the bitmask and reference counting
	 * is stored per engine.
	 */
	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
			     I915_ENGINE_SAMPLE_COUNT);
		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
			     I915_ENGINE_SAMPLE_COUNT);
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);

		engine->pmu.enable |= BIT(sample);
		engine->pmu.enable_count[sample]++;
	}

	spin_unlock_irqrestore(&i915->pmu.lock, flags);

	/*
	 * Store the current counter value so we can report the correct delta
	 * for all listeners. Even when the event was already enabled and has
	 * an existing non-zero value.
	 */
	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
}
static void i915_pmu_disable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	unsigned long flags;

	spin_lock_irqsave(&i915->pmu.lock, flags);

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
		GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);

		/*
		 * Decrement the reference count and clear the enabled
		 * bitmask when the last listener on an event goes away.
		 */
		if (--engine->pmu.enable_count[sample] == 0)
			engine->pmu.enable &= ~BIT(sample);
	}

	GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
	GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
	/*
	 * Decrement the reference count and clear the enabled
	 * bitmask when the last listener on an event goes away.
	 */
	if (--i915->pmu.enable_count[bit] == 0) {
		i915->pmu.enable &= ~BIT_ULL(bit);
		i915->pmu.timer_enabled &= pmu_needs_timer(i915, true);
	}

	spin_unlock_irqrestore(&i915->pmu.lock, flags);
}
static void i915_pmu_event_start(struct perf_event *event, int flags)
{
	i915_pmu_enable(event);
	event->hw.state = 0;
}

static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_UPDATE)
		i915_pmu_event_read(event);
	i915_pmu_disable(event);
	event->hw.state = PERF_HES_STOPPED;
}

static int i915_pmu_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		i915_pmu_event_start(event, flags);

	return 0;
}

static void i915_pmu_event_del(struct perf_event *event, int flags)
{
	i915_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int i915_pmu_event_event_idx(struct perf_event *event)
{
	return 0;
}
struct i915_str_attribute {
	struct device_attribute attr;
	const char *str;
};

static ssize_t i915_pmu_format_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct i915_str_attribute *eattr;

	eattr = container_of(attr, struct i915_str_attribute, attr);
	return sprintf(buf, "%s\n", eattr->str);
}
#define I915_PMU_FORMAT_ATTR(_name, _config) \
	(&((struct i915_str_attribute[]) { \
		{ .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
		  .str = _config, } \
	})[0].attr.attr)

static struct attribute *i915_pmu_format_attrs[] = {
	I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
	NULL,
};

static const struct attribute_group i915_pmu_format_attr_group = {
	.name = "format",
	.attrs = i915_pmu_format_attrs,
};
struct i915_ext_attribute {
	struct device_attribute attr;
	unsigned long val;
};

static ssize_t i915_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct i915_ext_attribute *eattr;

	eattr = container_of(attr, struct i915_ext_attribute, attr);
	return sprintf(buf, "config=0x%lx\n", eattr->val);
}
static struct attribute_group i915_pmu_events_attr_group = {
	.name = "events",
	/* Patch in attrs at runtime. */
};

static ssize_t
i915_pmu_get_attr_cpumask(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
}

static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL);

static struct attribute *i915_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group i915_pmu_cpumask_attr_group = {
	.attrs = i915_cpumask_attrs,
};

static const struct attribute_group *i915_pmu_attr_groups[] = {
	&i915_pmu_format_attr_group,
	&i915_pmu_events_attr_group,
	&i915_pmu_cpumask_attr_group,
	NULL
};
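/*
 * These groups appear under /sys/bus/event_source/devices/i915/ and
 * let tooling enumerate events by name. A hedged usage sketch (exact
 * event names depend on the attributes generated at runtime below):
 *
 *	perf stat -e i915/actual-frequency/ -e i915/rc6-residency/ sleep 1
 *	perf stat -e i915/rcs0-busy/ sleep 1
 */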
#define __event(__config, __name, __unit) \
{ \
	.config = (__config), \
	.name = (__name), \
	.unit = (__unit), \
}

#define __engine_event(__sample, __name) \
{ \
	.sample = (__sample), \
	.name = (__name), \
}
static struct i915_ext_attribute *
add_i915_attr(struct i915_ext_attribute *attr, const char *name, u64 config)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = i915_pmu_event_show;
	attr->val = config;

	return ++attr;
}
static struct perf_pmu_events_attr *
add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
	     const char *str)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = perf_event_sysfs_show;
	attr->event_str = str;

	return ++attr;
}
static struct attribute **
create_event_attributes(struct drm_i915_private *i915)
{
	static const struct {
		u64 config;
		const char *name;
		const char *unit;
	} events[] = {
		__event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "MHz"),
		__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "MHz"),
		__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
		__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
	};
	static const struct {
		enum drm_i915_pmu_engine_sample sample;
		char *name;
	} engine_events[] = {
		__engine_event(I915_SAMPLE_BUSY, "busy"),
		__engine_event(I915_SAMPLE_SEMA, "sema"),
		__engine_event(I915_SAMPLE_WAIT, "wait"),
	};
	unsigned int count = 0;
	struct perf_pmu_events_attr *pmu_attr = NULL, *pmu_iter;
	struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
	struct attribute **attr = NULL, **attr_iter;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int i;

	/* Count how many counters we will be exposing. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		if (!config_status(i915, events[i].config))
			count++;
	}

	for_each_engine(engine, i915, id) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			if (!engine_event_status(engine,
						 engine_events[i].sample))
				count++;
		}
	}

	/* Allocate attribute objects and table. */
	i915_attr = kcalloc(count, sizeof(*i915_attr), GFP_KERNEL);
	if (!i915_attr)
		goto err_alloc;

	pmu_attr = kcalloc(count, sizeof(*pmu_attr), GFP_KERNEL);
	if (!pmu_attr)
		goto err_alloc;

	/* Max one pointer of each attribute type plus a termination entry. */
	attr = kcalloc(count * 2 + 1, sizeof(*attr), GFP_KERNEL);
	if (!attr)
		goto err_alloc;

	i915_iter = i915_attr;
	pmu_iter = pmu_attr;
	attr_iter = attr;

	/* Initialize supported non-engine counters. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		char *str;

		if (config_status(i915, events[i].config))
			continue;

		str = kstrdup(events[i].name, GFP_KERNEL);
		if (!str)
			goto err;

		*attr_iter++ = &i915_iter->attr.attr;
		i915_iter = add_i915_attr(i915_iter, str, events[i].config);

		if (events[i].unit) {
			str = kasprintf(GFP_KERNEL, "%s.unit", events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, events[i].unit);
		}
	}

	/* Initialize supported engine counters. */
	for_each_engine(engine, i915, id) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			char *str;

			if (engine_event_status(engine,
						engine_events[i].sample))
				continue;

			str = kasprintf(GFP_KERNEL, "%s-%s",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &i915_iter->attr.attr;
			i915_iter =
				add_i915_attr(i915_iter, str,
					      __I915_PMU_ENGINE(engine->uabi_class,
								engine->instance,
								engine_events[i].sample));

			str = kasprintf(GFP_KERNEL, "%s-%s.unit",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, "ns");
		}
	}

	i915->pmu.i915_attr = i915_attr;
	i915->pmu.pmu_attr = pmu_attr;

	return attr;

err:
	for (attr_iter = attr; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

err_alloc:
	kfree(attr);
	kfree(i915_attr);
	kfree(pmu_attr);

	return NULL;
}
static void free_event_attributes(struct drm_i915_private *i915)
{
	struct attribute **attr_iter = i915_pmu_events_attr_group.attrs;

	for (; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

	kfree(i915_pmu_events_attr_group.attrs);
	kfree(i915->pmu.i915_attr);
	kfree(i915->pmu.pmu_attr);

	i915_pmu_events_attr_group.attrs = NULL;
	i915->pmu.i915_attr = NULL;
	i915->pmu.pmu_attr = NULL;
}
static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);

	GEM_BUG_ON(!pmu->base.event_init);

	/* Select the first online CPU as a designated reader. */
	if (!cpumask_weight(&i915_pmu_cpumask))
		cpumask_set_cpu(cpu, &i915_pmu_cpumask);

	return 0;
}
static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
	unsigned int target;

	GEM_BUG_ON(!pmu->base.event_init);

	if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &i915_pmu_cpumask);
			perf_pmu_migrate_context(&pmu->base, cpu, target);
		}
	}

	return 0;
}
static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;

static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
{
	enum cpuhp_state slot;
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/x86/intel/i915:online",
				      i915_pmu_cpu_online,
				      i915_pmu_cpu_offline);
	if (ret < 0)
		return ret;

	slot = ret;
	ret = cpuhp_state_add_instance(slot, &i915->pmu.node);
	if (ret) {
		cpuhp_remove_multi_state(slot);
		return ret;
	}

	cpuhp_slot = slot;
	return 0;
}

static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
{
	WARN_ON(cpuhp_slot == CPUHP_INVALID);
	WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &i915->pmu.node));
	cpuhp_remove_multi_state(cpuhp_slot);
}
void i915_pmu_register(struct drm_i915_private *i915)
{
	int ret;

	if (INTEL_GEN(i915) <= 2) {
		DRM_INFO("PMU not supported for this GPU.\n");
		return;
	}

	i915_pmu_events_attr_group.attrs = create_event_attributes(i915);
	if (!i915_pmu_events_attr_group.attrs) {
		ret = -ENOMEM;
		goto err;
	}

	i915->pmu.base.attr_groups = i915_pmu_attr_groups;
	i915->pmu.base.task_ctx_nr = perf_invalid_context;
	i915->pmu.base.event_init = i915_pmu_event_init;
	i915->pmu.base.add = i915_pmu_event_add;
	i915->pmu.base.del = i915_pmu_event_del;
	i915->pmu.base.start = i915_pmu_event_start;
	i915->pmu.base.stop = i915_pmu_event_stop;
	i915->pmu.base.read = i915_pmu_event_read;
	i915->pmu.base.event_idx = i915_pmu_event_event_idx;

	spin_lock_init(&i915->pmu.lock);
	hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	i915->pmu.timer.function = i915_sample;

	ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
	if (ret)
		goto err;

	ret = i915_pmu_register_cpuhp_state(i915);
	if (ret)
		goto err_unreg;

	return;

err_unreg:
	perf_pmu_unregister(&i915->pmu.base);
err:
	i915->pmu.base.event_init = NULL;
	free_event_attributes(i915);
	DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
}
void i915_pmu_unregister(struct drm_i915_private *i915)
{
	if (!i915->pmu.base.event_init)
		return;

	WARN_ON(i915->pmu.enable);

	hrtimer_cancel(&i915->pmu.timer);

	i915_pmu_unregister_cpuhp_state(i915);

	perf_pmu_unregister(&i915->pmu.base);
	i915->pmu.base.event_init = NULL;
	free_event_attributes(i915);
}