/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/perf_event.h>
#include <linux/pm_runtime.h>

#include "i915_drv.h"
#include "i915_pmu.h"
#include "intel_ringbuffer.h"
/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)
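/*
 * A worked example of the arithmetic above, assuming FREQUENCY stays at
 * 200 Hz: NSEC_PER_SEC / FREQUENCY = 5,000,000 ns, well above the
 * 10,000 ns floor imposed by max_t(), so the sampling timer fires every
 * 5 ms.
 */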
#define ENGINE_SAMPLE_MASK \
	(BIT(I915_SAMPLE_BUSY) | \
	 BIT(I915_SAMPLE_WAIT) | \
	 BIT(I915_SAMPLE_SEMA))

#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)

static cpumask_t i915_pmu_cpumask = CPU_MASK_NONE;
static u8 engine_config_sample(u64 config)
{
	return config & I915_PMU_SAMPLE_MASK;
}

static u8 engine_event_sample(struct perf_event *event)
{
	return engine_config_sample(event->attr.config);
}

static u8 engine_event_class(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
}

static u8 engine_event_instance(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
}
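/*
 * For reference, the decoding above mirrors the __I915_PMU_ENGINE()
 * encoding in the uapi header (include/uapi/drm/i915_drm.h): the low
 * I915_PMU_SAMPLE_BITS bits hold the sample type, the next 8 bits the
 * engine instance, and the bits from I915_PMU_CLASS_SHIFT upwards the
 * engine class.
 */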
static bool is_engine_config(u64 config)
{
	return config < __I915_PMU_OTHER(0);
}

static unsigned int config_enabled_bit(u64 config)
{
	if (is_engine_config(config))
		return engine_config_sample(config);
	else
		return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
}

static u64 config_enabled_mask(u64 config)
{
	return BIT_ULL(config_enabled_bit(config));
}

static bool is_engine_event(struct perf_event *event)
{
	return is_engine_config(event->attr.config);
}

static unsigned int event_enabled_bit(struct perf_event *event)
{
	return config_enabled_bit(event->attr.config);
}

static bool supports_busy_stats(struct drm_i915_private *i915)
{
	return INTEL_GEN(i915) >= 8;
}
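/*
 * On Gen8+ parts (i.e. execlists submission) the driver maintains software
 * busyness tracking, read via intel_engine_get_busy_time() in the event
 * read path below, so the sampling timer is not needed for the
 * I915_SAMPLE_BUSY counter there.
 */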
static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
{
	u64 enable;

	/*
	 * Only some counters need the sampling timer.
	 *
	 * We start with a bitmask of all currently enabled events.
	 */
	enable = i915->pmu.enable;

	/*
	 * Mask out all the ones which do not need the timer, or in
	 * other words keep all the ones that could need the timer.
	 */
	enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
		  config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
		  ENGINE_SAMPLE_MASK;

	/*
	 * When the GPU is idle per-engine counters do not need to be
	 * running so clear those bits out.
	 */
	if (!gpu_active)
		enable &= ~ENGINE_SAMPLE_MASK;
	/*
	 * Also, when software busyness tracking is available we do not
	 * need the timer for the I915_SAMPLE_BUSY counter.
	 */
	else if (supports_busy_stats(i915))
		enable &= ~BIT(I915_SAMPLE_BUSY);

	/*
	 * If some bits remain it means we need the sampling timer running.
	 */
	return enable;
}
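/*
 * Worked example of the masking above: with only an engine busy event
 * enabled on a Gen8+ part, the remaining mask is zero both when the GPU
 * is idle (engine bits cleared) and when software busy stats are
 * available (busy bit cleared), so the timer only runs while a frequency,
 * wait or semaphore sampler actually needs it.
 */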
void i915_pmu_gt_parked(struct drm_i915_private *i915)
{
	if (!i915->pmu.base.event_init)
		return;

	spin_lock_irq(&i915->pmu.lock);
	/*
	 * Signal sampling timer to stop if only engine events are enabled and
	 * GPU went idle.
	 */
	i915->pmu.timer_enabled = pmu_needs_timer(i915, false);
	spin_unlock_irq(&i915->pmu.lock);
}
static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
{
	if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
		i915->pmu.timer_enabled = true;
		hrtimer_start_range_ns(&i915->pmu.timer,
				       ns_to_ktime(PERIOD), 0,
				       HRTIMER_MODE_REL_PINNED);
	}
}
void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{
	if (!i915->pmu.base.event_init)
		return;

	spin_lock_irq(&i915->pmu.lock);
	/*
	 * Re-enable sampling timer when GPU goes active.
	 */
	__i915_pmu_maybe_start_timer(i915);
	spin_unlock_irq(&i915->pmu.lock);
}
static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
{
	if (!fw)
		intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);

	return true;
}
static void
update_sample(struct i915_pmu_sample *sample, u32 unit, u32 val)
{
	/*
	 * Since we are doing stochastic sampling for these counters,
	 * average the delta with the previous value for better accuracy.
	 */
	sample->cur += div_u64(mul_u32_u32(sample->prev + val, unit), 2);
	sample->prev = val;
}
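/*
 * Each invocation therefore adds unit * (prev + val) / 2 to the counter,
 * a trapezoidal approximation of the integral of the sampled signal over
 * one tick. E.g. a busyness sample going 0 -> 1 between two ticks
 * credits half a PERIOD, which is the expected value under such sampling.
 */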
static void engines_sample(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	bool fw = false;

	if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
		return;

	if (!dev_priv->gt.awake)
		return;

	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return;

	for_each_engine(engine, dev_priv, id) {
		u32 current_seqno = intel_engine_get_seqno(engine);
		u32 last_seqno = intel_engine_last_submit(engine);
		u32 val;

		val = !i915_seqno_passed(current_seqno, last_seqno);

		update_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
			      PERIOD, val);

		if (val && (engine->pmu.enable &
		    (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
			fw = grab_forcewake(dev_priv, fw);

			val = I915_READ_FW(RING_CTL(engine->mmio_base));
		} else {
			val = 0;
		}

		update_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
			      PERIOD, !!(val & RING_WAIT));

		update_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
			      PERIOD, !!(val & RING_WAIT_SEMAPHORE));
	}

	if (fw)
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	intel_runtime_pm_put(dev_priv);
}
static void frequency_sample(struct drm_i915_private *dev_priv)
{
	if (dev_priv->pmu.enable &
	    config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
		u32 val;

		val = dev_priv->gt_pm.rps.cur_freq;
		if (dev_priv->gt.awake &&
		    intel_runtime_pm_get_if_in_use(dev_priv)) {
			val = intel_get_cagf(dev_priv,
					     I915_READ_NOTRACE(GEN6_RPSTAT1));
			intel_runtime_pm_put(dev_priv);
		}

		update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
			      1, intel_gpu_freq(dev_priv, val));
	}

	if (dev_priv->pmu.enable &
	    config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
		update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], 1,
			      intel_gpu_freq(dev_priv,
					     dev_priv->gt_pm.rps.cur_freq));
	}
}
static void pmu_init_previous_samples(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i;

	for_each_engine(engine, i915, id) {
		for (i = 0; i < ARRAY_SIZE(engine->pmu.sample); i++)
			engine->pmu.sample[i].prev = 0;
	}

	for (i = 0; i < ARRAY_SIZE(i915->pmu.sample); i++)
		i915->pmu.sample[i].prev = i915->gt_pm.rps.idle_freq;
}
static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
	struct drm_i915_private *i915 =
		container_of(hrtimer, struct drm_i915_private, pmu.timer);

	if (!READ_ONCE(i915->pmu.timer_enabled)) {
		pmu_init_previous_samples(i915);

		return HRTIMER_NORESTART;
	}

	engines_sample(i915);
	frequency_sample(i915);

	hrtimer_forward_now(hrtimer, ns_to_ktime(PERIOD));
	return HRTIMER_RESTART;
}
static u64 count_interrupts(struct drm_i915_private *i915)
{
	/* open-coded kstat_irqs() */
	struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
	u64 sum = 0;
	int cpu;

	if (!desc || !desc->kstat_irqs)
		return 0;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);

	return sum;
}
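/*
 * Note: kstat_irqs() itself is not exported to modules, which is
 * presumably why the per-cpu summation is open-coded above.
 */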
static void i915_pmu_event_destroy(struct perf_event *event)
{
	WARN_ON(event->parent);
}
static int engine_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);

	if (!intel_engine_lookup_user(i915, engine_event_class(event),
				      engine_event_instance(event)))
		return -ENODEV;

	switch (engine_event_sample(event)) {
	case I915_SAMPLE_BUSY:
	case I915_SAMPLE_WAIT:
		break;
	case I915_SAMPLE_SEMA:
		if (INTEL_GEN(i915) < 6)
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
static int i915_pmu_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	int cpu, ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (event->cpu < 0)
		return -EINVAL;

	cpu = cpumask_any_and(&i915_pmu_cpumask,
			      topology_sibling_cpumask(event->cpu));
	if (cpu >= nr_cpu_ids)
		return -ENODEV;

	if (is_engine_event(event)) {
		ret = engine_event_init(event);
	} else {
		ret = 0;
		switch (event->attr.config) {
		case I915_PMU_ACTUAL_FREQUENCY:
			if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
				/* Requires a mutex for sampling! */
				ret = -ENODEV;
			/* Fall-through. */
		case I915_PMU_REQUESTED_FREQUENCY:
			if (INTEL_GEN(i915) < 6)
				ret = -ENODEV;
			break;
		case I915_PMU_INTERRUPTS:
			break;
		case I915_PMU_RC6_RESIDENCY:
			if (!HAS_RC6(i915))
				ret = -ENODEV;
			break;
		case I915_PMU_RC6p_RESIDENCY:
		case I915_PMU_RC6pp_RESIDENCY:
			if (!HAS_RC6p(i915))
				ret = -ENODEV;
			break;
		default:
			ret = -ENOENT;
			break;
		}
	}
	if (ret)
		return ret;

	event->cpu = cpu;
	if (!event->parent)
		event->destroy = i915_pmu_event_destroy;

	return 0;
}
static u64 __i915_pmu_event_read(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	u64 val = 0;

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		if (WARN_ON_ONCE(!engine)) {
			/* Do nothing */
		} else if (sample == I915_SAMPLE_BUSY &&
			   engine->pmu.busy_stats) {
			val = ktime_to_ns(intel_engine_get_busy_time(engine));
		} else {
			val = engine->pmu.sample[sample].cur;
		}
	} else {
		switch (event->attr.config) {
		case I915_PMU_ACTUAL_FREQUENCY:
			val =
			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
				   FREQUENCY);
			break;
		case I915_PMU_REQUESTED_FREQUENCY:
			val =
			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
				   FREQUENCY);
			break;
		case I915_PMU_INTERRUPTS:
			val = count_interrupts(i915);
			break;
		case I915_PMU_RC6_RESIDENCY:
			intel_runtime_pm_get(i915);
			val = intel_rc6_residency_ns(i915,
						     IS_VALLEYVIEW(i915) ?
						     VLV_GT_RENDER_RC6 :
						     GEN6_GT_GFX_RC6);
			intel_runtime_pm_put(i915);
			break;
		case I915_PMU_RC6p_RESIDENCY:
			intel_runtime_pm_get(i915);
			val = intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);
			intel_runtime_pm_put(i915);
			break;
		case I915_PMU_RC6pp_RESIDENCY:
			intel_runtime_pm_get(i915);
			val = intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);
			intel_runtime_pm_put(i915);
			break;
		}
	}

	return val;
}
static void i915_pmu_event_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;

again:
	prev = local64_read(&hwc->prev_count);
	new = __i915_pmu_event_read(event);

	/* Retry if another reader updated prev_count concurrently. */
	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
		goto again;

	local64_add(new - prev, &event->count);
}
static bool engine_needs_busy_stats(struct intel_engine_cs *engine)
{
	return supports_busy_stats(engine->i915) &&
	       (engine->pmu.enable & BIT(I915_SAMPLE_BUSY));
}
static void i915_pmu_enable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	unsigned long flags;

	spin_lock_irqsave(&i915->pmu.lock, flags);

	/*
	 * Update the bitmask of enabled events and increment
	 * the event reference counter.
	 */
	GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
	GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
	i915->pmu.enable |= BIT_ULL(bit);
	i915->pmu.enable_count[bit]++;

	/*
	 * Start the sampling timer if needed and not already enabled.
	 */
	__i915_pmu_maybe_start_timer(i915);

	/*
	 * For per-engine events the bitmask and reference counting
	 * is stored per engine.
	 */
	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));
		GEM_BUG_ON(!engine);
		engine->pmu.enable |= BIT(sample);

		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
		if (engine->pmu.enable_count[sample]++ == 0) {
			/*
			 * Enable engine busy stats tracking if needed or
			 * alternatively cancel the scheduled disable.
			 *
			 * If the delayed disable was pending, cancel it and
			 * in this case do not enable since it already is.
			 */
			if (engine_needs_busy_stats(engine) &&
			    !engine->pmu.busy_stats) {
				engine->pmu.busy_stats = true;
				if (!cancel_delayed_work(&engine->pmu.disable_busy_stats))
					intel_enable_engine_stats(engine);
			}
		}
	}

	/*
	 * Store the current counter value so we can report the correct delta
	 * for all listeners. Even when the event was already enabled and has
	 * an existing non-zero value.
	 */
	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));

	spin_unlock_irqrestore(&i915->pmu.lock, flags);
}
static void __disable_busy_stats(struct work_struct *work)
{
	struct intel_engine_cs *engine =
	       container_of(work, typeof(*engine), pmu.disable_busy_stats.work);

	intel_disable_engine_stats(engine);
}
static void i915_pmu_disable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	unsigned long flags;

	spin_lock_irqsave(&i915->pmu.lock, flags);

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));
		GEM_BUG_ON(!engine);
		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
		GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
		/*
		 * Decrement the reference count and clear the enabled
		 * bitmask when the last listener on an event goes away.
		 */
		if (--engine->pmu.enable_count[sample] == 0) {
			engine->pmu.enable &= ~BIT(sample);
			if (!engine_needs_busy_stats(engine) &&
			    engine->pmu.busy_stats) {
				engine->pmu.busy_stats = false;
				/*
				 * We request a delayed disable to handle the
				 * rapid on/off cycles on events, which can
				 * happen when tools like perf stat start, in a
				 * nicer way.
				 *
				 * In addition, this also helps with busy stats
				 * accuracy with background CPU offline/online
				 * migration events.
				 */
				queue_delayed_work(system_wq,
						   &engine->pmu.disable_busy_stats,
						   round_jiffies_up_relative(HZ));
			}
		}
	}

	GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
	GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
	/*
	 * Decrement the reference count and clear the enabled
	 * bitmask when the last listener on an event goes away.
	 */
	if (--i915->pmu.enable_count[bit] == 0) {
		i915->pmu.enable &= ~BIT_ULL(bit);
		i915->pmu.timer_enabled &= pmu_needs_timer(i915, true);
	}

	spin_unlock_irqrestore(&i915->pmu.lock, flags);
}
static void i915_pmu_event_start(struct perf_event *event, int flags)
{
	i915_pmu_enable(event);
	event->hw.state = 0;
}
static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_UPDATE)
		i915_pmu_event_read(event);
	i915_pmu_disable(event);
	event->hw.state = PERF_HES_STOPPED;
}
static int i915_pmu_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		i915_pmu_event_start(event, flags);

	return 0;
}
static void i915_pmu_event_del(struct perf_event *event, int flags)
{
	i915_pmu_event_stop(event, PERF_EF_UPDATE);
}
static int i915_pmu_event_event_idx(struct perf_event *event)
{
	return 0;
}
static ssize_t i915_pmu_format_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr;

	eattr = container_of(attr, struct dev_ext_attribute, attr);
	return sprintf(buf, "%s\n", (char *)eattr->var);
}
#define I915_PMU_FORMAT_ATTR(_name, _config) \
(&((struct dev_ext_attribute[]) { \
	{ .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
	  .var = (void *)_config, } \
})[0].attr.attr)
static struct attribute *i915_pmu_format_attrs[] = {
	I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
	NULL,
};

static const struct attribute_group i915_pmu_format_attr_group = {
	.name = "format",
	.attrs = i915_pmu_format_attrs,
};
static ssize_t i915_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr;

	eattr = container_of(attr, struct dev_ext_attribute, attr);
	return sprintf(buf, "config=0x%lx\n", (unsigned long)eattr->var);
}
#define I915_EVENT_ATTR(_name, _config) \
(&((struct dev_ext_attribute[]) { \
	{ .attr = __ATTR(_name, 0444, i915_pmu_event_show, NULL), \
	  .var = (void *)_config, } \
})[0].attr.attr)

#define I915_EVENT_STR(_name, _str) \
(&((struct perf_pmu_events_attr[]) { \
	{ .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
	  .id = 0, \
	  .event_str = _str, } \
})[0].attr.attr)

#define I915_EVENT(_name, _config, _unit) \
	I915_EVENT_ATTR(_name, _config), \
	I915_EVENT_STR(_name.unit, _unit)
#define I915_ENGINE_EVENT(_name, _class, _instance, _sample) \
	I915_EVENT_ATTR(_name, __I915_PMU_ENGINE(_class, _instance, _sample)), \
	I915_EVENT_STR(_name.unit, "ns")

#define I915_ENGINE_EVENTS(_name, _class, _instance) \
	I915_ENGINE_EVENT(_name##_instance-busy, _class, _instance, I915_SAMPLE_BUSY), \
	I915_ENGINE_EVENT(_name##_instance-sema, _class, _instance, I915_SAMPLE_SEMA), \
	I915_ENGINE_EVENT(_name##_instance-wait, _class, _instance, I915_SAMPLE_WAIT)
static struct attribute *i915_pmu_events_attrs[] = {
	I915_ENGINE_EVENTS(rcs, I915_ENGINE_CLASS_RENDER, 0),
	I915_ENGINE_EVENTS(bcs, I915_ENGINE_CLASS_COPY, 0),
	I915_ENGINE_EVENTS(vcs, I915_ENGINE_CLASS_VIDEO, 0),
	I915_ENGINE_EVENTS(vcs, I915_ENGINE_CLASS_VIDEO, 1),
	I915_ENGINE_EVENTS(vecs, I915_ENGINE_CLASS_VIDEO_ENHANCE, 0),

	I915_EVENT(actual-frequency,    I915_PMU_ACTUAL_FREQUENCY,    "MHz"),
	I915_EVENT(requested-frequency, I915_PMU_REQUESTED_FREQUENCY, "MHz"),

	I915_EVENT_ATTR(interrupts, I915_PMU_INTERRUPTS),

	I915_EVENT(rc6-residency,   I915_PMU_RC6_RESIDENCY,   "ns"),
	I915_EVENT(rc6p-residency,  I915_PMU_RC6p_RESIDENCY,  "ns"),
	I915_EVENT(rc6pp-residency, I915_PMU_RC6pp_RESIDENCY, "ns"),

	NULL,
};
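/*
 * Illustrative userspace usage (not part of this file): once registered,
 * the events above appear under /sys/bus/event_source/devices/i915/events/
 * and can be counted with the perf tool, e.g.:
 *
 *	perf stat -e i915/rcs0-busy/,i915/actual-frequency/ -a sleep 1
 *
 * Exact event names follow the I915_ENGINE_EVENTS() definitions above.
 */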
static const struct attribute_group i915_pmu_events_attr_group = {
	.name = "events",
	.attrs = i915_pmu_events_attrs,
};
static ssize_t
i915_pmu_get_attr_cpumask(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
}

static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL);
static struct attribute *i915_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group i915_pmu_cpumask_attr_group = {
	.attrs = i915_cpumask_attrs,
};
static const struct attribute_group *i915_pmu_attr_groups[] = {
	&i915_pmu_format_attr_group,
	&i915_pmu_events_attr_group,
	&i915_pmu_cpumask_attr_group,
	NULL
};
#ifdef CONFIG_HOTPLUG_CPU
static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
	unsigned int target;

	GEM_BUG_ON(!pmu->base.event_init);

	target = cpumask_any_and(&i915_pmu_cpumask,
				 topology_sibling_cpumask(cpu));
	/* Select the first online CPU as a designated reader. */
	if (target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &i915_pmu_cpumask);

	return 0;
}
static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
	unsigned int target;

	GEM_BUG_ON(!pmu->base.event_init);

	if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &i915_pmu_cpumask);
			perf_pmu_migrate_context(&pmu->base, cpu, target);
		}
	}

	return 0;
}
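/*
 * Together the two callbacks above maintain a single designated reader
 * CPU in i915_pmu_cpumask (exported to userspace via the cpumask
 * attribute), migrating active perf contexts with
 * perf_pmu_migrate_context() when that CPU goes offline, in the style of
 * other uncore PMUs which count in a CPU-agnostic domain.
 */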
static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
#endif
static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
{
#ifdef CONFIG_HOTPLUG_CPU
	enum cpuhp_state slot;
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/x86/intel/i915:online",
				      i915_pmu_cpu_online,
				      i915_pmu_cpu_offline);
	if (ret < 0)
		return ret;

	slot = ret;
	ret = cpuhp_state_add_instance(slot, &i915->pmu.node);
	if (ret) {
		cpuhp_remove_multi_state(slot);
		return ret;
	}

	cpuhp_slot = slot;
#endif
	return 0;
}
static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
{
#ifdef CONFIG_HOTPLUG_CPU
	WARN_ON(cpuhp_slot == CPUHP_INVALID);
	WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &i915->pmu.node));
	cpuhp_remove_multi_state(cpuhp_slot);
#endif
}
void i915_pmu_register(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	if (INTEL_GEN(i915) <= 2) {
		DRM_INFO("PMU not supported for this GPU.");
		return;
	}

	i915->pmu.base.attr_groups = i915_pmu_attr_groups;
	i915->pmu.base.task_ctx_nr = perf_invalid_context;
	i915->pmu.base.event_init = i915_pmu_event_init;
	i915->pmu.base.add = i915_pmu_event_add;
	i915->pmu.base.del = i915_pmu_event_del;
	i915->pmu.base.start = i915_pmu_event_start;
	i915->pmu.base.stop = i915_pmu_event_stop;
	i915->pmu.base.read = i915_pmu_event_read;
	i915->pmu.base.event_idx = i915_pmu_event_event_idx;

	spin_lock_init(&i915->pmu.lock);
	hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	i915->pmu.timer.function = i915_sample;

	pmu_init_previous_samples(i915);

	for_each_engine(engine, i915, id)
		INIT_DELAYED_WORK(&engine->pmu.disable_busy_stats,
				  __disable_busy_stats);

	ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
	if (ret)
		goto err;

	ret = i915_pmu_register_cpuhp_state(i915);
	if (ret)
		goto err_unreg;

	return;

err_unreg:
	perf_pmu_unregister(&i915->pmu.base);
err:
	i915->pmu.base.event_init = NULL;
	DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
}
void i915_pmu_unregister(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (!i915->pmu.base.event_init)
		return;

	WARN_ON(i915->pmu.enable);

	hrtimer_cancel(&i915->pmu.timer);

	for_each_engine(engine, i915, id) {
		GEM_BUG_ON(engine->pmu.busy_stats);
		flush_delayed_work(&engine->pmu.disable_busy_stats);
	}

	i915_pmu_unregister_cpuhp_state(i915);

	perf_pmu_unregister(&i915->pmu.base);
	i915->pmu.base.event_init = NULL;
}