/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "intel_wakeref.h"
10 static void rpm_get(struct drm_i915_private *i915, struct intel_wakeref *wf)
12 wf->wakeref = intel_runtime_pm_get(i915);
15 static void rpm_put(struct drm_i915_private *i915, struct intel_wakeref *wf)
17 intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);
19 intel_runtime_pm_put(i915, wakeref);
23 int __intel_wakeref_get_first(struct drm_i915_private *i915,
24 struct intel_wakeref *wf,
25 int (*fn)(struct intel_wakeref *wf))
28 * Treat get/put as different subclasses, as we may need to run
29 * the put callback from under the shrinker and do not want to
30 * cross-contanimate that callback with any extra work performed
31 * upon acquiring the wakeref.
33 mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING);
34 if (!atomic_read(&wf->count)) {
42 mutex_unlock(&wf->mutex);
46 smp_mb__before_atomic(); /* release wf->count */
48 atomic_inc(&wf->count);
49 mutex_unlock(&wf->mutex);
54 int __intel_wakeref_put_last(struct drm_i915_private *i915,
55 struct intel_wakeref *wf,
56 int (*fn)(struct intel_wakeref *wf))
64 atomic_inc(&wf->count);
65 mutex_unlock(&wf->mutex);
70 void __intel_wakeref_init(struct intel_wakeref *wf, struct lock_class_key *key)
72 __mutex_init(&wf->mutex, "wakeref", key);
73 atomic_set(&wf->count, 0);
77 static void wakeref_auto_timeout(struct timer_list *t)
79 struct intel_wakeref_auto *wf = from_timer(wf, t, timer);
80 intel_wakeref_t wakeref;
83 if (!refcount_dec_and_lock_irqsave(&wf->count, &wf->lock, &flags))
86 wakeref = fetch_and_zero(&wf->wakeref);
87 spin_unlock_irqrestore(&wf->lock, flags);
89 intel_runtime_pm_put(wf->i915, wakeref);
92 void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
93 struct drm_i915_private *i915)
95 spin_lock_init(&wf->lock);
96 timer_setup(&wf->timer, wakeref_auto_timeout, 0);
97 refcount_set(&wf->count, 0);
102 void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
107 if (del_timer_sync(&wf->timer))
108 wakeref_auto_timeout(&wf->timer);
112 /* Our mission is that we only extend an already active wakeref */
113 assert_rpm_wakelock_held(&wf->i915->runtime_pm);
115 if (!refcount_inc_not_zero(&wf->count)) {
116 spin_lock_irqsave(&wf->lock, flags);
117 if (!refcount_inc_not_zero(&wf->count)) {
118 GEM_BUG_ON(wf->wakeref);
119 wf->wakeref = intel_runtime_pm_get_if_in_use(wf->i915);
120 refcount_set(&wf->count, 1);
122 spin_unlock_irqrestore(&wf->lock, flags);
126 * If we extend a pending timer, we will only get a single timer
127 * callback and so need to cancel the local inc by running the
128 * elided callback to keep the wf->count balanced.
130 if (mod_timer(&wf->timer, jiffies + timeout))
131 wakeref_auto_timeout(&wf->timer);
134 void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
136 intel_wakeref_auto(wf, 0);
137 GEM_BUG_ON(wf->wakeref);