/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */
#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
#else
#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
struct intel_runtime_pm;
struct intel_wakeref;

typedef depot_stack_handle_t intel_wakeref_t;
struct intel_wakeref_ops {
	int (*get)(struct intel_wakeref *wf);
	int (*put)(struct intel_wakeref *wf);
};
struct intel_wakeref {
	atomic_t count;
	struct mutex mutex;

	intel_wakeref_t wakeref;

	struct intel_runtime_pm *rpm;
	const struct intel_wakeref_ops *ops;

	struct delayed_work work;
};
struct intel_wakeref_lockclass {
	struct lock_class_key mutex;
	struct lock_class_key work;
};
void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct intel_runtime_pm *rpm,
			  const struct intel_wakeref_ops *ops,
			  struct intel_wakeref_lockclass *key);
#define intel_wakeref_init(wf, rpm, ops) do {				\
	static struct intel_wakeref_lockclass __key;			\
									\
	__intel_wakeref_init((wf), (rpm), (ops), &__key);		\
} while (0)
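/*
 * Illustrative sketch, not part of the upstream header: a typical owner
 * embeds a struct intel_wakeref and supplies get/put (unpark/park)
 * callbacks through intel_wakeref_init(). All names below (struct
 * my_engine, my_engine_unpark(), my_engine_park(), my_engine_wakeref_ops)
 * are hypothetical.
 *
 *	struct my_engine {
 *		struct intel_wakeref wakeref;
 *		...
 *	};
 *
 *	static int my_engine_unpark(struct intel_wakeref *wf)
 *	{
 *		struct my_engine *engine =
 *			container_of(wf, struct my_engine, wakeref);
 *
 *		... power up, restore state ...
 *		return 0;
 *	}
 *
 *	static int my_engine_park(struct intel_wakeref *wf)
 *	{
 *		... save state, power down; returning an error retains
 *		the wakeref ...
 *		return 0;
 *	}
 *
 *	static const struct intel_wakeref_ops my_engine_wakeref_ops = {
 *		.get = my_engine_unpark,
 *		.put = my_engine_park,
 *	};
 *
 *	intel_wakeref_init(&engine->wakeref, rpm, &my_engine_wakeref_ops);
 */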
int __intel_wakeref_get_first(struct intel_wakeref *wf);
void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);
/**
 * intel_wakeref_get: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref. The first user to do so will acquire
 * the runtime pm wakeref and then call the @wf->ops->get() callback
 * underneath the wakeref mutex.
 *
 * Note that @wf->ops->get() is allowed to fail, in which case the
 * runtime-pm wakeref will be released and the acquisition unwound, and
 * an error reported.
 *
 * Returns: 0 if the wakeref was acquired successfully, or a negative error
 * code otherwise.
 */
static inline int
intel_wakeref_get(struct intel_wakeref *wf)
{
	might_sleep();
	if (unlikely(!atomic_inc_not_zero(&wf->count)))
		return __intel_wakeref_get_first(wf);

	return 0;
}
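/*
 * Usage sketch (assumption, not taken from the header): callers bracket
 * hardware access with a get/put pair. Only the 0 -> 1 transition invokes
 * the ops->get() callback and acquires the runtime-pm wakeref; subsequent
 * acquisitions are a plain atomic increment.
 *
 *	err = intel_wakeref_get(wf);
 *	if (err)
 *		return err;
 *
 *	... access the hardware ...
 *
 *	intel_wakeref_put(wf);
 */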
/**
 * __intel_wakeref_get: Acquire the wakeref, again
 * @wf: the wakeref
 *
 * Increment the wakeref counter, only valid if it is already held by
 * the caller.
 *
 * See intel_wakeref_get().
 */
static inline void
__intel_wakeref_get(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	atomic_inc(&wf->count);
}
/**
 * intel_wakeref_get_if_active: Acquire the wakeref, if active
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref, but only if the wakeref is already
 * active.
 *
 * Returns: true if the wakeref was acquired, false otherwise.
 */
static inline bool
intel_wakeref_get_if_active(struct intel_wakeref *wf)
{
	return atomic_inc_not_zero(&wf->count);
}
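/*
 * Sketch (assumption): the _if_active variant is for opportunistic use,
 * e.g. sampling state that is only meaningful while the device is awake,
 * without forcing a wakeup:
 *
 *	if (intel_wakeref_get_if_active(wf)) {
 *		... read state that is only valid while awake ...
 *		intel_wakeref_put(wf);
 *	}
 */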
enum {
	INTEL_WAKEREF_PUT_ASYNC_BIT = 0,
	__INTEL_WAKEREF_PUT_LAST_BIT__
};
static inline void
intel_wakeref_might_get(struct intel_wakeref *wf)
{
	might_lock(&wf->mutex);
}
#define INTEL_WAKEREF_PUT_ASYNC BIT(INTEL_WAKEREF_PUT_ASYNC_BIT)
#define INTEL_WAKEREF_PUT_DELAY \
	GENMASK(BITS_PER_LONG - 1, __INTEL_WAKEREF_PUT_LAST_BIT__)

/**
 * __intel_wakeref_put: Release the wakeref
 * @wf: the wakeref
 * @flags: control flags (INTEL_WAKEREF_PUT_ASYNC, INTEL_WAKEREF_PUT_DELAY)
 *
 * Release our hold on the wakeref. When there are no more users,
 * the runtime pm wakeref will be released after the @wf->ops->put() callback
 * is called underneath the wakeref mutex.
 *
 * Note that @wf->ops->put() is allowed to fail, in which case the runtime-pm
 * wakeref is retained and an error reported.
 */
static inline void
__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
		__intel_wakeref_put_last(wf, flags);
}
static inline void
intel_wakeref_put(struct intel_wakeref *wf)
{
	might_sleep();
	__intel_wakeref_put(wf, 0);
}
static inline void
intel_wakeref_put_async(struct intel_wakeref *wf)
{
	__intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
}
static inline void
intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay)
{
	__intel_wakeref_put(wf,
			    INTEL_WAKEREF_PUT_ASYNC |
			    FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay));
}
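/*
 * Sketch (assumption, inferred from the flags above rather than stated in
 * this header): intel_wakeref_put() releases synchronously and may sleep;
 * intel_wakeref_put_async() defers the final release to a worker, so it is
 * intended for contexts that cannot sleep; intel_wakeref_put_delay()
 * additionally asks for the release to be deferred by @delay, presumably
 * in jiffies as it feeds a delayed_work.
 *
 *	intel_wakeref_put(wf);			(process context, may sleep)
 *	intel_wakeref_put_async(wf);		(cannot sleep here)
 *	intel_wakeref_put_delay(wf, HZ / 2);	(expect reuse within ~500ms)
 */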
static inline void
intel_wakeref_might_put(struct intel_wakeref *wf)
{
	might_lock(&wf->mutex);
}
/**
 * intel_wakeref_lock: Lock the wakeref (mutex)
 * @wf: the wakeref
 *
 * Locks the wakeref to prevent it from being acquired or released. New users
 * can still adjust the counter, but the wakeref itself (and callback)
 * cannot be acquired or released.
 */
static inline void
intel_wakeref_lock(struct intel_wakeref *wf)
	__acquires(wf->mutex)
{
	mutex_lock(&wf->mutex);
}
/**
 * intel_wakeref_unlock: Unlock the wakeref
 * @wf: the wakeref
 *
 * Releases a previously acquired intel_wakeref_lock().
 */
static inline void
intel_wakeref_unlock(struct intel_wakeref *wf)
	__releases(wf->mutex)
{
	mutex_unlock(&wf->mutex);
}
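/*
 * Sketch (assumption): lock/unlock serialise against the get/put callbacks,
 * for example to inspect state that is only changed underneath the wakeref
 * mutex:
 *
 *	intel_wakeref_lock(wf);
 *	if (intel_wakeref_is_active(wf))
 *		... inspect state changed only under the mutex ...
 *	intel_wakeref_unlock(wf);
 */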
/**
 * intel_wakeref_unlock_wait: Wait until the active callback is complete
 * @wf: the wakeref
 *
 * Waits until the active callback (running under the @wf->mutex or on
 * another CPU) is complete.
 */
static inline void
intel_wakeref_unlock_wait(struct intel_wakeref *wf)
{
	mutex_lock(&wf->mutex);
	mutex_unlock(&wf->mutex);
	flush_delayed_work(&wf->work);
}
/**
 * intel_wakeref_is_active: Query whether the wakeref is currently held
 * @wf: the wakeref
 *
 * Returns: true if the wakeref is currently held.
 */
static inline bool
intel_wakeref_is_active(const struct intel_wakeref *wf)
{
	return READ_ONCE(wf->wakeref);
}
/**
 * __intel_wakeref_defer_park: Defer the current park callback
 * @wf: the wakeref
 */
static inline void
__intel_wakeref_defer_park(struct intel_wakeref *wf)
{
	lockdep_assert_held(&wf->mutex);
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
	atomic_set_release(&wf->count, 1);
}
/**
 * intel_wakeref_wait_for_idle: Wait until the wakeref is idle
 * @wf: the wakeref
 *
 * Wait for the earlier asynchronous release of the wakeref. Note
 * this will wait for any third party as well, so make sure you only wait
 * when you have control over the wakeref and trust no one else is acquiring
 * it.
 *
 * Return: 0 on success, error code if killed.
 */
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);
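/*
 * Sketch (assumption): typically used when quiescing, e.g. before suspend
 * or unload, after the caller has dropped its own references:
 *
 *	intel_wakeref_put_async(wf);
 *	err = intel_wakeref_wait_for_idle(wf);
 *	if (err)
 *		return err;
 */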
struct intel_wakeref_auto {
	struct intel_runtime_pm *rpm;
	struct timer_list timer;
	intel_wakeref_t wakeref;
	spinlock_t lock;
	refcount_t count;
};
/**
 * intel_wakeref_auto: Delay the runtime-pm autosuspend
 * @wf: the wakeref
 * @timeout: relative timeout in jiffies
 *
 * The runtime-pm core uses a suspend delay after the last wakeref
 * is released before triggering runtime suspend of the device. That
 * delay is configurable via sysfs with little regard to the device
 * characteristics. Instead, we want to tune the autosuspend based on our
 * HW knowledge. intel_wakeref_auto() delays the sleep by the supplied
 * timeout.
 *
 * Pass @timeout = 0 to cancel a previous autosuspend by executing the
 * suspend immediately.
 */
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout);

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm);
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);
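/*
 * Usage sketch (assumption; the names my_dev, userfault_wakeref and the
 * 250ms value are hypothetical): intel_wakeref_auto() is called from a hot
 * path to hold the device awake for a grace period after each access, with
 * init/fini bracketing the owner's lifetime, and a timeout of 0 cancelling
 * the grace period as documented above.
 *
 *	intel_wakeref_auto_init(&my_dev->userfault_wakeref, rpm);
 *	...
 *	intel_wakeref_auto(&my_dev->userfault_wakeref, msecs_to_jiffies(250));
 *	...
 *	intel_wakeref_auto(&my_dev->userfault_wakeref, 0);
 *	intel_wakeref_auto_fini(&my_dev->userfault_wakeref);
 */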
#endif /* INTEL_WAKEREF_H */