/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/wait_bit.h>

#include "intel_runtime_pm.h"
#include "intel_wakeref.h"
#include "i915_drv.h"

static void rpm_get(struct intel_wakeref *wf)
{
	wf->wakeref = intel_runtime_pm_get(&wf->i915->runtime_pm);
}

static void rpm_put(struct intel_wakeref *wf)
{
	intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);

	intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref);
	INTEL_WAKEREF_BUG_ON(!wakeref);
}

int __intel_wakeref_get_first(struct intel_wakeref *wf)
{
	/*
	 * Treat get/put as different subclasses, as we may need to run
	 * the put callback from under the shrinker and do not want to
	 * cross-contaminate that callback with any extra work performed
	 * upon acquiring the wakeref.
	 */
	mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING);
	if (!atomic_read(&wf->count)) {
		int err;

		rpm_get(wf);

		err = wf->ops->get(wf);
		if (unlikely(err)) {
			rpm_put(wf);
			mutex_unlock(&wf->mutex);
			return err;
		}

		smp_mb__before_atomic(); /* release wf->count */
	}
	atomic_inc(&wf->count);
	mutex_unlock(&wf->mutex);

	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	return 0;
}

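/*
 * __intel_wakeref_get_first() is only the slow path taken on the 0 -> 1
 * transition. The common case is handled lock-free by the inline helper in
 * intel_wakeref.h, which looks roughly like this (paraphrased; details vary
 * by kernel version):
 *
 *	static inline int intel_wakeref_get(struct intel_wakeref *wf)
 *	{
 *		might_sleep();
 *		if (unlikely(!atomic_inc_not_zero(&wf->count)))
 *			return __intel_wakeref_get_first(wf);
 *		return 0;
 *	}
 *
 * i.e. an already-held wakeref costs one atomic increment; only the first
 * user pays for the mutex, the runtime-PM get and ops->get().
 */
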
static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	if (unlikely(!atomic_dec_and_test(&wf->count)))
		goto unlock;

	/* ops->put() must reschedule its own release on error/deferral */
	if (likely(!wf->ops->put(wf))) {
		rpm_put(wf);
		wake_up_var(&wf->wakeref);
	}

unlock:
	mutex_unlock(&wf->mutex);
}

void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
{
	INTEL_WAKEREF_BUG_ON(delayed_work_pending(&wf->work));

	/* Assume we are not in process context and so cannot sleep. */
	if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) {
		mod_delayed_work(wf->i915->unordered_wq, &wf->work,
				 FIELD_GET(INTEL_WAKEREF_PUT_DELAY, flags));
		return;
	}

	____intel_wakeref_put_last(wf);
}

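/*
 * Callers that may be in atomic context (or that want to defer the release)
 * pass INTEL_WAKEREF_PUT_ASYNC in flags, optionally with a delay encoded via
 * INTEL_WAKEREF_PUT_DELAY; the final put is then punted to wf->work on the
 * driver's unordered workqueue. The header-side helper is roughly
 * (paraphrased from intel_wakeref.h; details vary by kernel version):
 *
 *	intel_wakeref_put_delay(wf, delay) ==
 *		__intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC |
 *					FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay));
 */
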
static void __intel_wakeref_put_work(struct work_struct *wrk)
{
	struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work.work);

	if (atomic_add_unless(&wf->count, -1, 1))
		return;

	mutex_lock(&wf->mutex);
	____intel_wakeref_put_last(wf);
}

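/*
 * atomic_add_unless(&wf->count, -1, 1) decrements the count and returns true
 * unless the count is exactly 1, so the deferred worker only takes wf->mutex
 * (and runs ____intel_wakeref_put_last()) when it is dropping the final
 * reference; intermediate references are released without any locking.
 */
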
void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct drm_i915_private *i915,
			  const struct intel_wakeref_ops *ops,
			  struct intel_wakeref_lockclass *key)
{
	wf->i915 = i915;
	wf->ops = ops;

	__mutex_init(&wf->mutex, "wakeref.mutex", &key->mutex);
	atomic_set(&wf->count, 0);
	wf->wakeref = 0;

	INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work);
	lockdep_init_map(&wf->work.work.lockdep_map,
			 "wakeref.work", &key->work, 0);
}

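/*
 * A sketch of typical setup, assuming the intel_wakeref_init() wrapper macro
 * from intel_wakeref.h (which supplies a static lock class) and a hypothetical
 * ops implementation; not verbatim driver code:
 *
 *	static int example_get(struct intel_wakeref *wf) { return 0; }
 *	static int example_put(struct intel_wakeref *wf) { return 0; }
 *
 *	static const struct intel_wakeref_ops example_ops = {
 *		.get = example_get,	// called on the 0 -> 1 transition
 *		.put = example_put,	// called on the 1 -> 0 transition
 *	};
 *
 *	intel_wakeref_init(&obj->wakeref, i915, &example_ops);
 */
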
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
{
	int err;

	might_sleep();

	err = wait_var_event_killable(&wf->wakeref,
				      !intel_wakeref_is_active(wf));
	if (err)
		return err;

	intel_wakeref_unlock_wait(wf);
	return 0;
}

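/*
 * The wait above pairs with wake_up_var(&wf->wakeref) in
 * ____intel_wakeref_put_last(): the waiter sleeps until the final put has
 * released the wakeref. intel_wakeref_unlock_wait() (from intel_wakeref.h)
 * then serialises against a concurrent final put (briefly cycling wf->mutex
 * and flushing wf->work in current kernels) before idle is reported.
 */
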
static void wakeref_auto_timeout(struct timer_list *t)
{
	struct intel_wakeref_auto *wf = from_timer(wf, t, timer);
	intel_wakeref_t wakeref;
	unsigned long flags;

	if (!refcount_dec_and_lock_irqsave(&wf->count, &wf->lock, &flags))
		return;

	wakeref = fetch_and_zero(&wf->wakeref);
	spin_unlock_irqrestore(&wf->lock, flags);

	intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref);
}

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct drm_i915_private *i915)
{
	spin_lock_init(&wf->lock);
	timer_setup(&wf->timer, wakeref_auto_timeout, 0);
	refcount_set(&wf->count, 0);
	wf->wakeref = 0;
	wf->i915 = i915;
}

void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
{
	unsigned long flags;

	if (!timeout) {
		if (del_timer_sync(&wf->timer))
			wakeref_auto_timeout(&wf->timer);
		return;
	}

	/* We must only extend an already active wakeref */
	assert_rpm_wakelock_held(&wf->i915->runtime_pm);

	if (!refcount_inc_not_zero(&wf->count)) {
		spin_lock_irqsave(&wf->lock, flags);
		if (!refcount_inc_not_zero(&wf->count)) {
			INTEL_WAKEREF_BUG_ON(wf->wakeref);
			wf->wakeref =
				intel_runtime_pm_get_if_in_use(&wf->i915->runtime_pm);
			refcount_set(&wf->count, 1);
		}
		spin_unlock_irqrestore(&wf->lock, flags);
	}

	/*
	 * If we extend a pending timer, we will only get a single timer
	 * callback and so need to cancel the local inc by running the
	 * elided callback to keep the wf->count balanced.
	 */
	if (mod_timer(&wf->timer, jiffies + timeout))
		wakeref_auto_timeout(&wf->timer);
}

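/*
 * Usage sketch (hypothetical call site, not from this file): a path already
 * holding a runtime-PM wakeref, e.g. a GGTT fault handler, can keep the
 * device awake for a grace period after the access:
 *
 *	intel_wakeref_auto(&wf_auto, msecs_to_jiffies(250));
 *
 * Each call extends the timer; passing a timeout of 0 cancels it and drops
 * the auto-wakeref immediately (as intel_wakeref_auto_fini() does below).
 */
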
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
{
	intel_wakeref_auto(wf, 0);
	INTEL_WAKEREF_BUG_ON(wf->wakeref);
}