/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_RUNTIME_PM_H__
#define __INTEL_RUNTIME_PM_H__

#include <linux/pm_runtime.h>
#include <linux/types.h>

#include "intel_wakeref.h"

struct device;
struct drm_i915_private;
struct drm_printer;

/*
 * This struct helps tracking the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, even register access, so we don't get interrupts nor
 * anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs to
 * either call intel_runtime_pm_get or call intel_display_power_get with the
 * appropriate power domain.
 *
 * Our driver uses the autosuspend delay feature, which means we'll only really
 * suspend if we stay with zero refcount for a certain amount of time. The
 * default value is currently very conservative (see intel_runtime_pm_enable),
 * but it can be changed with the standard runtime PM files in sysfs.
 *
 * The irqs_disabled variable becomes true exactly after we disable the IRQs
 * and goes back to false exactly before we reenable the IRQs. We use this
 * variable to check if someone is trying to enable/disable IRQs while they're
 * supposed to be disabled. This shouldn't happen and we'll print some error
 * messages in case it happens.
 *
 * For more, read Documentation/power/runtime_pm.rst.
 */
struct intel_runtime_pm {
	atomic_t wakeref_count;
	struct device *kdev; /* points to i915->drm.dev */
	bool no_wakeref_tracking;

	/*
	 * Protects access to the lmem userfault list.
	 *
	 * If we are outside of the runtime suspend path, access to
	 * @lmem_userfault_list always requires first grabbing the runtime pm
	 * wakeref, to ensure we can't race against runtime suspend. Once we
	 * have that we also need to grab @lmem_userfault_lock, at which point
	 * we have exclusive access.
	 *
	 * The runtime suspend path is special since it doesn't really hold
	 * any locks, but instead has exclusive access by virtue of all other
	 * accesses requiring holding the runtime pm wakeref.
	 */
	spinlock_t lmem_userfault_lock;

	/*
	 * Keep a list of the userfaulted gem objects which need to release
	 * their mmap mappings in the runtime suspend path.
	 */
	struct list_head lmem_userfault_list;

	/* Manual runtime pm autosuspend delay for user GGTT/lmem mmaps */
	struct intel_wakeref_auto userfault_wakeref;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	/*
	 * To aid detection of wakeref leaks and general misuse, we track all
	 * wakeref holders. With manual markup (i.e. returning a cookie to
	 * each rpm_get caller which they then supply to their paired
	 * rpm_put) we can remove corresponding pairs and keep the array
	 * trimmed to active wakerefs.
	 */
	struct ref_tracker_dir debug;
#endif
};

#define BITS_PER_WAKEREF	\
	BITS_PER_TYPE(typeof_member(struct intel_runtime_pm, wakeref_count))
#define INTEL_RPM_WAKELOCK_SHIFT	(BITS_PER_WAKEREF / 2)
#define INTEL_RPM_WAKELOCK_BIAS		(1 << INTEL_RPM_WAKELOCK_SHIFT)
#define INTEL_RPM_RAW_WAKEREF_MASK	(INTEL_RPM_WAKELOCK_BIAS - 1)

static inline int
intel_rpm_raw_wakeref_count(int wakeref_count)
{
	return wakeref_count & INTEL_RPM_RAW_WAKEREF_MASK;
}

static inline int
intel_rpm_wakelock_count(int wakeref_count)
{
	return wakeref_count >> INTEL_RPM_WAKELOCK_SHIFT;
}
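
/*
 * Illustrative sketch (not part of the driver API): wakeref_count packs two
 * counters into one atomic value. Raw wakerefs occupy the low half of the
 * counter and each wakelock reference adds INTEL_RPM_WAKELOCK_BIAS to the
 * high half, so with the helpers above:
 *
 *	int count = INTEL_RPM_WAKELOCK_BIAS + 1;
 *
 *	intel_rpm_raw_wakeref_count(count);	evaluates to 1
 *	intel_rpm_wakelock_count(count);	evaluates to 1
 */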

static inline void
assert_rpm_device_not_suspended(struct intel_runtime_pm *rpm)
{
	WARN_ONCE(pm_runtime_suspended(rpm->kdev),
		  "Device suspended during HW access\n");
}

static inline void
__assert_rpm_raw_wakeref_held(struct intel_runtime_pm *rpm, int wakeref_count)
{
	assert_rpm_device_not_suspended(rpm);
	WARN_ONCE(!intel_rpm_raw_wakeref_count(wakeref_count),
		  "RPM raw-wakeref not held\n");
}

static inline void
__assert_rpm_wakelock_held(struct intel_runtime_pm *rpm, int wakeref_count)
{
	__assert_rpm_raw_wakeref_held(rpm, wakeref_count);
	WARN_ONCE(!intel_rpm_wakelock_count(wakeref_count),
		  "RPM wakelock ref not held during HW access\n");
}

static inline void
assert_rpm_raw_wakeref_held(struct intel_runtime_pm *rpm)
{
	__assert_rpm_raw_wakeref_held(rpm, atomic_read(&rpm->wakeref_count));
}

static inline void
assert_rpm_wakelock_held(struct intel_runtime_pm *rpm)
{
	__assert_rpm_wakelock_held(rpm, atomic_read(&rpm->wakeref_count));
}
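
/*
 * Hypothetical example (not from this header): low-level accessors can call
 * the asserts above before touching registers, so that a missing wakeref
 * shows up as a WARN instead of a silent access to a powered-down device.
 *
 *	static u32 example_read_reg(struct intel_runtime_pm *rpm,
 *				    void __iomem *reg)
 *	{
 *		assert_rpm_wakelock_held(rpm);
 *		return readl(reg);
 *	}
 */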

/**
 * disable_rpm_wakeref_asserts - disable the RPM assert checks
 * @rpm: the intel_runtime_pm structure
 *
 * This function disables the asserts that check whether we hold an RPM
 * wakelock reference, while keeping the device-not-suspended checks still
 * enabled. It's meant to be used only in special circumstances where our
 * rule about the wakelock refcount wrt. the device power state doesn't hold.
 * According to this rule at any point where we access the HW or want to keep
 * the HW in an active state we must hold an RPM wakelock reference acquired
 * via one of the intel_runtime_pm_get() helpers. Currently there are a few
 * special spots where this rule doesn't hold: the IRQ and suspend/resume
 * handlers, the forcewake release timer, and the GPU RPS and hangcheck
 * works. All other users should avoid using this function.
 *
 * Any calls to this function must have a symmetric call to
 * enable_rpm_wakeref_asserts().
 */
static inline void
disable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm)
{
	atomic_add(INTEL_RPM_WAKELOCK_BIAS + 1,
		   &rpm->wakeref_count);
}

/**
 * enable_rpm_wakeref_asserts - re-enable the RPM assert checks
 * @rpm: the intel_runtime_pm structure
 *
 * This function re-enables the RPM assert checks after disabling them with
 * disable_rpm_wakeref_asserts. It's meant to be used only in special
 * circumstances, otherwise its use should be avoided.
 *
 * Any calls to this function must have a symmetric call to
 * disable_rpm_wakeref_asserts().
 */
static inline void
enable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm)
{
	atomic_sub(INTEL_RPM_WAKELOCK_BIAS + 1,
		   &rpm->wakeref_count);
}
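
/*
 * Hypothetical example (not from the driver): the disable/enable pair must
 * bracket the region where the wakelock rule doesn't hold, for instance in a
 * suspend/resume style handler.
 *
 *	static void example_suspend_handler(struct intel_runtime_pm *rpm)
 *	{
 *		disable_rpm_wakeref_asserts(rpm);
 *		... HW access that is legal without a wakelock ...
 *		enable_rpm_wakeref_asserts(rpm);
 *	}
 */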

void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm);
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_disable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm);
void intel_runtime_pm_driver_last_release(struct intel_runtime_pm *rpm);

intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);
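
/*
 * Hypothetical example (not part of this header): every get returns an
 * intel_wakeref_t cookie which must be passed back to the matching put so
 * that the debug build can pair acquisitions with releases.
 *
 *	static void example_touch_hw(struct intel_runtime_pm *rpm)
 *	{
 *		intel_wakeref_t wakeref;
 *
 *		wakeref = intel_runtime_pm_get(rpm);
 *		... HW access here ...
 *		intel_runtime_pm_put(rpm, wakeref);
 *	}
 */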

#define with_intel_runtime_pm(rpm, wf) \
	for ((wf) = intel_runtime_pm_get(rpm); (wf); \
	     intel_runtime_pm_put((rpm), (wf)), (wf) = 0)

#define with_intel_runtime_pm_if_in_use(rpm, wf) \
	for ((wf) = intel_runtime_pm_get_if_in_use(rpm); (wf); \
	     intel_runtime_pm_put((rpm), (wf)), (wf) = 0)

#define with_intel_runtime_pm_if_active(rpm, wf) \
	for ((wf) = intel_runtime_pm_get_if_active(rpm); (wf); \
	     intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
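
/*
 * Hypothetical example (not part of this header): the with_* helpers wrap
 * the get/put pair around a block; the _if_in_use/_if_active variants simply
 * skip the block when no wakeref could be acquired.
 *
 *	static void example_flush_if_awake(struct intel_runtime_pm *rpm)
 *	{
 *		intel_wakeref_t wakeref;
 *
 *		with_intel_runtime_pm_if_in_use(rpm, wakeref) {
 *			... HW access here ...
 *		}
 *	}
 */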

void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
#else
static inline void
intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	intel_runtime_pm_put_unchecked(rpm);
}
#endif
void intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref);

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
				    struct drm_printer *p);
#else
static inline void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
						   struct drm_printer *p)
{
}
#endif

#endif /* __INTEL_RUNTIME_PM_H__ */