/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <asm/iosf_mbi.h>
#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS	 10

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
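
/*
 * Note: a "posting read" like the macro above is used purely for its side
 * effect -- reading back over the bus forces any preceding posted MMIO
 * writes to actually reach the hardware before we proceed. The returned
 * value is deliberately discarded.
 */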

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
	"vdbox0",
	"vdbox1",
	"vdbox2",
	"vdbox3",
	"vebox0",
	"vebox1",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static inline void
fw_domain_reset(struct drm_i915_private *i915,
		const struct intel_uncore_forcewake_domain *d)
{
	/*
	 * We don't really know if the powerwell for the forcewake domain we
	 * are trying to reset here actually exists at this point (engines
	 * could be fused off in ICL+), so no waiting for acks.
	 */
	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}
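
/*
 * Arming the timer rather than releasing the domain immediately gives
 * roughly a millisecond of hysteresis: back-to-back register accesses can
 * reuse the still-held wakeref instead of paying the full ack handshake on
 * every access (see intel_uncore_fw_release_timer() below).
 */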

static inline int
__wait_for_ack(const struct drm_i915_private *i915,
	       const struct intel_uncore_forcewake_domain *d,
	       const u32 ack,
	       const u32 value)
{
	return wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) & ack) == value,
			       FORCEWAKE_ACK_TIMEOUT_MS);
}

static inline int
wait_ack_clear(const struct drm_i915_private *i915,
	       const struct intel_uncore_forcewake_domain *d,
	       const u32 ack)
{
	return __wait_for_ack(i915, d, ack, 0);
}

static inline int
wait_ack_set(const struct drm_i915_private *i915,
	     const struct intel_uncore_forcewake_domain *d,
	     const u32 ack)
{
	return __wait_for_ack(i915, d, ack, ack);
}

static inline void
fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
			 const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_clear(i915, d, FORCEWAKE_KERNEL))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

enum ack_type {
	ACK_CLEAR = 0,
	ACK_SET
};

static int
fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915,
				 const struct intel_uncore_forcewake_domain *d,
				 const enum ack_type type)
{
	const u32 ack_bit = FORCEWAKE_KERNEL;
	const u32 value = type == ACK_SET ? ack_bit : 0;
	unsigned int pass;
	bool ack_detected;

	/*
	 * The driver's wake request can collide with a wake request
	 * initiated by the hardware itself, and such a collision can cause
	 * the hardware to never deliver the ack for the driver's request.
	 *
	 * Use a fallback bit toggle to kick the gpu state machine
	 * in the hope that the original ack will be delivered along with
	 * the fallback ack.
	 *
	 * This workaround is described in HSDES #1604254524 and it's known as:
	 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
	 * although the name is a bit misleading.
	 */

	pass = 1;
	do {
		wait_ack_clear(i915, d, FORCEWAKE_KERNEL_FALLBACK);

		__raw_i915_write32(i915, d->reg_set,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL_FALLBACK));
		/* Give gt some time to relax before the polling frenzy */
		udelay(10 * pass);
		wait_ack_set(i915, d, FORCEWAKE_KERNEL_FALLBACK);

		ack_detected = (__raw_i915_read32(i915, d->reg_ack) & ack_bit) == value;

		__raw_i915_write32(i915, d->reg_set,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL_FALLBACK));
	} while (!ack_detected && pass++ < 10);

	DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
			 intel_uncore_forcewake_domain_to_str(d->id),
			 type == ACK_SET ? "set" : "clear",
			 __raw_i915_read32(i915, d->reg_ack),
			 pass);

	return ack_detected ? 0 : -ETIMEDOUT;
}
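
/*
 * Note the escalating back-off above: each retry waits 10us longer than
 * the previous one (10us, 20us, ... up to 100us on pass 10), so a stubborn
 * domain gets progressively more time to settle before we give up and
 * return -ETIMEDOUT to the plain wait-for-ack paths below.
 */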

static inline void
fw_domain_wait_ack_clear_fallback(const struct drm_i915_private *i915,
				  const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_clear(i915, d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(i915, d, ACK_CLEAR))
		fw_domain_wait_ack_clear(i915, d);
}

static inline void
fw_domain_get(struct drm_i915_private *i915,
	      const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set);
}

static inline void
fw_domain_wait_ack_set(const struct drm_i915_private *i915,
		       const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_set(i915, d, FORCEWAKE_KERNEL))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_wait_ack_set_fallback(const struct drm_i915_private *i915,
				const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_set(i915, d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(i915, d, ACK_SET))
		fw_domain_wait_ack_set(i915, d);
}

static inline void
fw_domain_put(const struct drm_i915_private *i915,
	      const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear);
}

static void
fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
		fw_domain_wait_ack_clear(i915, d);
		fw_domain_get(i915, d);
	}

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_wait_ack_set(i915, d);

	i915->uncore.fw_domains_active |= fw_domains;
}

static void
fw_domains_get_with_fallback(struct drm_i915_private *i915,
			     enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
		fw_domain_wait_ack_clear_fallback(i915, d);
		fw_domain_get(i915, d);
	}

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_wait_ack_set_fallback(i915, d);

	i915->uncore.fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_put(i915, d);

	i915->uncore.fw_domains_active &= ~fw_domains;
}

static void
fw_domains_reset(struct drm_i915_private *i915,
		 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	if (!fw_domains)
		return;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_reset(i915, d);
}

static inline u32 gt_thread_status(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = __raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG);
	val &= GEN6_GT_THREAD_STATUS_CORE_MASK;

	return val;
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/*
	 * w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	WARN_ONCE(wait_for_atomic_us(gt_thread_status(dev_priv) == 0, 5000),
		  "GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	u32 n;

	/* On VLV, the FIFO is shared by both SW and HW, so we need to read
	 * the free entries every time. */
	if (IS_VALLEYVIEW(dev_priv))
		n = fifo_free_entries(dev_priv);
	else
		n = dev_priv->uncore.fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		if (wait_for_atomic((n = fifo_free_entries(dev_priv)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
			return;
		}
	}

	dev_priv->uncore.fifo_count = n - 1;
}
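
/*
 * Accounting note for the above: the cached fifo_count is decremented once
 * per posted write, and only when it falls to the reserved watermark
 * (GT_FIFO_NUM_RESERVED_ENTRIES) do we spin re-reading GTFIFOCTL until the
 * hardware has drained enough entries to get back above it.
 */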

static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct drm_i915_private *dev_priv =
		container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]);
	unsigned long irqflags;

	assert_rpm_device_not_suspended(dev_priv);

	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return HRTIMER_NORESTART;
}

/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
static unsigned int
intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	iosf_mbi_assert_punit_acquired();

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		unsigned int tmp;

		active_domains = 0;

		for_each_fw_domain(domain, dev_priv, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	fw = dev_priv->uncore.fw_domains_active;
	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);
	assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return fw; /* track the lost user forcewake domains */
}

static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
{
	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	const unsigned int sets[4] = { 1, 1, 2, 2 };
	const u32 cap = dev_priv->edram_cap;

	return EDRAM_NUM_BANKS(cap) *
	       ways[EDRAM_WAYS_IDX(cap)] *
	       sets[EDRAM_SETS_IDX(cap)] *
	       1024 * 1024;
}
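
/*
 * Worked example (hypothetical cap value): a capability register decoding
 * to 8 banks, a ways index of 1 (8 ways) and a sets index of 2 (2 sets)
 * yields 8 * 8 * 2 * 1 MiB = 128 MiB of eDRAM.
 */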

u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
{
	if (!HAS_EDRAM(dev_priv))
		return 0;

	/* The needed capability bits for size calculation are not there
	 * with pre-gen9, so return 128MB always.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 128 * 1024 * 1024;

	return gen9_edram_size(dev_priv);
}

static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv) ||
	    IS_BROADWELL(dev_priv) ||
	    INTEL_GEN(dev_priv) >= 9) {
		dev_priv->edram_cap = __raw_i915_read32(dev_priv,
							HSW_EDRAM_CAP);

		/* NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
	} else {
		dev_priv->edram_cap = 0;
	}

	if (HAS_EDRAM(dev_priv))
		DRM_INFO("Found %lluMB of eDRAM\n",
			 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
}

static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 dbg;

	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 cer;

	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
gen6_check_for_fifo_debug(struct drm_i915_private *dev_priv)
{
	u32 fifodbg;

	fifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);

	if (unlikely(fifodbg)) {
		DRM_DEBUG_DRIVER("GTFIFODBG = 0x%08x\n", fifodbg);
		__raw_i915_write32(dev_priv, GTFIFODBG, fifodbg);
	}

	return fifodbg;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	bool ret = false;

	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
		ret |= fpga_check_for_unclaimed_mmio(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret |= vlv_check_for_unclaimed_mmio(dev_priv);

	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
		ret |= gen6_check_for_fifo_debug(dev_priv);

	return ret;
}

static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
					  unsigned int restore_forcewake)
{
	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(dev_priv))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev_priv)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	iosf_mbi_punit_acquire();
	intel_uncore_forcewake_reset(dev_priv);
	if (restore_forcewake) {
		spin_lock_irq(&dev_priv->uncore.lock);
		dev_priv->uncore.funcs.force_wake_get(dev_priv,
						      restore_forcewake);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
		spin_unlock_irq(&dev_priv->uncore.lock);
	}
	iosf_mbi_punit_release();
}

void intel_uncore_suspend(struct drm_i915_private *dev_priv)
{
	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&dev_priv->uncore.pmic_bus_access_nb);
	dev_priv->uncore.fw_domains_saved =
		intel_uncore_forcewake_reset(dev_priv);
	iosf_mbi_punit_release();
}

void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
{
	unsigned int restore_forcewake;

	restore_forcewake = fetch_and_zero(&dev_priv->uncore.fw_domains_saved);
	__intel_uncore_early_sanitize(dev_priv, restore_forcewake);

	iosf_mbi_register_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);
	i915_check_and_clear_faults(dev_priv);
}

void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
{
	iosf_mbi_register_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);
}

void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_sanitize_gt_powersave(dev_priv);
}

static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
		if (domain->wake_count++) {
			fw_domains &= ~domain->mask;
			domain->active = true;
		}
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down a particular
 * forcewake domain, this function should be called at the beginning of the
 * sequence, and subsequently the reference should be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, in which case @fw_domains should be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
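
/*
 * Typical usage sketch (illustrative, not taken from a specific caller):
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 *	... issue a sequence of reads/writes that must not see the GT
 *	    power down between them ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 *
 * Individual register accessors take and release domains automatically;
 * the explicit get/put pair is only needed for multi-register sequences.
 */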

/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @dev_priv: i915 device instance
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 */
void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->uncore.lock);
	if (!dev_priv->uncore.user_forcewake.count++) {
		intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);

		/* Save and disable mmio debugging for the user bypass */
		dev_priv->uncore.user_forcewake.saved_mmio_check =
			dev_priv->uncore.unclaimed_mmio_check;
		dev_priv->uncore.user_forcewake.saved_mmio_debug =
			i915_modparams.mmio_debug;

		dev_priv->uncore.unclaimed_mmio_check = 0;
		i915_modparams.mmio_debug = 0;
	}
	spin_unlock_irq(&dev_priv->uncore.lock);
}

/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @dev_priv: i915 device instance
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 */
void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->uncore.lock);
	if (!--dev_priv->uncore.user_forcewake.count) {
		if (intel_uncore_unclaimed_mmio(dev_priv))
			dev_info(dev_priv->drm.dev,
				 "Invalid mmio detected during user access\n");

		dev_priv->uncore.unclaimed_mmio_check =
			dev_priv->uncore.user_forcewake.saved_mmio_check;
		i915_modparams.mmio_debug =
			dev_priv->uncore.user_forcewake.saved_mmio_debug;

		intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
	}
	spin_unlock_irq(&dev_priv->uncore.lock);
}

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count) {
			domain->active = true;
			continue;
		}

		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN(dev_priv->uncore.fw_domains_active,
	     "Expected all fw_domains to be inactive, but %08x are still on\n",
	     dev_priv->uncore.fw_domains_active);
}

void assert_forcewakes_active(struct drm_i915_private *dev_priv,
			      enum forcewake_domains fw_domains)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	fw_domains &= dev_priv->uncore.fw_domains;
	WARN(fw_domains & ~dev_priv->uncore.fw_domains_active,
	     "Expected %08x fw_domains to be active, but %08x are off\n",
	     fw_domains, fw_domains & ~dev_priv->uncore.fw_domains_active);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define GEN11_NEEDS_FORCE_WAKE(reg) \
	((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000))

#define __gen6_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({ \
	unsigned int start__ = 0, end__ = (num); \
	typeof(base) result__ = NULL; \
	while (start__ < end__) { \
		unsigned int mid__ = start__ + (end__ - start__) / 2; \
		int ret__ = (cmp)((key), (base) + mid__); \
		if (ret__ < 0) { \
			end__ = mid__; \
		} else if (ret__ > 0) { \
			start__ = mid__ + 1; \
		} else { \
			result__ = (base) + mid__; \
			break; \
		} \
	} \
	result__; \
})
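
/*
 * Compared with calling lib/bsearch.c's bsearch(), expanding the binary
 * search as a statement-expression macro lets the comparator be inlined at
 * each call site and evaluates to a typed pointer (or NULL), e.g.:
 *
 *	entry = BSEARCH(offset, table, n_entries, fw_range_cmp);
 *
 * which is how find_fw_domain() below consumes it.
 */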

static enum forcewake_domains
find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			dev_priv->uncore.fw_domains_table,
			dev_priv->uncore.fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	/*
	 * The list of FW domains depends on the SKU in gen11+ so we
	 * can't determine it statically. We use FORCEWAKE_ALL and
	 * translate it here to the list of available domains.
	 */
	if (entry->domains == FORCEWAKE_ALL)
		return dev_priv->uncore.fw_domains;

	WARN(entry->domains & ~dev_priv->uncore.fw_domains,
	     "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
	     entry->domains & ~dev_priv->uncore.fw_domains, offset);

	return entry->domains;
}

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
	(INTEL_GEN(dev_priv) >= 9 || \
	 IS_CHERRYVIEW(dev_priv) || \
	 IS_VALLEYVIEW(dev_priv))

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})

#define __gen11_fwtable_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (GEN11_NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})

/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
	GEN6_RPNSWREQ,			/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};
a89a70a8 DCS |
922 | static const i915_reg_t gen11_shadowed_regs[] = { |
923 | RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */ | |
924 | GEN6_RPNSWREQ, /* 0xA008 */ | |
925 | GEN6_RC_VIDEO_FREQ, /* 0xA00C */ | |
926 | RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */ | |
927 | RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */ | |
928 | RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */ | |
929 | RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */ | |
930 | RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */ | |
931 | RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */ | |
932 | RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */ | |
933 | /* TODO: Other registers are not yet used */ | |
934 | }; | |
935 | ||
9480dbf0 | 936 | static int mmio_reg_cmp(u32 key, const i915_reg_t *reg) |
5a659383 | 937 | { |
9480dbf0 | 938 | u32 offset = i915_mmio_reg_offset(*reg); |
5a659383 | 939 | |
9480dbf0 | 940 | if (key < offset) |
5a659383 | 941 | return -1; |
9480dbf0 | 942 | else if (key > offset) |
5a659383 TU |
943 | return 1; |
944 | else | |
945 | return 0; | |
946 | } | |
947 | ||
a89a70a8 DCS |
948 | #define __is_genX_shadowed(x) \ |
949 | static bool is_gen##x##_shadowed(u32 offset) \ | |
950 | { \ | |
951 | const i915_reg_t *regs = gen##x##_shadowed_regs; \ | |
952 | return BSEARCH(offset, regs, ARRAY_SIZE(gen##x##_shadowed_regs), \ | |
953 | mmio_reg_cmp); \ | |
6863b76c TU |
954 | } |
955 | ||
a89a70a8 DCS |
956 | __is_genX_shadowed(8) |
957 | __is_genX_shadowed(11) | |
958 | ||
6863b76c TU |
959 | #define __gen8_reg_write_fw_domains(offset) \ |
960 | ({ \ | |
961 | enum forcewake_domains __fwd; \ | |
962 | if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \ | |
963 | __fwd = FORCEWAKE_RENDER; \ | |
964 | else \ | |
965 | __fwd = 0; \ | |
966 | __fwd; \ | |
967 | }) | |
968 | ||
b0081239 | 969 | /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */ |
9fc1117c TU |
970 | static const struct intel_forcewake_range __chv_fw_ranges[] = { |
971 | GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER), | |
b0081239 | 972 | GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA), |
9fc1117c | 973 | GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), |
b0081239 | 974 | GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA), |
9fc1117c | 975 | GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), |
b0081239 | 976 | GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA), |
9fc1117c | 977 | GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA), |
b0081239 TU |
978 | GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA), |
979 | GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER), | |
9fc1117c | 980 | GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA), |
b0081239 TU |
981 | GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER), |
982 | GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA), | |
9fc1117c TU |
983 | GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA), |
984 | GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA), | |
985 | GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA), | |
986 | GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA), | |
9fc1117c | 987 | }; |
38fb6a40 | 988 | |
22d48c55 | 989 | #define __fwtable_reg_write_fw_domains(offset) \ |
6863b76c TU |
990 | ({ \ |
991 | enum forcewake_domains __fwd = 0; \ | |
0dd356bb | 992 | if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \ |
15157970 | 993 | __fwd = find_fw_domain(dev_priv, offset); \ |
6863b76c TU |
994 | __fwd; \ |
995 | }) | |
996 | ||
a89a70a8 DCS |
997 | #define __gen11_fwtable_reg_write_fw_domains(offset) \ |
998 | ({ \ | |
999 | enum forcewake_domains __fwd = 0; \ | |
1000 | if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \ | |
1001 | __fwd = find_fw_domain(dev_priv, offset); \ | |
1002 | __fwd; \ | |
1003 | }) | |
1004 | ||
b0081239 | 1005 | /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */ |
9fc1117c | 1006 | static const struct intel_forcewake_range __gen9_fw_ranges[] = { |
0dd356bb | 1007 | GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER), |
9fc1117c TU |
1008 | GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */ |
1009 | GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), | |
0dd356bb | 1010 | GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER), |
9fc1117c | 1011 | GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER), |
0dd356bb | 1012 | GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER), |
9fc1117c | 1013 | GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), |
0dd356bb | 1014 | GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER), |
b0081239 | 1015 | GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA), |
9fc1117c | 1016 | GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), |
0dd356bb | 1017 | GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER), |
9fc1117c | 1018 | GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), |
0dd356bb | 1019 | GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER), |
b0081239 | 1020 | GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA), |
0dd356bb | 1021 | GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER), |
9fc1117c | 1022 | GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER), |
0dd356bb | 1023 | GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER), |
9fc1117c | 1024 | GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA), |
0dd356bb | 1025 | GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER), |
b0081239 | 1026 | GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER), |
78424c92 | 1027 | GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER), |
9fc1117c | 1028 | GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA), |
0dd356bb | 1029 | GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER), |
b0081239 | 1030 | GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER), |
0dd356bb | 1031 | GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER), |
9fc1117c | 1032 | GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA), |
0dd356bb | 1033 | GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER), |
9fc1117c | 1034 | GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA), |
0dd356bb | 1035 | GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER), |
b0081239 | 1036 | GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER), |
0dd356bb | 1037 | GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER), |
9fc1117c TU |
1038 | GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA), |
1039 | }; | |
6863b76c | 1040 | |
a89a70a8 DCS |
1041 | /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */ |
1042 | static const struct intel_forcewake_range __gen11_fw_ranges[] = { | |
1043 | GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER), | |
1044 | GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */ | |
1045 | GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), | |
1046 | GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER), | |
1047 | GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER), | |
1048 | GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER), | |
1049 | GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), | |
1050 | GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER), | |
1051 | GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), | |
1052 | GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER), | |
1053 | GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), | |
1054 | GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER), | |
1055 | GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER), | |
1056 | GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER), | |
1057 | GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL), | |
1058 | GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER), | |
1059 | GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER), | |
1060 | GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER), | |
1061 | GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER), | |
1062 | GEN_FW_RANGE(0xe900, 0x243ff, FORCEWAKE_BLITTER), | |
1063 | GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER), | |
1064 | GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER), | |
1065 | GEN_FW_RANGE(0x40000, 0x1bffff, 0), | |
1066 | GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), | |
1067 | GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), | |
1068 | GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), | |
1069 | GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER), | |
1070 | GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), | |
1071 | GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), | |
1072 | GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1) | |
1073 | }; | |
1074 | ||
907b28c5 CW |
1075 | static void |
1076 | ilk_dummy_write(struct drm_i915_private *dev_priv) | |
1077 | { | |
1078 | /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up | |
1079 | * the chip from rc6 before touching it for real. MI_MODE is masked, | |
1080 | * hence harmless to write 0 into. */ | |
6af5d92f | 1081 | __raw_i915_write32(dev_priv, MI_MODE, 0); |
907b28c5 CW |
1082 | } |
1083 | ||
1084 | static void | |
9c053501 MK |
1085 | __unclaimed_reg_debug(struct drm_i915_private *dev_priv, |
1086 | const i915_reg_t reg, | |
1087 | const bool read, | |
1088 | const bool before) | |
907b28c5 | 1089 | { |
dda96033 CW |
1090 | if (WARN(check_for_unclaimed_mmio(dev_priv) && !before, |
1091 | "Unclaimed %s register 0x%x\n", | |
1092 | read ? "read from" : "write to", | |
4bd0a25d | 1093 | i915_mmio_reg_offset(reg))) |
4f044a88 MW |
1094 | /* Only report the first N failures */ |
1095 | i915_modparams.mmio_debug--; | |
907b28c5 CW |
1096 | } |
1097 | ||
9c053501 MK |
1098 | static inline void |
1099 | unclaimed_reg_debug(struct drm_i915_private *dev_priv, | |
1100 | const i915_reg_t reg, | |
1101 | const bool read, | |
1102 | const bool before) | |
1103 | { | |
4f044a88 | 1104 | if (likely(!i915_modparams.mmio_debug)) |
9c053501 MK |
1105 | return; |
1106 | ||
1107 | __unclaimed_reg_debug(dev_priv, reg, read, before); | |
1108 | } | |
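
/*
 * unclaimed_reg_debug() is invoked twice per MMIO access -- once with
 * before=true and once with before=false (see the GEN6 read/write
 * header/footer macros below) -- so that an unclaimed-access flag raised
 * between the two calls can be pinned on this exact register rather than
 * on some earlier, unrelated access.
 */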

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER
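
/*
 * For illustration, each __gen2_read(x) invocation above token-pastes a
 * complete accessor; __gen2_read(32), for instance, expands to roughly:
 *
 *	static u32
 *	gen2_read32(struct drm_i915_private *dev_priv,
 *		    i915_reg_t reg, bool trace)
 *	{
 *		u32 val = 0;
 *		assert_rpm_wakelock_held(dev_priv);
 *		val = __raw_i915_read32(dev_priv, reg);
 *		trace_i915_reg_rw(false, reg, val, sizeof(val), trace);
 *		return val;
 *	}
 *
 * The gen6+ generators below follow the same pattern, with locking and
 * forcewake handling added in the header/footer macros.
 */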

#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, true, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
		fw_domain_arm_timer(domain);

	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	if (WARN_ON(!fw_domains))
		return;

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= dev_priv->uncore.fw_domains;
	fw_domains &= ~dev_priv->uncore.fw_domains_active;

	if (fw_domains)
		___force_wake_auto(dev_priv, fw_domains);
}

#define __gen_read(func, x) \
static u##x \
func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __##func##_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}
#define __gen6_read(x) __gen_read(gen6, x)
#define __fwtable_read(x) __gen_read(fwtable, x)
#define __gen11_fwtable_read(x) __gen_read(gen11_fwtable, x)

__gen11_fwtable_read(8)
__gen11_fwtable_read(16)
__gen11_fwtable_read(32)
__gen11_fwtable_read(64)
__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __gen11_fwtable_read
#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, false, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__gen6_gt_wait_for_fifo(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __gen_write(func, x) \
static void \
func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __##func##_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}
#define __gen8_write(x) __gen_write(gen8, x)
#define __fwtable_write(x) __gen_write(fwtable, x)
#define __gen11_fwtable_write(x) __gen_write(gen11_fwtable, x)

__gen11_fwtable_write(8)
__gen11_fwtable_write(16)
__gen11_fwtable_write(32)
__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#undef __gen11_fwtable_write
#undef __fwtable_write
#undef __gen8_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(i915, x) \
do { \
	(i915)->uncore.funcs.mmio_writeb = x##_write8; \
	(i915)->uncore.funcs.mmio_writew = x##_write16; \
	(i915)->uncore.funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(i915, x) \
do { \
	(i915)->uncore.funcs.mmio_readb = x##_read8; \
	(i915)->uncore.funcs.mmio_readw = x##_read16; \
	(i915)->uncore.funcs.mmio_readl = x##_read32; \
	(i915)->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
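
/*
 * Illustrative use of the vfunc-assignment macros (the actual hookup is
 * done by the uncore init code, which picks a generator family per
 * platform):
 *
 *	ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
 *	ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
 *
 * after which the mmio_read/mmio_write vfuncs point at the fwtable
 * accessors generated above.
 */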
05a2fb15 MK |
1334 | |
1335 | static void fw_domain_init(struct drm_i915_private *dev_priv, | |
48c1026a | 1336 | enum forcewake_domain_id domain_id, |
f0f59a00 VS |
1337 | i915_reg_t reg_set, |
1338 | i915_reg_t reg_ack) | |
05a2fb15 MK |
1339 | { |
1340 | struct intel_uncore_forcewake_domain *d; | |
1341 | ||
1342 | if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT)) | |
1343 | return; | |
1344 | ||
1345 | d = &dev_priv->uncore.fw_domain[domain_id]; | |
1346 | ||
1347 | WARN_ON(d->wake_count); | |
1348 | ||
6e3955a5 CW |
1349 | WARN_ON(!i915_mmio_reg_valid(reg_set)); |
1350 | WARN_ON(!i915_mmio_reg_valid(reg_ack)); | |
1351 | ||
05a2fb15 MK |
1352 | d->wake_count = 0; |
1353 | d->reg_set = reg_set; | |
1354 | d->reg_ack = reg_ack; | |
1355 | ||
05a2fb15 MK |
1356 | d->id = domain_id; |
1357 | ||
33c582c1 TU |
1358 | BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER)); |
1359 | BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER)); | |
1360 | BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA)); | |
a89a70a8 DCS |
1361 | BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0)); |
1362 | BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1)); | |
1363 | BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2)); | |
1364 | BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3)); | |
1365 | BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0)); | |
1366 | BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1)); | |
1367 | |
d2dc94bc | 1369 | d->mask = BIT(domain_id); |
33c582c1 | 1370 | |
a57a4a67 TU |
1371 | hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
1372 | d->timer.function = intel_uncore_fw_release_timer; | |
05a2fb15 | 1373 | |
6e3955a5 | 1374 | dev_priv->uncore.fw_domains |= BIT(domain_id); |
f9b3927a | 1375 | |
577ac4bd | 1376 | fw_domain_reset(dev_priv, d); |
05a2fb15 MK |
1377 | } |
1378 | ||
26376a7e OM |
1379 | static void fw_domain_fini(struct drm_i915_private *dev_priv, |
1380 | enum forcewake_domain_id domain_id) | |
1381 | { | |
1382 | struct intel_uncore_forcewake_domain *d; | |
1383 | ||
1384 | if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT)) | |
1385 | return; | |
1386 | ||
1387 | d = &dev_priv->uncore.fw_domain[domain_id]; | |
1388 | ||
1389 | WARN_ON(d->wake_count); | |
1390 | WARN_ON(hrtimer_cancel(&d->timer)); | |
1391 | memset(d, 0, sizeof(*d)); | |
1392 | ||
1393 | dev_priv->uncore.fw_domains &= ~BIT(domain_id); | |
1394 | } | |
1395 | ||
dc97997a | 1396 | static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv) |
0b274481 | 1397 | { |
e3b1895f | 1398 | if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv)) |
3225b2f9 MK |
1399 | return; |
1400 | ||
6e3955a5 CW |
1401 | if (IS_GEN6(dev_priv)) { |
1402 | dev_priv->uncore.fw_reset = 0; | |
1403 | dev_priv->uncore.fw_set = FORCEWAKE_KERNEL; | |
1404 | dev_priv->uncore.fw_clear = 0; | |
1405 | } else { | |
1406 | /* WaRsClearFWBitsAtReset:bdw,skl */ | |
1407 | dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff); | |
1408 | dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL); | |
1409 | dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL); | |
1410 | } | |
1411 | ||
a89a70a8 DCS |
1412 | if (INTEL_GEN(dev_priv) >= 11) { |
1413 | int i; | |
1414 | ||
cc38cae7 OM |
1415 | dev_priv->uncore.funcs.force_wake_get = |
1416 | fw_domains_get_with_fallback; | |
a89a70a8 DCS |
1417 | dev_priv->uncore.funcs.force_wake_put = fw_domains_put; |
1418 | fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, | |
1419 | FORCEWAKE_RENDER_GEN9, | |
1420 | FORCEWAKE_ACK_RENDER_GEN9); | |
1421 | fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER, | |
1422 | FORCEWAKE_BLITTER_GEN9, | |
1423 | FORCEWAKE_ACK_BLITTER_GEN9); | |
1424 | for (i = 0; i < I915_MAX_VCS; i++) { | |
1425 | if (!HAS_ENGINE(dev_priv, _VCS(i))) | |
1426 | continue; | |
1427 | ||
1428 | fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VDBOX0 + i, | |
1429 | FORCEWAKE_MEDIA_VDBOX_GEN11(i), | |
1430 | FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i)); | |
1431 | } | |
1432 | for (i = 0; i < I915_MAX_VECS; i++) { | |
1433 | if (!HAS_ENGINE(dev_priv, _VECS(i))) | |
1434 | continue; | |
1435 | ||
1436 | fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VEBOX0 + i, | |
1437 | FORCEWAKE_MEDIA_VEBOX_GEN11(i), | |
1438 | FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i)); | |
1439 | } | |
ac128918 | 1440 | } else if (IS_GEN10(dev_priv) || IS_GEN9(dev_priv)) { |
71306303 MK |
1441 | dev_priv->uncore.funcs.force_wake_get = |
1442 | fw_domains_get_with_fallback; | |
05a2fb15 MK |
1443 | dev_priv->uncore.funcs.force_wake_put = fw_domains_put; |
1444 | fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, | |
1445 | FORCEWAKE_RENDER_GEN9, | |
1446 | FORCEWAKE_ACK_RENDER_GEN9); | |
1447 | fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER, | |
1448 | FORCEWAKE_BLITTER_GEN9, | |
1449 | FORCEWAKE_ACK_BLITTER_GEN9); | |
1450 | fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, | |
1451 | FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9); | |
dc97997a | 1452 | } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { |
05a2fb15 | 1453 | dev_priv->uncore.funcs.force_wake_get = fw_domains_get; |
a338908c | 1454 | dev_priv->uncore.funcs.force_wake_put = fw_domains_put; |
05a2fb15 MK |
1455 | fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, |
1456 | FORCEWAKE_VLV, FORCEWAKE_ACK_VLV); | |
1457 | fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, | |
1458 | FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV); | |
dc97997a | 1459 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { |
05a2fb15 MK |
1460 | dev_priv->uncore.funcs.force_wake_get = |
1461 | fw_domains_get_with_thread_status; | |
a338908c | 1462 | dev_priv->uncore.funcs.force_wake_put = fw_domains_put; |
05a2fb15 MK |
1463 | fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, |
1464 | FORCEWAKE_MT, FORCEWAKE_ACK_HSW); | |
dc97997a | 1465 | } else if (IS_IVYBRIDGE(dev_priv)) { |
0b274481 BW |
1466 | u32 ecobus; |
1467 | ||
1468 | /* IVB configs may use multi-threaded forcewake */ | |
1469 | ||
1470 | /* A small trick here - if the bios hasn't configured | |
1471 | * MT forcewake, and if the device is in RC6, then | |
1472 | * force_wake_mt_get will not wake the device and the | |
1473 | * ECOBUS read will return zero, which will be
1474 | * (correctly) interpreted by the test below as MT | |
1475 | * forcewake being disabled. | |
1476 | */ | |
05a2fb15 MK |
1477 | dev_priv->uncore.funcs.force_wake_get = |
1478 | fw_domains_get_with_thread_status; | |
a338908c | 1479 | dev_priv->uncore.funcs.force_wake_put = fw_domains_put; |
05a2fb15 | 1480 | |
f9b3927a MK |
1481 | /* We need to init first for ECOBUS access and then
1482 | * determine later if we want to reinit, in case MT access is
6ea2556f MK |
1483 | * not working. At this stage we don't know which flavour this
1484 | * ivb is, so it is better to also reset the gen6 fw registers
1485 | * before the ecobus check.
f9b3927a | 1486 | */ |
6ea2556f MK |
1487 | |
1488 | __raw_i915_write32(dev_priv, FORCEWAKE, 0); | |
1489 | __raw_posting_read(dev_priv, ECOBUS); | |
1490 | ||
05a2fb15 MK |
1491 | fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, |
1492 | FORCEWAKE_MT, FORCEWAKE_MT_ACK); | |
f9b3927a | 1493 | |
556ab7a6 | 1494 | spin_lock_irq(&dev_priv->uncore.lock); |
bd527504 | 1495 | fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER); |
0b274481 | 1496 | ecobus = __raw_i915_read32(dev_priv, ECOBUS); |
a338908c | 1497 | fw_domains_put(dev_priv, FORCEWAKE_RENDER); |
556ab7a6 | 1498 | spin_unlock_irq(&dev_priv->uncore.lock); |
0b274481 | 1499 | |
05a2fb15 | 1500 | if (!(ecobus & FORCEWAKE_MT_ENABLE)) { |
0b274481 BW |
1501 | DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); |
1502 | DRM_INFO("when using vblank-synced partial screen updates.\n"); | |
05a2fb15 MK |
1503 | fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, |
1504 | FORCEWAKE, FORCEWAKE_ACK); | |
0b274481 | 1505 | } |
dc97997a | 1506 | } else if (IS_GEN6(dev_priv)) { |
0b274481 | 1507 | dev_priv->uncore.funcs.force_wake_get = |
05a2fb15 | 1508 | fw_domains_get_with_thread_status; |
a338908c | 1509 | dev_priv->uncore.funcs.force_wake_put = fw_domains_put; |
05a2fb15 MK |
1510 | fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, |
1511 | FORCEWAKE, FORCEWAKE_ACK); | |
0b274481 | 1512 | } |
3225b2f9 MK |
1513 | |
1514 | /* All future platforms are expected to require complex power gating */ | |
1515 | WARN_ON(dev_priv->uncore.fw_domains == 0); | |
f9b3927a MK |
1516 | } |
1517 | ||
15157970 TU |
1518 | #define ASSIGN_FW_DOMAINS_TABLE(d) \ |
1519 | { \ | |
1520 | dev_priv->uncore.fw_domains_table = \ | |
1521 | (struct intel_forcewake_range *)(d); \ | |
1522 | dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \ | |
1523 | } | |
1524 | ||
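/*
 * For illustration: ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges) points the
 * uncore at a platform's forcewake range table; the fwtable accessors
 * generated earlier consult this table per mmio offset to decide which
 * forcewake domains to wake.
 */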
264ec1a8 HG |
1525 | static int i915_pmic_bus_access_notifier(struct notifier_block *nb, |
1526 | unsigned long action, void *data) | |
1527 | { | |
1528 | struct drm_i915_private *dev_priv = container_of(nb, | |
1529 | struct drm_i915_private, uncore.pmic_bus_access_nb); | |
1530 | ||
1531 | switch (action) { | |
1532 | case MBI_PMIC_BUS_ACCESS_BEGIN: | |
1533 | /* | |
1534 | * forcewake all now to make sure that we don't need to do a | |
1535 | * forcewake later which on systems where this notifier gets | |
1536 | * called requires the punit to access the shared pmic i2c
1537 | * bus, which will be busy after this notification, leading to: | |
1538 | * "render: timed out waiting for forcewake ack request." | |
1539 | * errors. | |
ce30560c HG |
1540 | * |
1541 | * The notifier is unregistered during intel_runtime_suspend(), | |
1542 | * so it's ok to access the HW here without holding an RPM
1543 | * wake reference -> disable wakeref asserts for the duration
1544 | * of the access.
264ec1a8 | 1545 | */ |
ce30560c | 1546 | disable_rpm_wakeref_asserts(dev_priv); |
264ec1a8 | 1547 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); |
ce30560c | 1548 | enable_rpm_wakeref_asserts(dev_priv); |
264ec1a8 HG |
1549 | break; |
1550 | case MBI_PMIC_BUS_ACCESS_END: | |
1551 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | |
1552 | break; | |
1553 | } | |
1554 | ||
1555 | return NOTIFY_OK; | |
1556 | } | |
1557 | ||
dc97997a | 1558 | void intel_uncore_init(struct drm_i915_private *dev_priv) |
f9b3927a | 1559 | { |
dc97997a | 1560 | i915_check_vgpu(dev_priv); |
cf9d2890 | 1561 | |
3accaf7e | 1562 | intel_uncore_edram_detect(dev_priv); |
dc97997a | 1563 | intel_uncore_fw_domains_init(dev_priv); |
d60996ab | 1564 | __intel_uncore_early_sanitize(dev_priv, 0); |
0b274481 | 1565 | |
75714940 | 1566 | dev_priv->uncore.unclaimed_mmio_check = 1; |
264ec1a8 HG |
1567 | dev_priv->uncore.pmic_bus_access_nb.notifier_call = |
1568 | i915_pmic_bus_access_notifier; | |
75714940 | 1569 | |
e3b1895f | 1570 | if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) { |
0757ac8f CW |
1571 | ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2); |
1572 | ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2); | |
e3b1895f | 1573 | } else if (IS_GEN5(dev_priv)) { |
0757ac8f CW |
1574 | ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5); |
1575 | ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5); | |
e3b1895f | 1576 | } else if (IS_GEN(dev_priv, 6, 7)) { |
0757ac8f | 1577 | ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6); |
e3b1895f TU |
1578 | |
1579 | if (IS_VALLEYVIEW(dev_priv)) { | |
1580 | ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges); | |
0757ac8f | 1581 | ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable); |
e3b1895f | 1582 | } else { |
0757ac8f | 1583 | ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6); |
85ee17eb | 1584 | } |
e3b1895f | 1585 | } else if (IS_GEN8(dev_priv)) { |
dc97997a | 1586 | if (IS_CHERRYVIEW(dev_priv)) { |
15157970 | 1587 | ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges); |
0757ac8f CW |
1588 | ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable); |
1589 | ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable); | |
1938e59a D |
1590 | |
1591 | } else { | |
0757ac8f CW |
1592 | ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8); |
1593 | ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6); | |
1938e59a | 1594 | } |
a89a70a8 | 1595 | } else if (IS_GEN(dev_priv, 9, 10)) { |
e3b1895f | 1596 | ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges); |
0757ac8f CW |
1597 | ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable); |
1598 | ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable); | |
a89a70a8 DCS |
1599 | } else { |
1600 | ASSIGN_FW_DOMAINS_TABLE(__gen11_fw_ranges); | |
1601 | ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen11_fwtable); | |
1602 | ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen11_fwtable); | |
3967018e | 1603 | } |
ed493883 | 1604 | |
264ec1a8 HG |
1605 | iosf_mbi_register_pmic_bus_access_notifier( |
1606 | &dev_priv->uncore.pmic_bus_access_nb); | |
0b274481 BW |
1607 | } |
1608 | ||
26376a7e OM |
1609 | /* |
1610 | * We might have detected that some engines are fused off after we initialized | |
1611 | * the forcewake domains. Prune them, to make sure they only reference existing | |
1612 | * engines. | |
1613 | */ | |
1614 | void intel_uncore_prune(struct drm_i915_private *dev_priv) | |
1615 | { | |
1616 | if (INTEL_GEN(dev_priv) >= 11) { | |
1617 | enum forcewake_domains fw_domains = dev_priv->uncore.fw_domains; | |
1618 | enum forcewake_domain_id domain_id; | |
1619 | int i; | |
1620 | ||
1621 | for (i = 0; i < I915_MAX_VCS; i++) { | |
1622 | domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i; | |
1623 | ||
1624 | if (HAS_ENGINE(dev_priv, _VCS(i))) | |
1625 | continue; | |
1626 | ||
1627 | if (fw_domains & BIT(domain_id)) | |
1628 | fw_domain_fini(dev_priv, domain_id); | |
1629 | } | |
1630 | ||
1631 | for (i = 0; i < I915_MAX_VECS; i++) { | |
1632 | domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i; | |
1633 | ||
1634 | if (HAS_ENGINE(dev_priv, _VECS(i))) | |
1635 | continue; | |
1636 | ||
1637 | if (fw_domains & BIT(domain_id)) | |
1638 | fw_domain_fini(dev_priv, domain_id); | |
1639 | } | |
1640 | } | |
1641 | } | |
1642 | ||
dc97997a | 1643 | void intel_uncore_fini(struct drm_i915_private *dev_priv) |
0b274481 | 1644 | { |
0b274481 | 1645 | /* Paranoia: make sure we have disabled everything before we exit. */ |
dc97997a | 1646 | intel_uncore_sanitize(dev_priv); |
a5266db4 HG |
1647 | |
1648 | iosf_mbi_punit_acquire(); | |
1649 | iosf_mbi_unregister_pmic_bus_access_notifier_unlocked( | |
1650 | &dev_priv->uncore.pmic_bus_access_nb); | |
d60996ab | 1651 | intel_uncore_forcewake_reset(dev_priv); |
a5266db4 | 1652 | iosf_mbi_punit_release(); |
0b274481 BW |
1653 | } |
1654 | ||
3fd3a6ff JL |
1655 | static const struct reg_whitelist { |
1656 | i915_reg_t offset_ldw; | |
1657 | i915_reg_t offset_udw; | |
1658 | u16 gen_mask; | |
1659 | u8 size; | |
1660 | } reg_read_whitelist[] = { { | |
1661 | .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE), | |
1662 | .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE), | |
164daaf2 | 1663 | .gen_mask = INTEL_GEN_MASK(4, 11), |
3fd3a6ff JL |
1664 | .size = 8 |
1665 | } }; | |
907b28c5 CW |
1666 | |
1667 | int i915_reg_read_ioctl(struct drm_device *dev, | |
1668 | void *data, struct drm_file *file) | |
1669 | { | |
fac5e23e | 1670 | struct drm_i915_private *dev_priv = to_i915(dev); |
907b28c5 | 1671 | struct drm_i915_reg_read *reg = data; |
3fd3a6ff JL |
1672 | struct reg_whitelist const *entry; |
1673 | unsigned int flags; | |
1674 | int remain; | |
1675 | int ret = 0; | |
1676 | ||
1677 | entry = reg_read_whitelist; | |
1678 | remain = ARRAY_SIZE(reg_read_whitelist); | |
1679 | while (remain) { | |
1680 | u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw); | |
1681 | ||
1682 | GEM_BUG_ON(!is_power_of_2(entry->size)); | |
1683 | GEM_BUG_ON(entry->size > 8); | |
1684 | GEM_BUG_ON(entry_offset & (entry->size - 1)); | |
1685 | ||
1686 | if (INTEL_INFO(dev_priv)->gen_mask & entry->gen_mask && | |
1687 | entry_offset == (reg->offset & -entry->size)) | |
907b28c5 | 1688 | break; |
3fd3a6ff JL |
1689 | entry++; |
1690 | remain--; | |
907b28c5 CW |
1691 | } |
1692 | ||
3fd3a6ff | 1693 | if (!remain) |
907b28c5 CW |
1694 | return -EINVAL; |
1695 | ||
3fd3a6ff | 1696 | flags = reg->offset & (entry->size - 1); |
648a9bc5 | 1697 | |
cf67c70f | 1698 | intel_runtime_pm_get(dev_priv); |
3fd3a6ff JL |
1699 | if (entry->size == 8 && flags == I915_REG_READ_8B_WA) |
1700 | reg->val = I915_READ64_2x32(entry->offset_ldw, | |
1701 | entry->offset_udw); | |
1702 | else if (entry->size == 8 && flags == 0) | |
1703 | reg->val = I915_READ64(entry->offset_ldw); | |
1704 | else if (entry->size == 4 && flags == 0) | |
1705 | reg->val = I915_READ(entry->offset_ldw); | |
1706 | else if (entry->size == 2 && flags == 0) | |
1707 | reg->val = I915_READ16(entry->offset_ldw); | |
1708 | else if (entry->size == 1 && flags == 0) | |
1709 | reg->val = I915_READ8(entry->offset_ldw); | |
1710 | else | |
cf67c70f | 1711 | ret = -EINVAL; |
cf67c70f | 1712 | intel_runtime_pm_put(dev_priv); |
3fd3a6ff | 1713 | |
cf67c70f | 1714 | return ret; |
907b28c5 CW |
1715 | } |
1716 | ||
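/*
 * Illustrative userspace sketch (assuming the libdrm drmIoctl() wrapper;
 * not taken from this file): read the 64-bit render ring timestamp
 * whitelisted above, using the two-dword workaround flag:
 *
 *	struct drm_i915_reg_read req = {
 *		.offset = 0x2358 | I915_REG_READ_8B_WA, // RING_TIMESTAMP
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &req) == 0)
 *		printf("timestamp: %llu\n", (unsigned long long)req.val);
 */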
87de8d56 MK |
1717 | static void gen3_stop_engine(struct intel_engine_cs *engine) |
1718 | { | |
1719 | struct drm_i915_private *dev_priv = engine->i915; | |
1720 | const u32 base = engine->mmio_base; | |
3f6e9822 CW |
1721 | |
1722 | if (intel_engine_stop_cs(engine)) | |
1723 | DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name); | |
87de8d56 | 1724 | |
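	/* Make the ring appear empty (HEAD == TAIL) so the engine stops fetching */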
11caf551 | 1725 | I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base))); |
bc8f2f5d | 1726 | POSTING_READ_FW(RING_HEAD(base)); /* paranoia */ |
11caf551 | 1727 | |
87de8d56 MK |
1728 | I915_WRITE_FW(RING_HEAD(base), 0); |
1729 | I915_WRITE_FW(RING_TAIL(base), 0); | |
bc8f2f5d | 1730 | POSTING_READ_FW(RING_TAIL(base)); |
87de8d56 | 1731 | |
11caf551 CW |
1732 | /* The ring must be empty before it is disabled */ |
1733 | I915_WRITE_FW(RING_CTL(base), 0); | |
1734 | ||
87de8d56 MK |
1735 | /* Check acts as a post */ |
1736 | if (I915_READ_FW(RING_HEAD(base)) != 0) | |
1737 | DRM_DEBUG_DRIVER("%s: ring head not parked\n", | |
1738 | engine->name); | |
1739 | } | |
1740 | ||
1741 | static void i915_stop_engines(struct drm_i915_private *dev_priv, | |
e02e6500 | 1742 | unsigned int engine_mask) |
2c80353f MK |
1743 | { |
1744 | struct intel_engine_cs *engine; | |
1745 | enum intel_engine_id id; | |
1746 | ||
5896a5c8 CW |
1747 | if (INTEL_GEN(dev_priv) < 3) |
1748 | return; | |
1749 | ||
87de8d56 MK |
1750 | for_each_engine_masked(engine, dev_priv, engine_mask, id) |
1751 | gen3_stop_engine(engine); | |
2c80353f MK |
1752 | } |
1753 | ||
a1ab7dcf | 1754 | static bool i915_in_reset(struct pci_dev *pdev) |
907b28c5 CW |
1755 | { |
1756 | u8 gdrst; | |
9593a657 | 1757 | |
dc97997a | 1758 | pci_read_config_byte(pdev, I915_GDRST, &gdrst); |
a1ab7dcf | 1759 | return gdrst & GRDOM_RESET_STATUS; |
907b28c5 CW |
1760 | } |
1761 | ||
e02e6500 MK |
1762 | static int i915_do_reset(struct drm_i915_private *dev_priv, |
1763 | unsigned int engine_mask, | |
1764 | unsigned int retry) | |
907b28c5 | 1765 | { |
91c8a326 | 1766 | struct pci_dev *pdev = dev_priv->drm.pdev; |
a1ab7dcf | 1767 | int err; |
dc97997a | 1768 | |
a1ab7dcf | 1769 | /* Assert reset for at least 20 usec, and wait for acknowledgement. */ |
dc97997a | 1770 | pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); |
9593a657 | 1771 | usleep_range(50, 200); |
a1ab7dcf CW |
1772 | err = wait_for(i915_in_reset(pdev), 500); |
1773 | ||
1774 | /* Clear the reset request. */ | |
dc97997a | 1775 | pci_write_config_byte(pdev, I915_GDRST, 0); |
a1ab7dcf CW |
1776 | usleep_range(50, 200); |
1777 | if (!err) | |
1778 | err = wait_for(!i915_in_reset(pdev), 500); | |
907b28c5 | 1779 | |
a1ab7dcf | 1780 | return err; |
73bbf6bd VS |
1781 | } |
1782 | ||
9593a657 | 1783 | static bool g4x_reset_complete(struct pci_dev *pdev) |
73bbf6bd VS |
1784 | { |
1785 | u8 gdrst; | |
9593a657 | 1786 | |
dc97997a | 1787 | pci_read_config_byte(pdev, I915_GDRST, &gdrst); |
73bbf6bd | 1788 | return (gdrst & GRDOM_RESET_ENABLE) == 0; |
907b28c5 CW |
1789 | } |
1790 | ||
e02e6500 MK |
1791 | static int g33_do_reset(struct drm_i915_private *dev_priv, |
1792 | unsigned int engine_mask, | |
1793 | unsigned int retry) | |
408d4b9e | 1794 | { |
91c8a326 | 1795 | struct pci_dev *pdev = dev_priv->drm.pdev; |
9593a657 | 1796 | |
dc97997a CW |
1797 | pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); |
1798 | return wait_for(g4x_reset_complete(pdev), 500); | |
408d4b9e VS |
1799 | } |
1800 | ||
e02e6500 MK |
1801 | static int g4x_do_reset(struct drm_i915_private *dev_priv, |
1802 | unsigned int engine_mask, | |
1803 | unsigned int retry) | |
fa4f53c4 | 1804 | { |
91c8a326 | 1805 | struct pci_dev *pdev = dev_priv->drm.pdev; |
fa4f53c4 VS |
1806 | int ret; |
1807 | ||
fa4f53c4 | 1808 | /* WaVcpClkGateDisableForMediaReset:ctg,elk */ |
44e1e7ba CW |
1809 | I915_WRITE(VDECCLK_GATE_D, |
1810 | I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE); | |
fa4f53c4 VS |
1811 | POSTING_READ(VDECCLK_GATE_D); |
1812 | ||
dc97997a | 1813 | pci_write_config_byte(pdev, I915_GDRST, |
fa4f53c4 | 1814 | GRDOM_MEDIA | GRDOM_RESET_ENABLE); |
dc97997a | 1815 | ret = wait_for(g4x_reset_complete(pdev), 500); |
9593a657 CW |
1816 | if (ret) { |
1817 | DRM_DEBUG_DRIVER("Wait for media reset failed\n"); | |
44e1e7ba | 1818 | goto out; |
9593a657 | 1819 | } |
fa4f53c4 | 1820 | |
44e1e7ba CW |
1821 | pci_write_config_byte(pdev, I915_GDRST, |
1822 | GRDOM_RENDER | GRDOM_RESET_ENABLE); | |
1823 | ret = wait_for(g4x_reset_complete(pdev), 500); | |
1824 | if (ret) { | |
1825 | DRM_DEBUG_DRIVER("Wait for render reset failed\n"); | |
1826 | goto out; | |
1827 | } | |
fa4f53c4 | 1828 | |
9593a657 | 1829 | out: |
dc97997a | 1830 | pci_write_config_byte(pdev, I915_GDRST, 0); |
44e1e7ba CW |
1831 | |
1832 | I915_WRITE(VDECCLK_GATE_D, | |
1833 | I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE); | |
1834 | POSTING_READ(VDECCLK_GATE_D); | |
1835 | ||
9593a657 | 1836 | return ret; |
fa4f53c4 VS |
1837 | } |
1838 | ||
dc97997a | 1839 | static int ironlake_do_reset(struct drm_i915_private *dev_priv, |
e02e6500 MK |
1840 | unsigned int engine_mask, |
1841 | unsigned int retry) | |
907b28c5 | 1842 | { |
907b28c5 CW |
1843 | int ret; |
1844 | ||
9593a657 | 1845 | I915_WRITE(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE); |
87273b71 CW |
1846 | ret = intel_wait_for_register(dev_priv, |
1847 | ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0, | |
1848 | 500); | |
9593a657 CW |
1849 | if (ret) { |
1850 | DRM_DEBUG_DRIVER("Wait for render reset failed\n"); | |
1851 | goto out; | |
1852 | } | |
907b28c5 | 1853 | |
9593a657 | 1854 | I915_WRITE(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE); |
87273b71 CW |
1855 | ret = intel_wait_for_register(dev_priv, |
1856 | ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0, | |
1857 | 500); | |
9593a657 CW |
1858 | if (ret) { |
1859 | DRM_DEBUG_DRIVER("Wait for media reset failed\n"); | |
1860 | goto out; | |
1861 | } | |
9aa7250f | 1862 | |
9593a657 | 1863 | out: |
c039b7f2 | 1864 | I915_WRITE(ILK_GDSR, 0); |
9593a657 CW |
1865 | POSTING_READ(ILK_GDSR); |
1866 | return ret; | |
907b28c5 CW |
1867 | } |
1868 | ||
ee4b6faf MK |
1869 | /* Reset the hardware domains (GENX_GRDOM_*) specified by mask */ |
1870 | static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv, | |
1871 | u32 hw_domain_mask) | |
907b28c5 | 1872 | { |
9593a657 CW |
1873 | int err; |
1874 | ||
907b28c5 CW |
1875 | /* GEN6_GDRST is not in the gt power well, no need to check |
1876 | * for fifo space for the write or forcewake the chip for | |
1877 | * the read | |
1878 | */ | |
ee4b6faf | 1879 | __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask); |
907b28c5 | 1880 | |
a3662830 | 1881 | /* Wait for the device to ack the reset requests */ |
5807e1c2 CW |
1882 | err = __intel_wait_for_register_fw(dev_priv, |
1883 | GEN6_GDRST, hw_domain_mask, 0, | |
1884 | 500, 0, | |
1885 | NULL); | |
9593a657 CW |
1886 | if (err) |
1887 | DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n", | |
1888 | hw_domain_mask); | |
1889 | ||
1890 | return err; | |
ee4b6faf MK |
1891 | } |
1892 | ||
1893 | /** | |
1894 | * gen6_reset_engines - reset individual engines | |
dc97997a | 1895 | * @dev_priv: i915 device |
ee4b6faf | 1896 | * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset |
e02e6500 | 1897 | * @retry: the count of previous attempts to reset.
ee4b6faf MK |
1898 | * |
1899 | * This function will reset the individual engines that are set in engine_mask. | |
1900 | * If you provide ALL_ENGINES as mask, full global domain reset will be issued. | |
1901 | * | |
1902 | * Note: It is the responsibility of the caller to handle the difference between
1903 | * asking for a full domain reset versus a reset of all available individual engines.
1904 | * | |
1905 | * Returns 0 on success, nonzero on error. | |
1906 | */ | |
dc97997a | 1907 | static int gen6_reset_engines(struct drm_i915_private *dev_priv, |
e02e6500 MK |
1908 | unsigned int engine_mask, |
1909 | unsigned int retry) | |
ee4b6faf | 1910 | { |
ee4b6faf MK |
1911 | struct intel_engine_cs *engine; |
1912 | const u32 hw_engine_mask[I915_NUM_ENGINES] = { | |
1913 | [RCS] = GEN6_GRDOM_RENDER, | |
1914 | [BCS] = GEN6_GRDOM_BLT, | |
1915 | [VCS] = GEN6_GRDOM_MEDIA, | |
1916 | [VCS2] = GEN8_GRDOM_MEDIA2, | |
1917 | [VECS] = GEN6_GRDOM_VECS, | |
1918 | }; | |
1919 | u32 hw_mask; | |
ee4b6faf MK |
1920 | |
1921 | if (engine_mask == ALL_ENGINES) { | |
1922 | hw_mask = GEN6_GRDOM_FULL; | |
1923 | } else { | |
bafb0fce CW |
1924 | unsigned int tmp; |
1925 | ||
ee4b6faf | 1926 | hw_mask = 0; |
bafb0fce | 1927 | for_each_engine_masked(engine, dev_priv, engine_mask, tmp) |
ee4b6faf MK |
1928 | hw_mask |= hw_engine_mask[engine->id]; |
1929 | } | |
1930 | ||
4055dc75 | 1931 | return gen6_hw_domain_reset(dev_priv, hw_mask); |
907b28c5 CW |
1932 | } |
1933 | ||
e34b0345 MT |
1934 | /** |
1935 | * gen11_reset_engines - reset individual engines | |
1936 | * @dev_priv: i915 device | |
1937 | * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset | |
1938 | * | |
1939 | * This function will reset the individual engines that are set in engine_mask. | |
1940 | * If you provide ALL_ENGINES as mask, full global domain reset will be issued. | |
1941 | * | |
1943 | * Note: It is the responsibility of the caller to handle the difference between
1944 | * asking for a full domain reset versus a reset of all available individual engines.
1944 | * | |
1945 | * Returns 0 on success, nonzero on error. | |
1946 | */ | |
1947 | static int gen11_reset_engines(struct drm_i915_private *dev_priv, | |
e02e6500 | 1948 | unsigned int engine_mask) |
e34b0345 MT |
1949 | { |
1950 | struct intel_engine_cs *engine; | |
1951 | const u32 hw_engine_mask[I915_NUM_ENGINES] = { | |
1952 | [RCS] = GEN11_GRDOM_RENDER, | |
1953 | [BCS] = GEN11_GRDOM_BLT, | |
1954 | [VCS] = GEN11_GRDOM_MEDIA, | |
1955 | [VCS2] = GEN11_GRDOM_MEDIA2, | |
1956 | [VCS3] = GEN11_GRDOM_MEDIA3, | |
1957 | [VCS4] = GEN11_GRDOM_MEDIA4, | |
1958 | [VECS] = GEN11_GRDOM_VECS, | |
1959 | [VECS2] = GEN11_GRDOM_VECS2, | |
1960 | }; | |
1961 | u32 hw_mask; | |
1962 | ||
1963 | BUILD_BUG_ON(VECS2 + 1 != I915_NUM_ENGINES); | |
1964 | ||
1965 | if (engine_mask == ALL_ENGINES) { | |
1966 | hw_mask = GEN11_GRDOM_FULL; | |
1967 | } else { | |
1968 | unsigned int tmp; | |
1969 | ||
1970 | hw_mask = 0; | |
1971 | for_each_engine_masked(engine, dev_priv, engine_mask, tmp) | |
1972 | hw_mask |= hw_engine_mask[engine->id]; | |
1973 | } | |
1974 | ||
1975 | return gen6_hw_domain_reset(dev_priv, hw_mask); | |
1976 | } | |
1977 | ||
1758b90e | 1978 | /** |
1d1a9774 | 1979 | * __intel_wait_for_register_fw - wait until register matches expected state |
1758b90e CW |
1980 | * @dev_priv: the i915 device |
1981 | * @reg: the register to read | |
1982 | * @mask: mask to apply to register value | |
1983 | * @value: expected value | |
1d1a9774 MW |
1984 | * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
1985 | * @slow_timeout_ms: slow timeout in milliseconds
1986 | * @out_value: optional placeholder to hold the register value
1758b90e CW |
1987 | * |
1988 | * This routine waits until the target register @reg contains the expected | |
3d466cd6 DV |
1989 | * @value after applying the @mask, i.e. it waits until :: |
1990 | * | |
1991 | * (I915_READ_FW(reg) & mask) == value | |
1992 | * | |
1d1a9774 | 1993 | * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
6976e74b | 1994 | * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
84d84cb7 | 1995 | * must not be larger than 20,000 microseconds.
1758b90e CW |
1996 | * |
1997 | * Note that this routine assumes the caller holds forcewake asserted, it is | |
1998 | * not suitable for very long waits. See intel_wait_for_register() if you | |
1999 | * wish to wait without holding forcewake for the duration (i.e. you expect | |
2000 | * the wait to be slow). | |
2001 | * | |
2002 | * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
2003 | */ | |
1d1a9774 MW |
2004 | int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv, |
2005 | i915_reg_t reg, | |
3fc7d86b MW |
2006 | u32 mask, |
2007 | u32 value, | |
2008 | unsigned int fast_timeout_us, | |
2009 | unsigned int slow_timeout_ms, | |
1d1a9774 | 2010 | u32 *out_value) |
1758b90e | 2011 | { |
ff26ffa8 | 2012 | u32 uninitialized_var(reg_value); |
1d1a9774 MW |
2013 | #define done (((reg_value = I915_READ_FW(reg)) & mask) == value) |
2014 | int ret; | |
2015 | ||
6976e74b | 2016 | /* Catch any overuse of this function */ |
84d84cb7 CW |
2017 | might_sleep_if(slow_timeout_ms); |
2018 | GEM_BUG_ON(fast_timeout_us > 20000); | |
6976e74b | 2019 | |
84d84cb7 CW |
2020 | ret = -ETIMEDOUT; |
2021 | if (fast_timeout_us && fast_timeout_us <= 20000) | |
1d1a9774 | 2022 | ret = _wait_for_atomic(done, fast_timeout_us, 0); |
ff26ffa8 | 2023 | if (ret && slow_timeout_ms) |
1d1a9774 | 2024 | ret = wait_for(done, slow_timeout_ms); |
84d84cb7 | 2025 | |
1d1a9774 MW |
2026 | if (out_value) |
2027 | *out_value = reg_value; | |
84d84cb7 | 2028 | |
1758b90e CW |
2029 | return ret; |
2030 | #undef done | |
2031 | } | |
2032 | ||
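/*
 * Illustrative caller (cf. gen6_hw_domain_reset() above): with forcewake
 * already held, poll GEN6_GDRST until the requested reset domains clear:
 *
 *	err = __intel_wait_for_register_fw(dev_priv, GEN6_GDRST,
 *					   hw_domain_mask, 0,
 *					   500, 0, NULL);
 */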
2033 | /** | |
23fdbdd7 | 2034 | * __intel_wait_for_register - wait until register matches expected state |
1758b90e CW |
2035 | * @dev_priv: the i915 device |
2036 | * @reg: the register to read | |
2037 | * @mask: mask to apply to register value | |
2038 | * @value: expected value | |
23fdbdd7 SP |
2039 | * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
2040 | * @slow_timeout_ms: slow timeout in milliseconds
2041 | * @out_value: optional placeholder to hold the register value
1758b90e CW |
2042 | * |
2043 | * This routine waits until the target register @reg contains the expected | |
3d466cd6 DV |
2044 | * @value after applying the @mask, i.e. it waits until :: |
2045 | * | |
2046 | * (I915_READ(reg) & mask) == value | |
2047 | * | |
1758b90e CW |
2048 | * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
2049 | * | |
2050 | * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
2051 | */ | |
23fdbdd7 | 2052 | int __intel_wait_for_register(struct drm_i915_private *dev_priv, |
1758b90e | 2053 | i915_reg_t reg, |
3fc7d86b MW |
2054 | u32 mask, |
2055 | u32 value, | |
23fdbdd7 SP |
2056 | unsigned int fast_timeout_us, |
2057 | unsigned int slow_timeout_ms, | |
2058 | u32 *out_value) | |
7fd2d269 | 2059 | { |
1758b90e CW |
2060 | unsigned fw = |
2061 | intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ); | |
23fdbdd7 | 2062 | u32 reg_value; |
1758b90e CW |
2063 | int ret; |
2064 | ||
3df82dd4 | 2065 | might_sleep_if(slow_timeout_ms); |
05646543 CW |
2066 | |
2067 | spin_lock_irq(&dev_priv->uncore.lock); | |
2068 | intel_uncore_forcewake_get__locked(dev_priv, fw); | |
2069 | ||
2070 | ret = __intel_wait_for_register_fw(dev_priv, | |
2071 | reg, mask, value, | |
23fdbdd7 | 2072 | fast_timeout_us, 0, ®_value); |
05646543 CW |
2073 | |
2074 | intel_uncore_forcewake_put__locked(dev_priv, fw); | |
2075 | spin_unlock_irq(&dev_priv->uncore.lock); | |
2076 | ||
3df82dd4 | 2077 | if (ret && slow_timeout_ms) |
23fdbdd7 SP |
2078 | ret = __wait_for(reg_value = I915_READ_NOTRACE(reg), |
2079 | (reg_value & mask) == value, | |
2080 | slow_timeout_ms * 1000, 10, 1000); | |
2081 | ||
2082 | if (out_value) | |
2083 | *out_value = reg_value; | |
1758b90e CW |
2084 | |
2085 | return ret; | |
d431440c TE |
2086 | } |
2087 | ||
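/*
 * Illustrative caller sketch (the timeout values here are invented for
 * the example): a sleeping wait that takes forcewake internally:
 *
 *	ret = __intel_wait_for_register(dev_priv, ILK_GDSR,
 *					ILK_GRDOM_RESET_ENABLE, 0,
 *					2, 500, NULL);
 */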
f4e60c5c | 2088 | static int gen8_engine_reset_prepare(struct intel_engine_cs *engine) |
d431440c | 2089 | { |
c033666a | 2090 | struct drm_i915_private *dev_priv = engine->i915; |
d431440c | 2091 | int ret; |
d431440c TE |
2092 | |
2093 | I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), | |
2094 | _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)); | |
2095 | ||
5807e1c2 CW |
2096 | ret = __intel_wait_for_register_fw(dev_priv, |
2097 | RING_RESET_CTL(engine->mmio_base), | |
2098 | RESET_CTL_READY_TO_RESET, | |
2099 | RESET_CTL_READY_TO_RESET, | |
2100 | 700, 0, | |
2101 | NULL); | |
d431440c TE |
2102 | if (ret) |
2103 | DRM_ERROR("%s: reset request timeout\n", engine->name); | |
2104 | ||
2105 | return ret; | |
2106 | } | |
2107 | ||
f4e60c5c | 2108 | static void gen8_engine_reset_cancel(struct intel_engine_cs *engine) |
d431440c | 2109 | { |
c033666a | 2110 | struct drm_i915_private *dev_priv = engine->i915; |
d431440c TE |
2111 | |
2112 | I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), | |
2113 | _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)); | |
7fd2d269 MK |
2114 | } |
2115 | ||
f4e60c5c MK |
2116 | static int reset_engines(struct drm_i915_private *i915, |
2117 | unsigned int engine_mask, | |
2118 | unsigned int retry) | |
2119 | { | |
2120 | if (INTEL_GEN(i915) >= 11) | |
2121 | return gen11_reset_engines(i915, engine_mask); | |
2122 | else | |
2123 | return gen6_reset_engines(i915, engine_mask, retry); | |
2124 | } | |
2125 | ||
dc97997a | 2126 | static int gen8_reset_engines(struct drm_i915_private *dev_priv, |
e02e6500 MK |
2127 | unsigned int engine_mask, |
2128 | unsigned int retry) | |
7fd2d269 | 2129 | { |
7fd2d269 | 2130 | struct intel_engine_cs *engine; |
f4e60c5c | 2131 | const bool reset_non_ready = retry >= 1; |
bafb0fce | 2132 | unsigned int tmp; |
c30acb04 | 2133 | int ret; |
7fd2d269 | 2134 | |
c30acb04 | 2135 | for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { |
f4e60c5c MK |
2136 | ret = gen8_engine_reset_prepare(engine); |
2137 | if (ret && !reset_non_ready) | |
2138 | goto skip_reset; | |
2139 | ||
2140 | /* | |
2141 | * If this is not the first failed attempt to prepare, | |
2142 | * we decide to proceed anyway. | |
2143 | * | |
2144 | * By doing so we risk context corruption and with | |
2145 | * some gens (kbl), possible system hang if reset | |
2146 | * happens during active bb execution. | |
2147 | * | |
2148 | * We would rather take context corruption than a
2149 | * failed reset with a wedged driver/gpu. The
2150 | * active bb execution case should be covered by
2151 | * the i915_stop_engines call we make before the reset.
2152 | */ | |
c30acb04 | 2153 | } |
7fd2d269 | 2154 | |
f4e60c5c | 2155 | ret = reset_engines(dev_priv, engine_mask, retry); |
7fd2d269 | 2156 | |
f4e60c5c | 2157 | skip_reset: |
bafb0fce | 2158 | for_each_engine_masked(engine, dev_priv, engine_mask, tmp) |
f4e60c5c | 2159 | gen8_engine_reset_cancel(engine); |
7fd2d269 | 2160 | |
c30acb04 | 2161 | return ret; |
7fd2d269 MK |
2162 | } |
2163 | ||
e02e6500 MK |
2164 | typedef int (*reset_func)(struct drm_i915_private *, |
2165 | unsigned int engine_mask, unsigned int retry); | |
dc97997a CW |
2166 | |
2167 | static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv) | |
907b28c5 | 2168 | { |
4f044a88 | 2169 | if (!i915_modparams.reset) |
b1330fbb CW |
2170 | return NULL; |
2171 | ||
c56b89f1 | 2172 | if (INTEL_GEN(dev_priv) >= 8) |
ee4b6faf | 2173 | return gen8_reset_engines; |
c56b89f1 | 2174 | else if (INTEL_GEN(dev_priv) >= 6) |
ee4b6faf | 2175 | return gen6_reset_engines; |
dc97997a | 2176 | else if (IS_GEN5(dev_priv)) |
49e4d842 | 2177 | return ironlake_do_reset; |
dc97997a | 2178 | else if (IS_G4X(dev_priv)) |
49e4d842 | 2179 | return g4x_do_reset; |
73f67aa8 | 2180 | else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) |
49e4d842 | 2181 | return g33_do_reset; |
c56b89f1 | 2182 | else if (INTEL_GEN(dev_priv) >= 3) |
49e4d842 | 2183 | return i915_do_reset; |
542c184f | 2184 | else |
49e4d842 CW |
2185 | return NULL; |
2186 | } | |
2187 | ||
f4e60c5c MK |
2188 | int intel_gpu_reset(struct drm_i915_private *dev_priv, |
2189 | const unsigned int engine_mask) | |
49e4d842 | 2190 | { |
5896a5c8 | 2191 | reset_func reset = intel_get_gpu_reset(dev_priv); |
e02e6500 | 2192 | unsigned int retry; |
99106bc1 | 2193 | int ret; |
49e4d842 | 2194 | |
f4e60c5c MK |
2195 | GEM_BUG_ON(!engine_mask); |
2196 | ||
5807e1c2 CW |
2197 | /* |
2198 | * We want to perform per-engine reset from atomic context (e.g. | |
2199 | * softirq), which imposes the constraint that we cannot sleep. | |
2200 | * However, experience suggests that spending a bit of time waiting | |
2201 | * for a reset helps in various cases, so for a full-device reset | |
2202 | * we apply the opposite rule and wait if we want to. As we should | |
2203 | * always follow up a failed per-engine reset with a full device reset, | |
2204 | * being a little faster, stricter and more error prone for the | |
2205 | * atomic case seems an acceptable compromise. | |
2206 | * | |
2207 | * Unfortunately this leads to a bimodal routine, when the goal was | |
2208 | * to have a single reset function that worked for resetting any | |
2209 | * number of engines simultaneously. | |
2210 | */ | |
2211 | might_sleep_if(engine_mask == ALL_ENGINES); | |
9593a657 | 2212 | |
5807e1c2 CW |
2213 | /* |
2214 | * If the power well sleeps during the reset, the reset | |
99106bc1 MK |
2215 | * request may be dropped and never completes (causing -EIO). |
2216 | */ | |
2217 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | |
9593a657 | 2218 | for (retry = 0; retry < 3; retry++) { |
87de8d56 | 2219 | |
5807e1c2 CW |
2220 | /* |
2221 | * We stop engines, otherwise we might get failed reset and a | |
87de8d56 MK |
2222 | * dead gpu (on elk). Even a gpu as modern as kbl can suffer
2223 | * from a system hang if a batchbuffer is progressing when
2224 | * the reset is issued, regardless of the READY_TO_RESET ack.
2225 | * Thus assume it is best to stop engines on all gens | |
2226 | * where we have a gpu reset. | |
2227 | * | |
39e78234 MK |
2228 | * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES) |
2229 | * | |
87de8d56 MK |
2230 | * WaMediaResetMainRingCleanup:ctg,elk (presumably) |
2231 | * | |
2232 | * FIXME: Wa for more modern gens needs to be validated | |
2233 | */ | |
2234 | i915_stop_engines(dev_priv, engine_mask); | |
2235 | ||
5896a5c8 | 2236 | ret = -ENODEV; |
0f36a85c | 2237 | if (reset) { |
e02e6500 MK |
2238 | ret = reset(dev_priv, engine_mask, retry); |
2239 | GEM_TRACE("engine_mask=%x, ret=%d, retry=%d\n", | |
2240 | engine_mask, ret, retry); | |
0f36a85c | 2241 | } |
5807e1c2 | 2242 | if (ret != -ETIMEDOUT || engine_mask != ALL_ENGINES) |
9593a657 CW |
2243 | break; |
2244 | ||
2245 | cond_resched(); | |
2246 | } | |
99106bc1 MK |
2247 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); |
2248 | ||
2249 | return ret; | |
49e4d842 CW |
2250 | } |
2251 | ||
dc97997a | 2252 | bool intel_has_gpu_reset(struct drm_i915_private *dev_priv) |
49e4d842 | 2253 | { |
dc97997a | 2254 | return intel_get_gpu_reset(dev_priv) != NULL; |
907b28c5 CW |
2255 | } |
2256 | ||
142bc7d9 MT |
2257 | bool intel_has_reset_engine(struct drm_i915_private *dev_priv) |
2258 | { | |
2259 | return (dev_priv->info.has_reset_engine && | |
4f044a88 | 2260 | i915_modparams.reset >= 2); |
142bc7d9 MT |
2261 | } |
2262 | ||
cb20a3c0 | 2263 | int intel_reset_guc(struct drm_i915_private *dev_priv) |
6b332fa2 | 2264 | { |
e34b0345 MT |
2265 | u32 guc_domain = INTEL_GEN(dev_priv) >= 11 ? GEN11_GRDOM_GUC : |
2266 | GEN9_GRDOM_GUC; | |
6b332fa2 | 2267 | int ret; |
6b332fa2 | 2268 | |
6f25d0be | 2269 | GEM_BUG_ON(!HAS_GUC(dev_priv)); |
6b332fa2 AS |
2270 | |
2271 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | |
e34b0345 | 2272 | ret = gen6_hw_domain_reset(dev_priv, guc_domain); |
6b332fa2 AS |
2273 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); |
2274 | ||
2275 | return ret; | |
2276 | } | |
2277 | ||
fc97618b | 2278 | bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv) |
907b28c5 | 2279 | { |
fc97618b | 2280 | return check_for_unclaimed_mmio(dev_priv); |
907b28c5 | 2281 | } |
75714940 | 2282 | |
bc3b9346 | 2283 | bool |
75714940 MK |
2284 | intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv) |
2285 | { | |
a167b1e1 CW |
2286 | bool ret = false; |
2287 | ||
2288 | spin_lock_irq(&dev_priv->uncore.lock); | |
2289 | ||
7ef4ac6e | 2290 | if (unlikely(dev_priv->uncore.unclaimed_mmio_check <= 0)) |
a167b1e1 | 2291 | goto out; |
75714940 MK |
2292 | |
2293 | if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) { | |
7ef4ac6e CW |
2294 | if (!i915_modparams.mmio_debug) { |
2295 | DRM_DEBUG("Unclaimed register detected, " | |
2296 | "enabling oneshot unclaimed register reporting. " | |
2297 | "Please use i915.mmio_debug=N for more information.\n"); | |
2298 | i915_modparams.mmio_debug++; | |
2299 | } | |
75714940 | 2300 | dev_priv->uncore.unclaimed_mmio_check--; |
a167b1e1 | 2301 | ret = true; |
75714940 | 2302 | } |
bc3b9346 | 2303 | |
a167b1e1 CW |
2304 | out: |
2305 | spin_unlock_irq(&dev_priv->uncore.lock); | |
2306 | ||
2307 | return ret; | |
75714940 | 2308 | } |
3756685a TU |
2309 | |
2310 | static enum forcewake_domains | |
2311 | intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv, | |
2312 | i915_reg_t reg) | |
2313 | { | |
895833bd | 2314 | u32 offset = i915_mmio_reg_offset(reg); |
3756685a TU |
2315 | enum forcewake_domains fw_domains; |
2316 | ||
a89a70a8 DCS |
2317 | if (INTEL_GEN(dev_priv) >= 11) { |
2318 | fw_domains = __gen11_fwtable_reg_read_fw_domains(offset); | |
2319 | } else if (HAS_FWTABLE(dev_priv)) { | |
895833bd TU |
2320 | fw_domains = __fwtable_reg_read_fw_domains(offset); |
2321 | } else if (INTEL_GEN(dev_priv) >= 6) { | |
2322 | fw_domains = __gen6_reg_read_fw_domains(offset); | |
2323 | } else { | |
2324 | WARN_ON(!IS_GEN(dev_priv, 2, 5)); | |
2325 | fw_domains = 0; | |
3756685a TU |
2326 | } |
2327 | ||
2328 | WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains); | |
2329 | ||
2330 | return fw_domains; | |
2331 | } | |
2332 | ||
2333 | static enum forcewake_domains | |
2334 | intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv, | |
2335 | i915_reg_t reg) | |
2336 | { | |
22d48c55 | 2337 | u32 offset = i915_mmio_reg_offset(reg); |
3756685a TU |
2338 | enum forcewake_domains fw_domains; |
2339 | ||
a89a70a8 DCS |
2340 | if (INTEL_GEN(dev_priv) >= 11) { |
2341 | fw_domains = __gen11_fwtable_reg_write_fw_domains(offset); | |
2342 | } else if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) { | |
22d48c55 TU |
2343 | fw_domains = __fwtable_reg_write_fw_domains(offset); |
2344 | } else if (IS_GEN8(dev_priv)) { | |
2345 | fw_domains = __gen8_reg_write_fw_domains(offset); | |
2346 | } else if (IS_GEN(dev_priv, 6, 7)) { | |
3756685a | 2347 | fw_domains = FORCEWAKE_RENDER; |
22d48c55 TU |
2348 | } else { |
2349 | WARN_ON(!IS_GEN(dev_priv, 2, 5)); | |
2350 | fw_domains = 0; | |
3756685a TU |
2351 | } |
2352 | ||
2353 | WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains); | |
2354 | ||
2355 | return fw_domains; | |
2356 | } | |
2357 | ||
2358 | /** | |
2359 | * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access | |
2360 | * a register | |
2361 | * @dev_priv: pointer to struct drm_i915_private | |
2362 | * @reg: register in question | |
2363 | * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE | |
2364 | * | |
2365 | * Returns a set of forcewake domains required to be taken with for example | |
2366 | * intel_uncore_forcewake_get for the specified register to be accessible in the | |
2367 | * specified mode (read, write or read/write) with raw mmio accessors. | |
2368 | * | |
2369 | * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the | |
2370 | * callers to do FIFO management on their own or risk losing writes. | |
2371 | */ | |
2372 | enum forcewake_domains | |
2373 | intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv, | |
2374 | i915_reg_t reg, unsigned int op) | |
2375 | { | |
2376 | enum forcewake_domains fw_domains = 0; | |
2377 | ||
2378 | WARN_ON(!op); | |
2379 | ||
895833bd TU |
2380 | if (intel_vgpu_active(dev_priv)) |
2381 | return 0; | |
2382 | ||
3756685a TU |
2383 | if (op & FW_REG_READ) |
2384 | fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg); | |
2385 | ||
2386 | if (op & FW_REG_WRITE) | |
2387 | fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg); | |
2388 | ||
2389 | return fw_domains; | |
2390 | } | |
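
/*
 * Illustrative usage sketch, mirroring the locked pattern used by
 * __intel_wait_for_register() above:
 *
 *	fw = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_WRITE);
 *	spin_lock_irq(&dev_priv->uncore.lock);
 *	intel_uncore_forcewake_get__locked(dev_priv, fw);
 *	I915_WRITE_FW(reg, val);
 *	intel_uncore_forcewake_put__locked(dev_priv, fw);
 *	spin_unlock_irq(&dev_priv->uncore.lock);
 */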
26e7a2a1 CW |
2391 | |
2392 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) | |
0757ac8f | 2393 | #include "selftests/mock_uncore.c" |
26e7a2a1 CW |
2394 | #include "selftests/intel_uncore.c" |
2395 | #endif |